Skip to content

Commit

Permalink
Refactor sending of data to ReBenchDB and test it
Browse files Browse the repository at this point in the history
- extract convert_data_to_api_format
- extract convert_data_to_json method
- add a test for the JSON rendering
  - this is really mostly for me to design the new API
  • Loading branch information
smarr committed Feb 18, 2024
1 parent 21bd647 commit d964faf
Show file tree
Hide file tree
Showing 3 changed files with 130 additions and 5 deletions.
11 changes: 8 additions & 3 deletions rebench/persistence.py
Original file line number Diff line number Diff line change
Expand Up @@ -432,12 +432,11 @@ def _send_data_and_empty_cache(self):
if self._send_data(self._cache):
self._cache = {}

def _send_data(self, cache):
self.ui.debug_output_info("ReBenchDB: Prepare data for sending\n")
def convert_data_to_api_format(self, data):
num_measurements = 0
all_data = []
criteria = {}
for run_id, data_points in cache.items():
for run_id, data_points in data.items():
dp_data = []
for dp in data_points:
measurements = dp.measurements_as_dict(criteria)
Expand All @@ -452,6 +451,12 @@ def _send_data(self, cache):
for c, idx in criteria.items():
criteria_index.append({'c': c[0], 'u': c[1], 'i': idx})

return all_data, criteria_index, num_measurements

def _send_data(self, cache):
self.ui.debug_output_info("ReBenchDB: Prepare data for sending\n")
all_data, criteria_index, num_measurements = self.convert_data_to_api_format(cache)

self.ui.debug_output_info(
"ReBenchDB: Sending {num_m} measures. startTime: {st}\n",
num_m=num_measurements, st=self._start_time)
Expand Down
5 changes: 4 additions & 1 deletion rebench/rebenchdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,12 +66,15 @@ def _send_payload(payload, url):
response = socket.read()
return response

def convert_data_to_json(self, data):
    """Serialize *data* into a compact, ASCII-only JSON string.

    Uses minimal separators (no spaces) so the payload sent to
    ReBenchDB is as small as possible.
    """
    compact_separators = (',', ':')
    return json.dumps(data, separators=compact_separators, ensure_ascii=True)

def _send_to_rebench_db(self, payload_data, operation):
payload_data['projectName'] = self._project_name
payload_data['experimentName'] = self._experiment_name
url = self._server_base_url + operation

payload = json.dumps(payload_data, separators=(',', ':'), ensure_ascii=True)
payload = self.convert_data_to_json(payload_data)

# self.ui.output("Saving JSON Payload of size: %d\n" % len(payload))
with open("payload.json", "w") as text_file: # pylint: disable=unspecified-encoding
Expand Down
119 changes: 118 additions & 1 deletion rebench/tests/persistency_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,10 @@
from unittest import skipIf
from .mock_http_server import MockHTTPServer
from .rebench_test_case import ReBenchTestCase
from .persistence import TestPersistence

from ..persistence import DataStore
from ..persistence import DataStore, _ReBenchDB
from ..rebenchdb import ReBenchDB

from ..configurator import Configurator, load_config
from ..environment import git_not_available, git_repo_not_initialized
Expand Down Expand Up @@ -237,3 +239,118 @@ def test_check_single_csv_header(self):
# count the number of lines starting with 'invocation'
invocation_lines = [line for line in lines if line.startswith('invocation')]
self.assertEqual(len(invocation_lines), 1)

def _create_dummy_rebench_db_persistence(self):
    """Build a _ReBenchDB persistence handler backed by a stub config.

    The stub's get_rebench_db_connector() returns None so no network
    connection is ever attempted during conversion-only tests.
    """
    class _StubConfig(object):
        @staticmethod
        def get_rebench_db_connector():
            return None

    return _ReBenchDB(_StubConfig(), None, self.ui)

def _run_exp_to_get_data_points_with_inconsistent_set_of_criteria(self):
    """Run the issue_16 experiment and collect its data points.

    Returns a tuple of ({run_id: data_points}, run_id). The
    issue_16_vm2 executable reports a varying set of criteria per
    iteration, which is the case the API conversion must handle.
    """
    yaml = load_config(self._path + '/features/issue_16.conf')
    yaml['executors']['TestRunner']['executable'] = 'features/issue_16_vm2.py'
    cnf = Configurator(yaml, DataStore(self.ui),
                       self.ui, exp_name='Test1',
                       data_file=self._tmp_file)
    runs = cnf.get_runs()
    persistence = TestPersistence()
    persistence.use_on(runs)

    # Materialize the single run id exactly once. The previous code
    # evaluated list(runs)[0] both before and after execution, which
    # is redundant and relies on the iteration order of `runs` being
    # identical across the two conversions.
    run_id_obj = list(runs)[0]
    ex = Executor(runs, False, self.ui)
    ex.execute()
    return {run_id_obj: persistence.get_data_points()}, run_id_obj

def _assert_criteria_index_structure(self, criteria_index):
    """Check that the criteria index lists the expected criteria,
    in order, each with a matching index and the unit 'ms'."""
    expected_names = ['bar', 'baz', 'total', 'foo']
    for position, entry in enumerate(criteria_index):
        self.assertEqual(position, entry['i'])
        self.assertEqual(expected_names[position], entry['c'])
        self.assertEqual('ms', entry['u'])

def _assert_run_id_structure(self, run_id, run_id_obj):
    """Verify the serialized runId dict mirrors the run id object.

    For this experiment varValue, machine, and extraArgs are all
    expected to be absent (None).
    """
    # Fields that must match the object and also be None for this run.
    for key, expected in (('varValue', run_id_obj.var_value),
                          ('machine', run_id_obj.machine)):
        self.assertEqual(run_id[key], expected)
        self.assertIsNone(run_id[key])

    self.assertEqual(run_id['location'], run_id_obj.location)
    self.assertEqual(run_id['inputSize'], run_id_obj.input_size)

    self.assertEqual(run_id['extraArgs'], run_id_obj.benchmark.extra_args)
    self.assertIsNone(run_id['extraArgs'])

    self.assertEqual(run_id['cores'], run_id_obj.cores)
    self.assertEqual(1, run_id['cores'])

    self.assertEqual(run_id['cmdline'], run_id_obj.cmdline())

def _assert_benchmark_structure(self, run_id, run_id_obj):
    """Verify the nested benchmark/suite/executor structure of a runId."""
    bench = run_id['benchmark']
    self.assertEqual(bench['name'], run_id_obj.benchmark.name)

    details = bench['runDetails']
    self.assertEqual(-1, details['maxInvocationTime'])
    self.assertEqual(50, details['minIterationTime'])
    self.assertIsNone(details['warmup'])

    suite = bench['suite']
    self.assertIsNone(suite['desc'])
    self.assertEqual('Suite', suite['name'])

    executor = suite['executor']
    self.assertIsNone(executor['desc'])
    self.assertEqual('TestRunner', executor['name'])

def _assert_data_point_structure(self, data):
    """Check each data point's invocation, iteration, and measurements.

    The issue_16_vm2 executable emits a different subset of criteria
    depending on the iteration number; reconstruct that subset here
    and compare it against the serialized measurements.
    """
    self.assertEqual(10, len(data))
    for i, point in enumerate(data):
        self.assertEqual(1, point['in'])
        self.assertEqual(i + 1, point['it'])

        # Criterion indices expected for this iteration, in emission order.
        expected_criteria = []
        if i % 2 == 0:
            expected_criteria.append(0)
        if i % 3 == 0:
            expected_criteria.append(1)
        if i % 2 == 1:
            expected_criteria.append(3)
        expected_criteria.append(2)

        for expected_c, measurement in zip(expected_criteria, point['m']):
            self.assertEqual(expected_c, measurement['c'])
            self.assertEqual(i, int(measurement['v']))

def _create_dummy_rebench_db_adapter(self):
    """Create a ReBenchDB client aimed at localhost; it is only used
    for serialization and never contacts a server."""
    server_url = 'http://localhost'
    return ReBenchDB(server_url, '', '', self.ui)

def test_data_conversion_to_rebench_db_api(self):
    """End-to-end check of the ReBenchDB API conversion and JSON output."""
    cache, run_id_obj = \
        self._run_exp_to_get_data_points_with_inconsistent_set_of_criteria()
    rebench_db = self._create_dummy_rebench_db_persistence()
    all_data, criteria_index, num_measurements = \
        rebench_db.convert_data_to_api_format(cache)

    self.assertEqual(24, num_measurements)
    self._assert_criteria_index_structure(criteria_index)

    first_run = all_data[0]
    self._assert_run_id_structure(first_run['runId'], run_id_obj)
    self._assert_benchmark_structure(first_run['runId'], run_id_obj)
    self._assert_data_point_structure(first_run['d'])

    rdb = self._create_dummy_rebench_db_adapter()

    # Expected compact JSON for the ten data points produced above.
    expected_json = (
        '[{"in":1,"it":1,"m":[{"v":0.0,"c":0},{"v":0.0,"c":1},{"v":0.0,"c":2}]},'
        '{"in":1,"it":2,"m":[{"v":1.1,"c":3},{"v":1.1,"c":2}]},'
        '{"in":1,"it":3,"m":[{"v":2.2,"c":0},{"v":2.2,"c":2}]},'
        '{"in":1,"it":4,"m":[{"v":3.3,"c":1},{"v":3.3,"c":3},{"v":3.3,"c":2}]},'
        '{"in":1,"it":5,"m":[{"v":4.4,"c":0},{"v":4.4,"c":2}]},'
        '{"in":1,"it":6,"m":[{"v":5.5,"c":3},{"v":5.5,"c":2}]},'
        '{"in":1,"it":7,"m":[{"v":6.6,"c":0},{"v":6.6,"c":1},{"v":6.6,"c":2}]},'
        '{"in":1,"it":8,"m":[{"v":7.7,"c":3},{"v":7.7,"c":2}]},'
        '{"in":1,"it":9,"m":[{"v":8.8,"c":0},{"v":8.8,"c":2}]},'
        '{"in":1,"it":10,"m":[{"v":9.9,"c":1},{"v":9.9,"c":3},{"v":9.9,"c":2}]}]')
    self.assertEqual(expected_json, rdb.convert_data_to_json(first_run['d']))

0 comments on commit d964faf

Please sign in to comment.