diff --git a/src/bindings/python/fluxacct/accounting/jobs_table_subcommands.py b/src/bindings/python/fluxacct/accounting/jobs_table_subcommands.py
index bfbafd76..5de18e5f 100644
--- a/src/bindings/python/fluxacct/accounting/jobs_table_subcommands.py
+++ b/src/bindings/python/fluxacct/accounting/jobs_table_subcommands.py
@@ -123,7 +123,7 @@ def convert_to_str(job_records):
             )
         )

-    return job_record_str
+    return "\n".join(job_record_str)


 def convert_to_obj(rows):
diff --git a/src/cmd/flux-account.py b/src/cmd/flux-account.py
index 24c2b0e9..7bcd3342 100755
--- a/src/cmd/flux-account.py
+++ b/src/cmd/flux-account.py
@@ -686,17 +686,6 @@ def select_accounting_function(args, output_file, parser):
         "pop_db": "accounting.pop_db",
     }

-    if args.func == "view_job_records":
-        data["output_file"] = output_file
-        return_val = flux.Flux().rpc(func_map[args.func], data).get()
-        # the return value of view-job-records without an output file is
-        # just a list of strings, so just iterate through that list and
-        # then return
-        job_record_list = list(return_val.values())
-        for job_record in job_record_list[0]:
-            print(job_record)
-        return
-
     if args.func in func_map:
         return_val = flux.Flux().rpc(func_map[args.func], data).get()
     else:
diff --git a/t/python/t1006_job_archive.py b/t/python/t1006_job_archive.py
index 0621b83c..ed1b7075 100755
--- a/t/python/t1006_job_archive.py
+++ b/t/python/t1006_job_archive.py
@@ -32,9 +32,6 @@ def setUpClass(self):
         global cur

         # create example job-archive database, output file
-        global op
-        op = "job_records.csv"
-
         c.create_db("FluxAccountingUsers.db")
         try:
             acct_conn = sqlite3.connect("file:FluxAccountingUsers.db?mode=rw", uri=True)
@@ -147,74 +144,74 @@ def populate_job_archive_db(acct_conn, userid, bank, ranks, nodes, num_entries):
     # its job information
     def test_01_with_jobid_valid(self):
         my_dict = {"jobid": 102}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 2)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 1)

     # passing a bad jobid should return no records
     def test_02_with_jobid_failure(self):
         my_dict = {"jobid": 000}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 1)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 0)

     # passing a timestamp before the first job to
     # start should return all of the jobs
     def test_03_after_start_time_all(self):
         my_dict = {"after_start_time": 0}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 19)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 18)

     # passing a timestamp after all of the start time
-    # of all the completed jobs should return a failure message
+    # of all the completed jobs should return no jobs
     @mock.patch("time.time", mock.MagicMock(return_value=11000000))
     def test_04_after_start_time_none(self):
         my_dict = {"after_start_time": time.time()}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 1)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 0)

     # passing a timestamp before the end time of the
     # last job should return all of the jobs
     @mock.patch("time.time", mock.MagicMock(return_value=11000000))
     def test_05_before_end_time_all(self):
         my_dict = {"before_end_time": time.time()}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 19)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 18)

     # passing a timestamp before the end time of
     # the first completed jobs should return no jobs
     def test_06_before_end_time_none(self):
         my_dict = {"before_end_time": 0}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 1)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 0)

     # passing a user not in the jobs table
     # should return no jobs
     def test_07_by_user_failure(self):
         my_dict = {"user": "9999"}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 1)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 0)

     # view_jobs_run_by_username() interacts with a
     # passwd file; for the purpose of these tests,
     # just pass the userid
     def test_08_by_user_success(self):
         my_dict = {"user": "1001"}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 3)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 2)

     # passing a combination of params should further
     # refine the query
     @mock.patch("time.time", mock.MagicMock(return_value=9000500))
     def test_09_multiple_params(self):
         my_dict = {"user": "1001", "after_start_time": time.time()}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 2)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 1)

     # passing no parameters will result in a generic query
     # returning all results
     def test_10_no_options_passed(self):
         my_dict = {}
-        job_records = j.view_jobs(acct_conn, op, **my_dict)
-        self.assertEqual(len(job_records), 19)
+        job_records = j.get_jobs(acct_conn, **my_dict)
+        self.assertEqual(len(job_records), 18)

     # users that have run a lot of jobs should have a larger usage factor
     @mock.patch("time.time", mock.MagicMock(return_value=9900000))
@@ -425,7 +422,6 @@ def test_20_update_job_usage_next_half_life_period(self):
     # remove database and log file
     @classmethod
     def tearDownClass(self):
-        os.remove("job_records.csv")
         os.remove("FluxAccountingUsers.db")
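Note (not part of the patch): a minimal sketch of how a caller might use the refactored helpers, assuming `get_jobs()` and `convert_to_str()` both live in `jobs_table_subcommands` (the tests above import that module as `j`); the database path and filter values here are illustrative only.

    import sqlite3

    from fluxacct.accounting import jobs_table_subcommands as j

    # open the example accounting DB (path taken from the test fixture above)
    conn = sqlite3.connect("file:FluxAccountingUsers.db?mode=rw", uri=True)

    # get_jobs() returns the matching job records as a list (the tests above
    # check its length); filters are passed as keyword arguments
    records = j.get_jobs(conn, user="1001", after_start_time=0)

    # convert_to_str() now returns a single newline-joined string instead of a
    # list of strings, so a caller such as flux-account.py can print it
    # directly instead of looping over entries as the removed CLI branch did
    print(j.convert_to_str(records))

Whether `convert_to_str()` accepts the records returned by `get_jobs()` without further conversion is an assumption; the patch only shows its signature, `convert_to_str(job_records)`.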