From df70fbaf8411f99a8caf67ffe422d50cd0e093e4 Mon Sep 17 00:00:00 2001
From: Samzcodes
Date: Wed, 21 Jun 2023 02:37:10 +0200
Subject: [PATCH] Enhancing results | WIP | pytype

---
 scripts/test_pytype_enhanced.py |  26 +++-
 src/tool_scripts/results.py     | 141 +++++++++++++++++---------------
 2 files changed, 98 insertions(+), 69 deletions(-)

diff --git a/scripts/test_pytype_enhanced.py b/scripts/test_pytype_enhanced.py
index 35c4d7807..e2d5aa978 100644
--- a/scripts/test_pytype_enhanced.py
+++ b/scripts/test_pytype_enhanced.py
@@ -60,19 +60,29 @@ def get_annotations_list(module):
     current_class = None
     for node in ast.walk(module):
         if hasattr(node, "resolved_type"):
+            """if hasattr(node, "id"):
+                print(node.id)
+            elif isinstance(node, ast.Attribute):
+                print(node.attr)
+            else:
+                members = dir(node)
+                for member in members:
+                    value = getattr(node, member)
+                    print(f"{member}: {value}")"""
             if isinstance(node, ast.ClassDef):
-                current_class = node.name
+                current_class = node.id
+                print(node.name)
             if isinstance(node, ast.FunctionDef):
+                # print(node.id)
                 function = {
                     "file": "test.py",
                     "line_number": node.lineno,
                     "function": node.name,
                     "type": [node.resolved_annotation],
                 }
-                results.append(function)
-
                 if current_class:
                     function["function"] = f"{current_class}.{node.name}"
+                results.append(function)
 
                 for param in node.args.args:
                     parameter = {
@@ -86,6 +96,15 @@ def get_annotations_list(module):
                             function["function"] = f"{current_class}.{node.name}"
                     results.append(parameter)
 
+            elif isinstance(node, ast.Attribute):
+                parameter = {
+                    "file": "test.py",
+                    "line_number": node.lineno,
+                    "parameter": node.attr,
+                    "function": node.value,
+                    "type": [node.resolved_annotation],
+                }
+                results.append(parameter)
             elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
                 variable = {
                     "file": "test.py",
@@ -94,7 +113,6 @@ def get_annotations_list(module):
                     "type": [node.resolved_annotation],
                 }
                 results.append(variable)
-
     return results
 
 
diff --git a/src/tool_scripts/results.py b/src/tool_scripts/results.py
index 5de021388..0b58a42f2 100644
--- a/src/tool_scripts/results.py
+++ b/src/tool_scripts/results.py
@@ -29,75 +29,86 @@ def compare_json_files(gt_file, tool_file):
             missing_matches.append(gt_entry)
 
     success_rate = (total_matches / total_entries) * 100
-    print("Success rate for file :", gt_file, "is : ", success_rate)
+    print("Success rate for file:", gt_file, "is:", success_rate)
     return success_rate, missing_matches
 
 
-def compare_json_files_in_directory(directory):
-    total_files = 0
-    total_success_rate = 0
-    all_missing_matches = {}
-
-    for root, dirs, files in os.walk(directory):
-        try:
-            gt_file = None
-            tool_file = None
-
-            for file in files:
-                if file.endswith(".json"):
-                    file_path = os.path.join(root, file)
-                    if "gt" in file.lower():
-                        gt_file = file_path
-                    elif "jedi" in file.lower():
-                        tool_file = file_path
-
-            if gt_file and tool_file:
-                success_rate, missing_matches = compare_json_files(gt_file, tool_file)
-                total_success_rate += success_rate
-                total_files += 1
-
-                # Group missing matches by file name
-                file_name = "gt|" + os.path.basename(gt_file)
-                if file_name in all_missing_matches:
-                    all_missing_matches[gt_file].extend(missing_matches)
-                else:
-                    all_missing_matches[gt_file] = missing_matches
-        except Exception as e:
-            print(e)
-
-    if all_missing_matches:
-        headers = ["File", "Line Number", "Function", "Parameter/Variable", "Type"]
-        rows = []
-        for file_name, missing_matches in all_missing_matches.items():
-            merged_cell = file_name
-            num_entries = len(missing_matches)
-            for i, entry in enumerate(missing_matches):
-                line_number = entry.get("line_number", "")
-                function = entry.get("function", "")
-                param_variable = entry.get("parameter", entry.get("variable", ""))
-                types = ", ".join(entry.get("type", []))
-                rows.append(
-                    [
-                        merged_cell if i == 0 else "",
-                        line_number,
-                        function,
-                        param_variable,
-                        types,
-                    ]
+def iterate_cats(test_suite_dir):
+    for cat in sorted(os.listdir(test_suite_dir)):
+        cat_dir = os.path.join(test_suite_dir, cat)
+        if os.path.isdir(cat_dir):
+            print("Iterating category {}...".format(cat))
+            all_missing_matches = {}
+
+            for root, dirs, files in os.walk(cat_dir):
+                try:
+                    gt_file = None
+                    tool_file = None
+
+                    for file in files:
+                        if file.endswith(".json"):
+                            file_path = os.path.join(root, file)
+                            if "gt" in file.lower():
+                                gt_file = file_path
+                            elif "jedi" in file.lower():
+                                tool_file = file_path
+
+                    if gt_file and tool_file:
+                        success_rate, missing_matches = compare_json_files(
+                            gt_file, tool_file
+                        )
+
+                        # Group missing matches by file name
+                        dir_path = os.path.relpath(os.path.dirname(gt_file), cat_dir)
+                        file_name = dir_path + "/" + os.path.basename(gt_file)
+                        if file_name in all_missing_matches:
+                            all_missing_matches[file_name].extend(missing_matches)
+                        else:
+                            all_missing_matches[file_name] = missing_matches
+
+                except Exception as e:
+                    print(e)
+
+            if all_missing_matches:
+                headers = [
+                    "File",
+                    "Line Number",
+                    "Function",
+                    "Parameter/Variable",
+                    "Type",
+                ]
+                rows = []
+                for file_name, missing_matches in all_missing_matches.items():
+                    merged_cell = file_name
+                    num_entries = len(missing_matches)
+                    for i, entry in enumerate(missing_matches):
+                        line_number = entry.get("line_number", "")
+                        function = entry.get("function", "")
+                        param_variable = entry.get(
+                            "parameter", entry.get("variable", "")
+                        )
+                        types = ", ".join(entry.get("type", []))
+                        rows.append(
+                            [
+                                merged_cell if i == 0 else "",
+                                line_number,
+                                function,
+                                param_variable,
+                                types,
+                            ]
+                        )
+
+                print("\nMissing matches:")
+                print(tabulate(rows, headers=headers, tablefmt="grid"))
+                print(
+                    "\nTotal missing entries:"
+                    f" {sum(len(matches) for matches in all_missing_matches.values())}"
                 )
+            else:
+                print("No missing matches.")
 
-            print("\nMissing matches:")
-            print(tabulate(rows, headers=headers, tablefmt="grid"))
-            print(
-                f"\nTotal missing entries: {sum(len(matches) for matches in all_missing_matches.values())}"
-            )
-        else:
-            print("No missing matches.")
+            print("-" * 50)
 
-    average_success_rate = total_success_rate / total_files if total_files else 0
-    print(f"\nTotal files processed: {total_files}")
-    print(f"Average success rate: {average_success_rate:.2f}%")
-
-
-compare_json_files_in_directory("micro-benchmark")
+test_suite_dir = "micro-benchmark/python_features"
+iterate_cats(test_suite_dir)
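
Note: a minimal sketch of the traversal this WIP patch is converging on, under
the assumption that the module comes from pytype's annotate_ast tool
(annotate_source attaches resolved_type / resolved_annotation to the nodes it
could type); the file name and Python version below are placeholders. Two
details worth keeping in mind: ast.ClassDef stores the class name in `name`
(only ast.Name nodes have an `id`), and ast.walk yields nodes breadth-first,
so tracking current_class this way is best-effort.

import ast

from pytype import config
from pytype.tools.annotate_ast import annotate_ast


def collect_annotations(source, file_name="test.py"):
    # Run pytype over the source; the returned AST carries
    # resolved_type / resolved_annotation on the nodes it could type.
    pytype_options = config.Options.create(python_version=(3, 9))
    module = annotate_ast.annotate_source(source, ast, pytype_options)

    results = []
    current_class = None
    for node in ast.walk(module):
        if not hasattr(node, "resolved_type"):
            continue
        if isinstance(node, ast.ClassDef):
            current_class = node.name  # ClassDef has .name, not .id
        elif isinstance(node, ast.FunctionDef):
            name = f"{current_class}.{node.name}" if current_class else node.name
            results.append({
                "file": file_name,
                "line_number": node.lineno,
                "function": name,
                "type": [node.resolved_annotation],
            })
        elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
            results.append({
                "file": file_name,
                "line_number": node.lineno,
                "variable": node.id,
                "type": [node.resolved_annotation],
            })
    return results

An ast.NodeVisitor subclass would make the class scoping deterministic, and
the new ast.Attribute branch likely wants something serializable such as
ast.unparse(node.value) (Python 3.9+) rather than the raw AST node in its
"function" field, so the entry survives a round-trip through JSON.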
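The per-category report in iterate_cats reduces to grouping each ground-truth
file's missing entries and rendering them with tabulate, blanking the repeated
file cell so each group reads as one merged block. A self-contained sketch
with made-up data (the path and entries are illustrative, not taken from the
benchmark):

from tabulate import tabulate

headers = ["File", "Line Number", "Function", "Parameter/Variable", "Type"]
all_missing_matches = {
    "assignments/simple/main_gt.json": [
        {"line_number": 3, "function": "main", "variable": "x", "type": ["int"]},
        {"line_number": 5, "function": "main", "parameter": "y", "type": ["str"]},
    ],
}

rows = []
for file_name, entries in all_missing_matches.items():
    for i, entry in enumerate(entries):
        rows.append([
            file_name if i == 0 else "",  # show the file once per group
            entry.get("line_number", ""),
            entry.get("function", ""),
            entry.get("parameter", entry.get("variable", "")),
            ", ".join(entry.get("type", [])),
        ])

print(tabulate(rows, headers=headers, tablefmt="grid"))

With tablefmt="grid" every row is ruled, so the blanked cells are what make
the per-file grouping visible at a glance.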