diff --git a/tests/tools/python/impl/plot_benchmark_results.py b/tests/tools/python/impl/plot_benchmark_results.py
index 98b8de96b..83dc7e040 100644
--- a/tests/tools/python/impl/plot_benchmark_results.py
+++ b/tests/tools/python/impl/plot_benchmark_results.py
@@ -20,7 +20,7 @@
 """ Read google benchmark data from json file """
 
 
-def read_benchmark_data(input_path, logging, det_name, plugins):
+def read_benchmark_data(input_path, logging, det_name, plugin):
 
     # Input data directory
     input_dir = os.fsencode(input_path)
@@ -29,7 +29,9 @@ def read_benchmark_data(input_path, logging, det_name, plugins):
     for file_item in os.listdir(input_dir):
         file_name = os.fsdecode(file_item)
 
-        if file_name.find(det_name + "_benchmark_data") != -1:
+        if (file_name.find(det_name + "_benchmark_data_") != -1) and (
+            file_name.find(plugin) != -1
+        ):
             file_path = input_path + file_name
             with open(file_path, "r") as file:
                 results = json.load(file)
@@ -39,6 +41,12 @@ def read_benchmark_data(input_path, logging, det_name, plugins):
 
                 return context, data
 
+    logging.error(
+        f"Could not find benchmark results for '{det_name}' and plugin '{plugin}'"
+    )
+
+    return None, None
+
 
 """ Adds a column 'x' to the data frame that contains the number of tracks """
 
@@ -78,7 +86,7 @@ def compactify_proc_name(name):
 """ Plot the benchmark results for different hardware and algebra plugins """
 
 
-def plot_benchmarks(context, df, detector, plot_factory, out_format="pdf"):
+def plot_benchmarks(context, df, plot_factory, out_format="pdf", plot=None):
 
     assert len(df["x"]) != 0, "Data frame has to provide column 'x'"
     assert len(df["real_time"]) != 0, "Data frame has to provide column 'real_time'"
@@ -106,24 +114,35 @@ def plot_benchmarks(context, df, detector, plot_factory, out_format="pdf"):
     lgd_ops = plotting.get_legend_options()
     lgd_ops._replace(loc=ldg_loc)
 
-    # Plot the propagation latency agains the number of tracks
-    plot_data = plot_factory.graph2D(
-        x=n_tracks,
-        y=real_time,
-        y_errors=real_time_stddev,
-        x_label="No. tracks",
-        y_label="t [ns]",
-        title="Propagation Latency",
-        label=plot_label,
-        lgd_ops=lgd_ops,
-        set_log_x=True,
-        set_log_y=True,
-        figsize=(12, 8),
-    )
-
-    plot_data.ax.set_xscale("log", base=2)
-    plot_data.ax.set_xticks(n_tracks)
-
-    plot_factory.write_plot(
-        plot_data, detector + f"_prop_latency_{plot_label}", out_format
-    )
+    plot_data = plotting.plt_data(None, None, None, None, None, None, None, None)
+    if plot is None:
+        # Plot the propagation latency against the number of tracks
+        plot_data = plot_factory.graph2D(
+            x=n_tracks,
+            y=real_time,
+            y_errors=real_time_stddev,
+            x_label="No. tracks",
tracks", + y_label="t [ns]", + title="Propagation Latency", + label=plot_label, + lgd_ops=lgd_ops, + set_log_x=True, + set_log_y=True, + figsize=(12, 8), + ) + + plot_data.ax.set_xscale("log", base=2) + plot_data.ax.set_xticks(n_tracks) + else: + # Add new data to exiting plot + plot_data = plot_factory.add_graph( + plot=plot, + x=n_tracks, + y=real_time, + y_errors=real_time_stddev, + label=plot_label, + marker="+", + color="tab:orange", + ) + + return plot_data diff --git a/tests/tools/python/options/plotting_options.py b/tests/tools/python/options/plotting_options.py index 16a2b7e5f..7d5b61583 100644 --- a/tests/tools/python/options/plotting_options.py +++ b/tests/tools/python/options/plotting_options.py @@ -37,7 +37,7 @@ def plotting_options(): "--output_format", "-of", help=("Format of the plot files (svg|png|pdf)."), - default="png", + default="pdf", type=str, ) diff --git a/tests/tools/python/plotting/__init__.py b/tests/tools/python/plotting/__init__.py index 801cf873c..861d5ca4f 100644 --- a/tests/tools/python/plotting/__init__.py +++ b/tests/tools/python/plotting/__init__.py @@ -1,3 +1,3 @@ from .plot_helpers import filter_data from .pyplot_factory import pyplot_factory -from .pyplot_factory import legend_options, get_legend_options +from .pyplot_factory import plt_data, legend_options, get_legend_options diff --git a/tests/tools/python/plotting/pyplot_factory.py b/tests/tools/python/plotting/pyplot_factory.py index e0d720fb7..42eccd171 100644 --- a/tests/tools/python/plotting/pyplot_factory.py +++ b/tests/tools/python/plotting/pyplot_factory.py @@ -91,7 +91,6 @@ def graph2D( self, x, y, - x_errors=None, y_errors=None, x_label="x", y_label="y", @@ -137,7 +136,7 @@ def graph2D( if len(x) != len(y): self.logger.debug(rf" create graph: x range does match y range {label}") - return plt_data(fig, ax, None, None, None, None, None, None) + return plt_data(fig, ax, None, None, None, None, None, y_errors) data = ax.errorbar(x=x, y=y, label=label, yerr=y_errors, marker=marker) @@ -156,7 +155,45 @@ def graph2D( if set_log_y: ax.set_yscale("log") - return plt_data(fig, ax, lgd, data, None, None, None, None) + return plt_data(fig, ax, lgd, data, None, None, None, y_errors) + + """ Add new graph to an existing plot """ + + def add_graph( + self, + plot, + x, + y, + y_errors=None, + label="", + marker="+", + color="tab:orange", + alpha=0.75, + ): + # Nothing left to do + if len(y) == 0 or plot.data is None: + self.logger.debug(rf" add graph: empty data {label}") + return plot + + # Add new data to old plot axis + data = plot.ax.errorbar( + x=x, + y=y, + label=label, + yerr=y_errors, + color=color, + marker=marker, + ) + + # Update legend + lgd = plot.lgd + handles, labels = lgd.axes.get_legend_handles_labels() + lgd._legend_box = None + lgd._init_legend_box(handles, labels) + lgd._set_loc(lgd._loc) + lgd.set_title(lgd.get_title().get_text()) + + return plt_data(plot.fig, plot.ax, plot.lgd, data, None, None, None, y_errors) """ Create a histogram from given input data. 
@@ -305,7 +342,7 @@ def hist1D(
 
         return plt_data(fig, ax, lgd, data, bins, mean, stdev, err)
 
-    """ Add new data to an existing plot """
+    """ Add new histogram to an existing plot """
 
     def add_hist(
         self,
diff --git a/tests/tools/python/propagation_benchmarks.py b/tests/tools/python/propagation_benchmarks.py
index 637ca6c54..2ccd13a39 100644
--- a/tests/tools/python/propagation_benchmarks.py
+++ b/tests/tools/python/propagation_benchmarks.py
@@ -108,11 +108,11 @@ def __main__():
     # Gather and check benchmark executables for every plugin
     cpu_benchmarks = []
     cuda_benchmarks = []
-    algebra_plugins = list(args.algebra_plugins)
+    algebra_plugins = set(args.algebra_plugins)
 
     if "array" not in algebra_plugins:
-        algebra_plugins.insert(0, "array")
+        algebra_plugins.add("array")
 
-    for plugin in set(algebra_plugins):
+    for plugin in algebra_plugins:
         cpu_benchmark = bindir + "/detray_propagation_benchmark_cpu_" + plugin
         cuda_benchmark = bindir + "/detray_detector_validation_cuda_" + plugin
@@ -187,7 +187,7 @@ def __main__():
     logging.debug("Running CPU Propagation Benchmarks")
 
     for benchmark in cpu_benchmarks:
-        plugin = benchmark.split("_")[-1]
+        plugin = benchmark.split("benchmark_cpu_")[-1]
        subprocess.run(
            [
                benchmark,
@@ -234,122 +234,34 @@ def __main__():
     plot_factory = plt_factory(out_dir, logging)
 
     # Read the benchmark data into a pandas frame
-    context, data = read_benchmark_data(input_dir, logging, det_name, algebra_plugins)
+    context, data = read_benchmark_data(input_dir, logging, det_name, "array")
+    if context is None or data is None:
+        sys.exit(1)
 
     # Add the number of tracks per benchmark case as new column 'x'
     add_track_multiplicity_column(data)
 
     # Plot the data against the number of tracks
-    benchmark_plotter.plot_benchmarks(context, data, det_name, plot_factory)
-
-    """
-    plot_detector_scan_data(args, det_name, plot_factory, "ray", ray_scan_df, "png")
-    plot_detector_scan_data(args, det_name, plot_factory, "helix", helix_scan_df, "png")
-
-    # Plot distributions of track parameter values
-    # Only take initial track parameters from generator
-    ray_intial_trk_df = ray_scan_df.drop_duplicates(subset=["track_id"])
-    helix_intial_trk_df = helix_scan_df.drop_duplicates(subset=["track_id"])
-    plot_track_params(
-        args, det_name, "helix", plot_factory, out_format, helix_intial_trk_df
-    )
-    plot_track_params(
-        args, det_name, "ray", plot_factory, out_format, ray_intial_trk_df
-    )
-
-    # Read the recorded data
-    (
-        ray_nav_df,
-        ray_truth_df,
-        ray_nav_cuda_df,
-        helix_nav_df,
-        helix_truth_df,
-        helix_nav_cuda_df,
-    ) = read_navigation_data(
-        datadir, det_name, str(args.transverse_momentum), args.cuda, logging
-    )
-
-    # Plot
-    label_cpu = "navigation (CPU)"
-    label_cuda = "navigation (CUDA)"
-
-    plot_navigation_data(
-        args,
-        det_name,
-        plot_factory,
-        "ray",
-        ray_truth_df,
-        "truth",
-        ray_nav_df,
-        label_cpu,
-        out_format,
-    )
-
-    plot_navigation_data(
-        args,
-        det_name,
-        plot_factory,
-        "helix",
-        helix_truth_df,
-        "truth",
-        helix_nav_df,
-        label_cpu,
-        out_format,
+    plot_data = benchmark_plotter.plot_benchmarks(
+        context, data, plot_factory, out_format
     )
 
-    if args.cuda:
-        # Truth vs. Device
-        plot_navigation_data(
-            args,
-            det_name,
-            plot_factory,
-            "ray",
-            ray_truth_df,
-            "truth",
-            ray_nav_cuda_df,
-            label_cuda,
-            out_format,
-        )
+    # Add plots for the other algebra plugins
+    for plugin in algebra_plugins:
+        if plugin != "array":
+            context, data = read_benchmark_data(input_dir, logging, det_name, plugin)
+            if context is None or data is None:
+                sys.exit(1)
 
-        plot_navigation_data(
-            args,
-            det_name,
-            plot_factory,
-            "helix",
-            helix_truth_df,
-            "truth",
-            helix_nav_cuda_df,
-            label_cuda,
-            out_format,
-        )
+            add_track_multiplicity_column(data)
 
-        # Host vs. Device
-        plot_navigation_data(
-            args,
-            det_name,
-            plot_factory,
-            "ray",
-            ray_nav_df,
-            label_cpu,
-            ray_nav_cuda_df,
-            label_cuda,
-            out_format,
-        )
-
-        plot_navigation_data(
-            args,
-            det_name,
-            plot_factory,
-            "helix",
-            helix_nav_df,
-            label_cpu,
-            helix_nav_cuda_df,
-            label_cuda,
-            out_format,
-        )
-    """
+            # Add new data to 'plot_data'
+            benchmark_plotter.plot_benchmarks(
+                context, data, plot_factory, out_format, plot_data
+            )
 
-    logging.info("\nDone.\n")
+    # Write to disk
+    plot_factory.write_plot(plot_data, det_name + "_prop_latency", out_format)
 
 
 # ------------------------------------------------------------------------------
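Note for reviewers: `add_graph()` refreshes the already-drawn legend through private matplotlib `Legend` internals (`_legend_box`, `_init_legend_box`, `_set_loc`), which may break across matplotlib versions. Below is a minimal, self-contained sketch of the same overlay-and-refresh pattern outside the detray plotting helpers; the series names and values are made up for illustration:

```python
import matplotlib

matplotlib.use("Agg")  # render off-screen, as in batch benchmark post-processing
import matplotlib.pyplot as plt

# Baseline series, analogous to plotting the "array" plugin first
n_tracks = [2**i for i in range(10, 15)]
fig, ax = plt.subplots(figsize=(12, 8))
ax.errorbar(x=n_tracks, y=[0.5 * n for n in n_tracks], marker="o", label="array")
lgd = ax.legend(loc="upper left")

# Overlay a second series on the same axes, analogous to add_graph()
ax.errorbar(
    x=n_tracks,
    y=[0.6 * n for n in n_tracks],
    marker="+",
    color="tab:orange",
    label="eigen",
)

# Rebuild the existing legend in place so that the new entry appears
handles, labels = ax.get_legend_handles_labels()
lgd._legend_box = None
lgd._init_legend_box(handles, labels)
lgd._set_loc(lgd._loc)

fig.savefig("prop_latency_sketch.pdf")
```

Calling `ax.legend()` again after the second `errorbar()` would achieve the same effect through the public API; the private-attribute route is only needed when the original `Legend` object (its title, placement and styling) has to stay alive, as it does inside `plt_data`.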