diff --git a/bin/all_sky_search/pycbc_bin_trigger_rates_dq b/bin/all_sky_search/pycbc_bin_trigger_rates_dq
index 74d61544560..5cf49118359 100644
--- a/bin/all_sky_search/pycbc_bin_trigger_rates_dq
+++ b/bin/all_sky_search/pycbc_bin_trigger_rates_dq
@@ -11,7 +11,8 @@ from ligo.segments import segmentlist
 import pycbc
 from pycbc.events import stat as pystat
-from pycbc.events.veto import (select_segments_by_definer, start_end_to_segments,
+from pycbc.events.veto import (select_segments_by_definer,
+                               start_end_to_segments,
                                segments_to_start_end)
 from pycbc.types.optparse import MultiDetOptionAction
 from pycbc.io.hdf import HFile, SingleDetTriggers
@@ -28,9 +29,9 @@ parser.add_argument("--analysis-segment-file", required=True)
 parser.add_argument("--analysis-segment-name", required=True)
 parser.add_argument("--gating-windows", nargs='+',
                     action=MultiDetOptionAction,
-                    help="Seconds to be reweight before and after the central time "
-                         "of each gate. Given as detector-values pairs, e.g. "
-                         "H1:-1,2.5 L1:-1,2.5 V1:0,0")
+                    help="Seconds to be reweight before and after the central "
+                         "time of each gate. Given as detector-values pairs, "
+                         "e.g. H1:-1,2.5 L1:-1,2.5 V1:0,0")
 parser.add_argument("--stat-threshold", type=float, default=1.,
                     help="Only consider triggers with --sngl-ranking value "
                          "above this threshold")
@@ -120,9 +121,20 @@ with h5.File(args.template_bins_file, 'r') as f:
 
 # get analysis segments
-analysis_segs = select_segments_by_definer(args.analysis_segment_file,
-                                           segment_name=args.analysis_segment_name,
-                                           ifo=ifo)
+analysis_segs = select_segments_by_definer(
+    args.analysis_segment_file,
+    segment_name=args.analysis_segment_name,
+    ifo=ifo)
+
+
+# utility function to compute segment list durations
+def segs_duration(segs):
+    starts, ends = segments_to_start_end(segs)
+    durs = ends - starts
+    return np.sum(durs)
+
+
+livetime = segs_duration(analysis_segs)
 
 # get flag segments
 flag_segs = select_segments_by_definer(args.flag_file,
@@ -162,13 +174,6 @@ def dq_state_at_time(t):
     return None
 
 
-# utility function to compute segment list durations
-def segs_duration(segs):
-    starts, ends = segments_to_start_end(segs)
-    durs = ends - starts
-    return np.sum(durs)
-
-
 # compute and save results
 with h5.File(args.output_file, 'w') as f:
     ifo_grp = f.create_group(ifo)
@@ -189,7 +194,7 @@ with h5.File(args.output_file, 'w') as f:
         dq_rates = np.zeros(3, dtype=np.float64)
         for state in [0, 1, 2]:
             frac_eff = np.mean(trig_states == state)
-            frac_dt = segs_duration(dq_state_segs_dict[state]) / segs_duration(analysis_segs)
+            frac_dt = segs_duration(dq_state_segs_dict[state]) / livetime
             dq_rates[state] = frac_eff / frac_dt
         bin_grp['dq_rates'] = dq_rates
diff --git a/bin/plotting/pycbc_plot_dq_flag_likelihood b/bin/plotting/pycbc_plot_dq_flag_likelihood
index ffe95fbfa14..b294221f5e7 100644
--- a/bin/plotting/pycbc_plot_dq_flag_likelihood
+++ b/bin/plotting/pycbc_plot_dq_flag_likelihood
@@ -54,7 +54,8 @@ ymax = 1
 ymin = 1
 
 for n, dqstate_name in dq_states.items():
-    dq_rates = numpy.array([ifo_grp['bins'][b]['dq_rates'][n] for b in bin_names])
+    dq_rates = numpy.array(
+        [ifo_grp['bins'][b]['dq_rates'][n] for b in bin_names])
     logrates = numpy.log(dq_rates)
 
     ymax = max(ymax, numpy.max(dq_rates))
@@ -81,6 +82,8 @@ ax2.set_ylim(numpy.log(ymin), numpy.log(ymax))
 
 # add meta data and save figure
 plot_title = f'{ifo} DQ Trigger Rates'
-plot_caption = 'The log likelihood correction during during each dq state for each template bin.'
-pycbc.results.save_fig_with_metadata(fig, args.output_file, title=plot_title,
-                                     caption=plot_caption, cmd=' '.join(sys.argv))
+plot_caption = ('The log likelihood correction during '
+                'each dq state for each template bin.')
+pycbc.results.save_fig_with_metadata(
+    fig, args.output_file, title=plot_title,
+    caption=plot_caption, cmd=' '.join(sys.argv))
diff --git a/bin/workflows/pycbc_make_offline_search_workflow b/bin/workflows/pycbc_make_offline_search_workflow
index cd6ad217d83..d688e05c7c8 100755
--- a/bin/workflows/pycbc_make_offline_search_workflow
+++ b/bin/workflows/pycbc_make_offline_search_workflow
@@ -557,7 +557,8 @@ for dqf, dql in zip(dqfiles, dqfile_labels):
 
 # DQ background binning plot
 ifo_bins = workflow.cp.get_subsections('bin_templates')
 for ifo in ifo_bins:
-    background_bins = workflow.cp.get_opt_tags('bin_templates', 'background-bins', tags=[ifo])
+    background_bins = workflow.cp.get_opt_tags(
+        'bin_templates', 'background-bins', tags=[ifo])
     wf.make_template_plot(workflow, hdfbank, rdir['data_quality'],
                           bins=background_bins, tags=[ifo, 'dq_bins'])
diff --git a/pycbc/workflow/dq.py b/pycbc/workflow/dq.py
index 69fd3432c77..fe2ee5f6a89 100644
--- a/pycbc/workflow/dq.py
+++ b/pycbc/workflow/dq.py
@@ -34,7 +34,8 @@ def create_node(self, workflow, ifo, template_bank_file):
         node = Node(self)
         node.add_opt('--ifo', ifo)
         node.add_input_opt('--bank-file', template_bank_file)
-        node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file')
+        node.new_output_file_opt(
+            workflow.analysis_time, '.hdf', '--output-file')
 
         return node
 
@@ -98,7 +99,8 @@ def setup_dq_reranking(workflow, insps, bank,
         # get triggers for this ifo
         ifo_insp = [insp for insp in insps if (insp.ifo == ifo)]
-        assert len(ifo_insp) == 1, f"Received more than one inspiral file for {ifo}"
+        assert len(ifo_insp) == 1, \
+            f"Received more than one inspiral file for {ifo}"
         ifo_insp = ifo_insp[0]
 
         # calculate template bins for this ifo