From 6823cc8fc7f6a59fe05b384daed27f5fec1fa2b5 Mon Sep 17 00:00:00 2001 From: Evan Harvey Date: Tue, 19 Mar 2024 15:03:12 -0600 Subject: [PATCH] Fix pep8 violations --- .github/workflows/format.yml | 2 +- contrib/app/sofast/load_saved_data.py | 1 + .../run_and_characterize_sofast_1_cam.py | 1 + .../run_and_characterize_sofast_2_cam.py | 1 + .../02_main_altitude_gaze_analysis_yz.py | 36 +- .../U_Code/03_main_flight_plan_assembly.py | 36 +- .../U_Code/04_main_ufacet_xy_analysis.py | 108 +- .../U_Code/06_main_planner_trial_study.py | 6 +- .../07_main_generate_flight_plan_suite.py | 6 +- ...main_generate_half_and_half_flight_plan.py | 6 +- .../U_Code/93_quick_nsttf_survey.py | 36 +- .../U_Code/lib/define_scan_nsttf.py | 42 +- .../U_Code/lib/plan_scan_parameters.py | 12 +- .../U_Code/lib/plan_scan_raster_parameters.py | 6 +- .../U_Code/lib/plan_scan_ufacet.py | 6 +- .../U_Code/lib/plan_scan_ufacet_parameters.py | 36 +- .../lib/plan_scan_ufacet_section_analysis.py | 90 +- .../plan_scan_ufacet_section_construction.py | 9 +- .../U_Code/lib/plan_scan_vanity_parameters.py | 6 +- .../helio_scan/080_FramesNoDuplicates.py | 15 +- .../helio_scan/130_KeyFramesGivenManual.py | 11 +- .../app/ufacet-s/helio_scan/140_KeyCorners.py | 19 +- .../app/ufacet-s/helio_scan/150_KeyTracks.py | 18 +- .../helio_scan/170_HeliostatTracks.py | 6 +- .../ufacet-s/helio_scan/180_Heliostats3d.py | 24 +- .../helio_scan/190_TrajectoryAnalysis.py | 110 +- .../helio_scan/lib/DEPRECATED_utils.py | 2 +- .../helio_scan/lib/HeliostatInfer3d.py | 32 +- .../helio_scan/lib/KeyFrameCornerSearch.py | 2040 +++++++++++------ contrib/scripts/SensitiveStringMatcher.py | 6 +- contrib/scripts/sensitive_strings.py | 89 +- contrib/scripts/test/test_FileCache.py | 2 +- contrib/scripts/test/test_FileFingerprint.py | 32 +- .../test/test_SensitiveStringMatcher.py | 46 +- .../scripts/test/test_sensitive_strings.py | 60 +- .../downsample_data_general.py | 1 + .../generate_downsampled_dataset.py | 12 +- 
.../generate_downsampled_calibration_data.py | 14 +- .../sofast_fringe/downsample_data.py | 5 +- .../generate_downsampled_calibration_data.py | 28 +- .../generate_downsampled_dataset.py | 1 + .../generate_test_data_multi_facet.py | 9 +- .../generate_test_data_single_facet.py | 9 +- .../generate_test_data_undefined.py | 9 +- example/camera_io/live_view_color_Basler.py | 1 + example/camera_io/live_view_mono_Basler.py | 1 + .../run_and_save_images_Basler_color.py | 1 + example/csp/example_optics_and_ray_tracing.py | 1 + example/mirror/example_MirrorOutput.py | 8 +- .../example_annotate_aruco_markers.py | 8 +- .../example_make_aruco_markers.py | 6 +- .../example_scene_reconstruction.py | 3 +- ...ulate_dot_locations_from_display_object.py | 1 + example/sofast_fixed/find_blobs_in_image.py | 1 + .../physical_target_dot_calibration.py | 32 +- .../process_fixed_pattern_data.py | 9 +- .../run_and_characterize_fixed_pattern.py | 1 + .../run_fixed_pattern_projection.py | 1 + .../example_calibration_camera_pose.py | 7 +- ...mple_calibration_save_DisplayShape_file.py | 7 +- .../example_calibration_screen_shape.py | 24 +- .../example_multi_facet_data_process.py | 8 +- .../example_single_facet_data_process.py | 8 +- .../example_standard_mirror_plot_output.py | 4 +- .../example_undefined_facet_data_process.py | 8 +- .../camera_calibration/CameraCalibration.py | 1 + .../lib/ViewAnnotatedImages.py | 1 + .../lib/calibration_camera.py | 1 + .../lib/image_processing.py | 1 + .../test/test_camera_calibration.py | 10 +- .../lib/SceneReconstruction.py | 1 + .../test/generate_downsampled_dataset.py | 4 +- .../test/test_SceneReconstruction.py | 1 + .../select_image_points/SelectImagePoints.py | 1 + opencsp/app/sofast/SofastGUI.py | 5 +- opencsp/app/sofast/lib/BlobIndex.py | 24 +- .../app/sofast/lib/CalibrateDisplayShape.py | 12 +- .../sofast/lib/CalibrateSofastFixedDots.py | 47 +- opencsp/app/sofast/lib/DefinitionEnsemble.py | 5 +- opencsp/app/sofast/lib/DefinitionFacet.py | 10 +- 
opencsp/app/sofast/lib/DisplayShape.py | 13 +- .../sofast/lib/ImageCalibrationAbstract.py | 4 +- .../app/sofast/lib/ImageCalibrationGlobal.py | 4 +- .../app/sofast/lib/ImageCalibrationScaling.py | 4 +- .../app/sofast/lib/MeasurementSofastFringe.py | 5 +- opencsp/app/sofast/lib/ParamsSofastFixed.py | 5 +- opencsp/app/sofast/lib/ParamsSofastFringe.py | 13 +- opencsp/app/sofast/lib/ProcessSofastFixed.py | 1 + opencsp/app/sofast/lib/ProcessSofastFringe.py | 79 +- opencsp/app/sofast/lib/SpatialOrientation.py | 6 +- opencsp/app/sofast/lib/SystemSofastFringe.py | 7 +- .../app/sofast/lib/process_optics_geometry.py | 17 +- .../app/sofast/lib/save_DisplayShape_file.py | 1 + opencsp/app/sofast/lib/visualize_setup.py | 1 + .../sofast/test/ImageAcquisition_no_camera.py | 1 + .../sofast/test/test_CalibrateDisplayShape.py | 14 +- .../test/test_CalibrateSofastFixedDots.py | 18 +- opencsp/app/sofast/test/test_Display.py | 5 +- opencsp/app/sofast/test/test_Fringes.py | 1 + .../test/test_ImageCalibrationGlobal.py | 5 +- .../sofast/test/test_SpatialOrientation.py | 1 + .../sofast/test/test_SystemSofastFringe.py | 5 +- .../app/sofast/test/test_image_processing.py | 5 +- .../test/test_integration_multi_facet.py | 9 +- .../test/test_integration_single_facet.py | 5 +- .../sofast/test/test_integration_undefined.py | 9 +- .../test/test_project_fixed_pattern_target.py | 1 + .../test/test_save_DisplayShape_file.py | 19 +- .../sofast/test/test_spatial_processing.py | 5 +- .../app/target/target_color/lib/ImageColor.py | 1 + .../target_color/test/test_ImageColor.py | 1 + opencsp/common/lib/csp/Facet.py | 1 + opencsp/common/lib/csp/FacetEnsemble.py | 1 + opencsp/common/lib/csp/MirrorAbstract.py | 1 + opencsp/common/lib/csp/MirrorParametric.py | 1 + .../lib/csp/MirrorParametricRectangular.py | 1 + opencsp/common/lib/csp/MirrorPoint.py | 1 + opencsp/common/lib/csp/OpticOrientation.py | 1 + .../VisualizeOrthorectifiedSlopeAbstract.py | 1 + opencsp/common/lib/csp/standard_output.py | 1 + 
opencsp/common/lib/csp/sun_track.py | 6 +- .../common/lib/csp/test/test_FacetEnsemble.py | 1 + .../lib/csp/test/test_MirrorParametric.py | 1 + .../common/lib/csp/test/test_MirrorPoint.py | 1 + opencsp/common/lib/cv/SpotAnalysis.py | 8 +- .../lib/cv/spot_analysis/ImagesIterable.py | 8 +- .../lib/cv/spot_analysis/ImagesStream.py | 10 +- .../cv/spot_analysis/SpotAnalysisOperable.py | 4 +- .../SpotAnalysisOperablesStream.py | 10 +- .../AbstractSpotAnalysisImageProcessor.py | 18 +- .../CalibrationCameraPosition.py | 2 +- .../lib/deflectometry/ImageProjection.py | 1 + .../deflectometry/ImageProjectionSetupGUI.py | 1 + .../lib/deflectometry/SlopeSolverData.py | 1 + .../lib/deflectometry/SlopeSolverDataDebug.py | 1 + .../test/test_CalibrationCameraPosition.py | 1 + .../deflectometry/test/test_SlopeSolver.py | 5 +- .../lib/deflectometry/test/test_Surface2D.py | 1 + opencsp/common/lib/geometry/Vxyz.py | 1 + opencsp/common/lib/geometry/geometry_3d.py | 30 +- .../common/lib/photogrammetry/ImageMarker.py | 1 + .../lib/photogrammetry/bundle_adjustment.py | 1 + .../lib/photogrammetry/photogrammetry.py | 1 + .../test/test_photogrammetry.py | 1 + .../lib/process/parallel_video_tools.py | 7 +- .../common/lib/process/subprocess_tools.py | 4 +- opencsp/common/lib/render/VideoHandler.py | 6 +- opencsp/common/lib/render/View3d.py | 5 +- .../render_control/RenderControlEnsemble.py | 1 + .../RenderControlFigureRecord.py | 10 +- .../RenderControlVideoFrames.py | 1 + opencsp/common/lib/target/TargetColor.py | 6 +- .../common/lib/target/target_color_2d_rgb.py | 1 + opencsp/common/lib/test/test_MirrorOutput.py | 8 +- opencsp/common/lib/tool/file_tools.py | 23 +- opencsp/common/lib/tool/hdf5_tools.py | 28 +- opencsp/common/lib/tool/log_tools.py | 16 +- .../common/lib/tool/test/test_file_tools.py | 4 +- 158 files changed, 2407 insertions(+), 1412 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 4c9561867..6524f1f69 100644 --- 
a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,5 +1,5 @@ name: github-FORMAT - +#test on: push #pull_request: diff --git a/contrib/app/sofast/load_saved_data.py b/contrib/app/sofast/load_saved_data.py index 42cce7577..f3b370125 100644 --- a/contrib/app/sofast/load_saved_data.py +++ b/contrib/app/sofast/load_saved_data.py @@ -1,6 +1,7 @@ """Library of functions that load saved Sofast HDF5 files and return OpenCSP optics classes. """ + import numpy as np from scipy.spatial.transform import Rotation diff --git a/contrib/app/sofast/run_and_characterize_sofast_1_cam.py b/contrib/app/sofast/run_and_characterize_sofast_1_cam.py index 15c96d428..fb5bd9759 100644 --- a/contrib/app/sofast/run_and_characterize_sofast_1_cam.py +++ b/contrib/app/sofast/run_and_characterize_sofast_1_cam.py @@ -7,6 +7,7 @@ NOTE: must be run with a computer connected to a working SOFAST system. This includes a camera, mirror, screen, and system layout calibration files. """ + import os import numpy as np diff --git a/contrib/app/sofast/run_and_characterize_sofast_2_cam.py b/contrib/app/sofast/run_and_characterize_sofast_2_cam.py index 7b4088d19..8a9257f49 100644 --- a/contrib/app/sofast/run_and_characterize_sofast_2_cam.py +++ b/contrib/app/sofast/run_and_characterize_sofast_2_cam.py @@ -7,6 +7,7 @@ NOTE: must be run with a computer connected to a working SOFAST system. This includes a camera, mirror, screen, and system layout calibration files. """ + import os import numpy as np diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py index 15291e0b2..064ac122e 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/02_main_altitude_gaze_analysis_yz.py @@ -123,28 +123,28 @@ # Control parameters. 
scan_parameters = {} - scan_parameters[ - 'locale' - ] = 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + scan_parameters['locale'] = ( + 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + ) scan_parameters['camera'] = cam.sony_alpha_20mm_landscape() # Camera model. # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters[ - 'section_plane_tolerance' - ] = 3 # m. Lateral distance to include heliostats in section. - scan_parameters[ - 'p_margin' - ] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - scan_parameters[ - 'altitude_margin' - ] = 2.5 # m. Clearance of highest possible heliostat point. - scan_parameters[ - 'maximum_safe_altitude' - ] = 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC - scan_parameters[ - 'maximum_target_lookback' - ] = 3 # Number of heliostats to look back for reflection targets. + scan_parameters['section_plane_tolerance'] = ( + 3 # m. Lateral distance to include heliostats in section. + ) + scan_parameters['p_margin'] = ( + 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + ) + scan_parameters['altitude_margin'] = ( + 2.5 # m. Clearance of highest possible heliostat point. + ) + scan_parameters['maximum_safe_altitude'] = ( + 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC + ) + scan_parameters['maximum_target_lookback'] = ( + 3 # Number of heliostats to look back for reflection targets. + ) scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. 
diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py index b40817afe..0429f945a 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/03_main_flight_plan_assembly.py @@ -35,28 +35,28 @@ def construct_ufacet_scan(solar_field, lead_in, run_past): # Control parameters. scan_parameters = {} - scan_parameters[ - 'locale' - ] = 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + scan_parameters['locale'] = ( + 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + ) scan_parameters['camera'] = cam.sony_alpha_20mm_landscape() # Camera model. # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters[ - 'section_plane_tolerance' - ] = 3 # m. Lateral distance to include heliostats in section. - scan_parameters[ - 'p_margin' - ] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - scan_parameters[ - 'altitude_margin' - ] = 2.5 # m. Clearance of highest possible heliostat point. - scan_parameters[ - 'maximum_safe_altitude' - ] = 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC - scan_parameters[ - 'maximum_target_lookback' - ] = 3 # Number of heliostats to look back for reflection targets. + scan_parameters['section_plane_tolerance'] = ( + 3 # m. Lateral distance to include heliostats in section. + ) + scan_parameters['p_margin'] = ( + 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + ) + scan_parameters['altitude_margin'] = ( + 2.5 # m. Clearance of highest possible heliostat point. 
+ ) + scan_parameters['maximum_safe_altitude'] = ( + 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC + ) + scan_parameters['maximum_target_lookback'] = ( + 3 # Number of heliostats to look back for reflection targets. + ) scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py index 9b79031c3..a14a07b11 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/04_main_ufacet_xy_analysis.py @@ -51,28 +51,28 @@ def construct_ufacet_scan_pass(solar_field, lead_in, run_past): # Control parameters. scan_parameters = {} - scan_parameters[ - 'locale' - ] = 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + scan_parameters['locale'] = ( + 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + ) scan_parameters['camera'] = cam.sony_alpha_20mm_landscape() # Camera model. # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters[ - 'section_plane_tolerance' - ] = 3 # m. Lateral distance to include heliostats in section. - scan_parameters[ - 'p_margin' - ] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - scan_parameters[ - 'altitude_margin' - ] = 2.5 # m. Clearance of highest possible heliostat point. - scan_parameters[ - 'maximum_safe_altitude' - ] = 90.0 # meters. # ?? 
SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC - scan_parameters[ - 'maximum_target_lookback' - ] = 3 # Number of heliostats to look back for reflection targets. + scan_parameters['section_plane_tolerance'] = ( + 3 # m. Lateral distance to include heliostats in section. + ) + scan_parameters['p_margin'] = ( + 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + ) + scan_parameters['altitude_margin'] = ( + 2.5 # m. Clearance of highest possible heliostat point. + ) + scan_parameters['maximum_safe_altitude'] = ( + 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC + ) + scan_parameters['maximum_target_lookback'] = ( + 3 # Number of heliostats to look back for reflection targets. + ) scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -131,28 +131,28 @@ def construct_ufacet_scan_pass(solar_field, lead_in, run_past): def construct_ufacet_scan_passes(solar_field, lead_in, run_past): # Control parameters. scan_parameters = {} - scan_parameters[ - 'locale' - ] = 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + scan_parameters['locale'] = ( + 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + ) scan_parameters['camera'] = cam.sony_alpha_20mm_landscape() # Camera model. # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters[ - 'section_plane_tolerance' - ] = 3 # m. Lateral distance to include heliostats in section. - scan_parameters[ - 'p_margin' - ] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - scan_parameters[ - 'altitude_margin' - ] = 2.5 # m. Clearance of highest possible heliostat point. 
- scan_parameters[ - 'maximum_safe_altitude' - ] = 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC - scan_parameters[ - 'maximum_target_lookback' - ] = 3 # Number of heliostats to look back for reflection targets. + scan_parameters['section_plane_tolerance'] = ( + 3 # m. Lateral distance to include heliostats in section. + ) + scan_parameters['p_margin'] = ( + 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + ) + scan_parameters['altitude_margin'] = ( + 2.5 # m. Clearance of highest possible heliostat point. + ) + scan_parameters['maximum_safe_altitude'] = ( + 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC + ) + scan_parameters['maximum_target_lookback'] = ( + 3 # Number of heliostats to look back for reflection targets. + ) scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. @@ -211,28 +211,28 @@ def construct_ufacet_scan_passes(solar_field, lead_in, run_past): def construct_ufacet_scan(solar_field, lead_in, run_past): # Control parameters. scan_parameters = {} - scan_parameters[ - 'locale' - ] = 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + scan_parameters['locale'] = ( + 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + ) scan_parameters['camera'] = cam.sony_alpha_20mm_landscape() # Camera model. # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters[ - 'section_plane_tolerance' - ] = 3 # m. Lateral distance to include heliostats in section. - scan_parameters[ - 'p_margin' - ] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - scan_parameters[ - 'altitude_margin' - ] = 2.5 # m. 
Clearance of highest possible heliostat point. - scan_parameters[ - 'maximum_safe_altitude' - ] = 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC - scan_parameters[ - 'maximum_target_lookback' - ] = 3 # Number of heliostats to look back for reflection targets. + scan_parameters['section_plane_tolerance'] = ( + 3 # m. Lateral distance to include heliostats in section. + ) + scan_parameters['p_margin'] = ( + 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. + ) + scan_parameters['altitude_margin'] = ( + 2.5 # m. Clearance of highest possible heliostat point. + ) + scan_parameters['maximum_safe_altitude'] = ( + 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC + ) + scan_parameters['maximum_target_lookback'] = ( + 3 # Number of heliostats to look back for reflection targets. + ) scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py index b57fbe99b..ef6bfc1a9 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/06_main_planner_trial_study.py @@ -203,9 +203,9 @@ def setup_render_control_scan_section_analysis(): ] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters[ - 'maximum_altitude' - ] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ufacet_control_parameters['maximum_altitude'] = ( + 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ) # Gaze control. 
ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py index b57fbe99b..ef6bfc1a9 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/07_main_generate_flight_plan_suite.py @@ -203,9 +203,9 @@ def setup_render_control_scan_section_analysis(): ] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters[ - 'maximum_altitude' - ] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ufacet_control_parameters['maximum_altitude'] = ( + 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ) # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py index 395d1ad46..b737e6502 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/08_main_generate_half_and_half_flight_plan.py @@ -204,9 +204,9 @@ def setup_render_control_scan_section_analysis(): ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. # ufacet_control_parameters['maximum_altitude'] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. - ufacet_control_parameters[ - 'maximum_altitude' - ] = 18.0 # m. Maximum altitude, roughly AGL, including slope effects. 
+ ufacet_control_parameters['maximum_altitude'] = ( + 18.0 # m. Maximum altitude, roughly AGL, including slope effects. + ) # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py index f2fd6c0d0..d441a188b 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/93_quick_nsttf_survey.py @@ -103,28 +103,28 @@ # Control parameters. scan_parameters = {} - scan_parameters[ - 'locale' - ] = 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + scan_parameters['locale'] = ( + 'Sandia NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + ) scan_parameters['camera'] = cam.sony_alpha_20mm_landscape() # Camera model. # scan_parameters['camera'] = cam.sony_alpha_20mm_portrait() # Camera model. scan_parameters['camera'] = cam.ultra_wide_angle() # Camera model. # scan_parameters['camera'] = cam.mavic_zoom() # Camera model. - scan_parameters[ - 'section_plane_tolerance' - ] = 3 # m. Lateral distance to include heliostats in section. - scan_parameters[ - 'p_margin' - ] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - scan_parameters[ - 'altitude_margin' - ] = 2.5 # m. Clearance of highest possible heliostat point. - scan_parameters[ - 'maximum_safe_altitude' - ] = 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC - scan_parameters[ - 'maximum_target_lookback' - ] = 3 # Number of heliostats to look back for reflection targets. + scan_parameters['section_plane_tolerance'] = ( + 3 # m. Lateral distance to include heliostats in section. + ) + scan_parameters['p_margin'] = ( + 0 # 2 # m. 
Lateral distance to add to constraints to allow UAS postiion error. + ) + scan_parameters['altitude_margin'] = ( + 2.5 # m. Clearance of highest possible heliostat point. + ) + scan_parameters['maximum_safe_altitude'] = ( + 90.0 # meters. # ?? SCAFFOLDING -- BASE THIS ON TECHNICAL FACTORS: SOLAR FLUX, ETC + ) + scan_parameters['maximum_target_lookback'] = ( + 3 # Number of heliostats to look back for reflection targets. + ) scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal. diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py index f252f719b..60a9b3445 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/define_scan_nsttf.py @@ -43,12 +43,12 @@ def define_scan_NSTTF_half_and_half(solar_field_short_name): lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN, ) - solar_field_spec[ - 'field_heliostat_file' - ] = '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' - solar_field_spec[ - 'field_facet_centroids_file' - ] = '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' + solar_field_spec['field_heliostat_file'] = ( + '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' + ) + solar_field_spec['field_facet_centroids_file'] = ( + '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' + ) # Define UFACET control flags. ufacet_control_parameters = {} @@ -62,9 +62,9 @@ def define_scan_NSTTF_half_and_half(solar_field_short_name): ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. # Half-and-Half - ufacet_control_parameters[ - 'maximum_altitude' - ] = 18.0 # m. Maximum altitude, roughly AGL, including slope effects. + ufacet_control_parameters['maximum_altitude'] = ( + 18.0 # m. Maximum altitude, roughly AGL, including slope effects. 
+ ) # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -351,12 +351,12 @@ def define_scan_NSTTF_demo(solar_field_short_name): lln.LON_NSTTF_ORIGIN, lln.LAT_NSTTF_ORIGIN, ) - solar_field_spec[ - 'field_heliostat_file' - ] = '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' - solar_field_spec[ - 'field_facet_centroids_file' - ] = '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' + solar_field_spec['field_heliostat_file'] = ( + '../U_Code_data/NSTTF/NSTTF_Heliostats_origin_at_torque_tube.csv' + ) + solar_field_spec['field_facet_centroids_file'] = ( + '../U_Code_data/NSTTF/NSTTF_Facet_Centroids.csv' + ) # Define UFACET control flags. ufacet_control_parameters = {} @@ -369,9 +369,9 @@ def define_scan_NSTTF_demo(solar_field_short_name): ] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters[ - 'maximum_altitude' - ] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ufacet_control_parameters['maximum_altitude'] = ( + 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ) # Gaze control. ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( @@ -454,9 +454,9 @@ def define_scan_NSTTF_full_field(solar_field_short_name): ] ufacet_control_parameters['curve_key_xy_list'] = ufacet_curve_key_xy_list # Maximum altitude. - ufacet_control_parameters[ - 'maximum_altitude' - ] = 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ufacet_control_parameters['maximum_altitude'] = ( + 25.0 # m. Maximum altitude, roughly AGL, including slope effects. + ) # Gaze control. 
ufacet_control_parameters['gaze_type'] = 'constant' # 'constant' or 'linear' ufacet_control_parameters['delta_eta'] = np.deg2rad( diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py index 72fd30764..f79652b18 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_parameters.py @@ -14,9 +14,9 @@ def construct_scan_parameters(scan_parameter_file): if scan_parameter_file == 'NSTTF': # Location. - scan_parameters[ - 'locale' - ] = 'NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + scan_parameters['locale'] = ( + 'NSTTF' # Information needed to convert (x,y,z into global (longitude, latitude) coordinates. + ) # Camera. scan_parameters['camera'] = cam.sony_alpha_20mm_landscape() # Camera model. @@ -27,9 +27,9 @@ def construct_scan_parameters(scan_parameter_file): # Scan flight. scan_parameters['lead_in'] = 18 # m. # ** Overriden by vanity flights. ** scan_parameters['run_past'] = 9 # m. # ** Overriden by vanity flights. ** - scan_parameters[ - 'fly_forward_backward' - ] = False # ** Overriden by vanity flights, raster flights. ** + scan_parameters['fly_forward_backward'] = ( + False # ** Overriden by vanity flights, raster flights. ** + ) # Return. return scan_parameters diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py index 2b865fc39..7ec00c646 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_raster_parameters.py @@ -58,9 +58,9 @@ def construct_raster_scan_parameters(raster_scan_parameter_file): eta = np.deg2rad(-35.0) # Arbitrary test value. 
scan_parameters['n_horizontal'] = 10 # Number of horizontal passes. scan_parameters['n_vertical'] = 6 # Number of vertical passes. - scan_parameters[ - 'eta' - ] = eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). + scan_parameters['eta'] = ( + eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). + ) scan_parameters['relative_z'] = 20 # m. scan_parameters['speed'] = 10 # m/sec. # Check result and return. diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py index d2b3b3b60..3a798662a 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet.py @@ -59,9 +59,9 @@ def construct_ufacet_scan( 'curve_key_xy_list' ] ufacet_scan_construction['list_of_ideal_xy_lists'] = list_of_ideal_xy_lists - ufacet_scan_construction[ - 'list_of_best_fit_segment_xys' - ] = list_of_best_fit_segment_xys + ufacet_scan_construction['list_of_best_fit_segment_xys'] = ( + list_of_best_fit_segment_xys + ) ufacet_scan_construction['section_list'] = section_list ufacet_scan_construction['scan_pass_list'] = scan_pass_list diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py index d3599830c..153920e40 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_parameters.py @@ -19,27 +19,27 @@ def construct_ufacet_scan_parameters( # NSTTF if ufacet_scan_parameter_file == 'NSTTF': # Scan section construction. - scan_parameters[ - 'candidate_margin_w' - ] = 10.00 # m. Margin on either side of section plane to bring in heliostats. + scan_parameters['candidate_margin_w'] = ( + 10.00 # m. 
Margin on either side of section plane to bring in heliostats. + ) # Should be larger than side-to-side heliostat distance. - scan_parameters[ - 'discard_threshold_p' - ] = 9.00 # m. Threshold to discard heliostats that are close together on a section, presumably abreast. + scan_parameters['discard_threshold_p'] = ( + 9.00 # m. Threshold to discard heliostats that are close together on a section, presumably abreast. + ) # Should be smaller than minimum heliostat row spacing. # Section analysis. - scan_parameters[ - 'p_margin' - ] = 0 # 2 # m. Lateral distance to add to constraints to allow UAS postiion error. - scan_parameters[ - 'altitude_margin' - ] = 2.5 # m. Clearance above highest possible heliostat point. - scan_parameters[ - 'maximum_safe_altitude' - ] = 90.0 # meters. Driven by safey considerations. Control limit may be tighter. - scan_parameters[ - 'maximum_target_lookback' - ] = 3 # Number of heliostats to look back for reflection targets. + scan_parameters['p_margin'] = ( + 0 # 2 # m. Lateral distance to add to constraints to allow UAS position error. + ) + scan_parameters['altitude_margin'] = ( + 2.5 # m. Clearance above highest possible heliostat point. + ) + scan_parameters['maximum_safe_altitude'] = ( + 90.0 # meters. Driven by safety considerations. Control limit may be tighter. + ) + scan_parameters['maximum_target_lookback'] = ( + 3 # Number of heliostats to look back for reflection targets. + ) scan_parameters['gaze_tolerance'] = np.deg2rad( 1 ) # Uncertainty in gaze angle. True angle is +/- tolerance from nominal.
diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py index 9eb655b4a..95312974a 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_analysis.py @@ -236,24 +236,24 @@ def single_heliostat_gaze_angle_analysis( C += C_step # Save constraints. - constraints[ - 'path_s_pq_list' - ] = path_s_pq_list # Path sacn region start point, as a function of C. - constraints[ - 'path_e_pq_list' - ] = path_e_pq_list # Path sacn region start point, as a function of C. - constraints[ - 'ray_min_eta_list' - ] = ray_min_eta_list # Ray pointing from path start point to assessed heliostat top edge, function of C. - constraints[ - 'ray_max_eta_list' - ] = ray_max_eta_list # Ray pointing from path end point to assessed heliostat bottom edge, function of C. - constraints[ - 'min_etaC_list' - ] = min_etaC_list # Lower bound on required gaze angle interval, as a function of C. - constraints[ - 'max_etaC_list' - ] = max_etaC_list # Upper bound on required gaze angle interval, as a function of C. + constraints['path_s_pq_list'] = ( + path_s_pq_list # Path scan region start point, as a function of C. + ) + constraints['path_e_pq_list'] = ( + path_e_pq_list # Path scan region end point, as a function of C. + ) + constraints['ray_min_eta_list'] = ( + ray_min_eta_list # Ray pointing from path start point to assessed heliostat top edge, function of C. + ) + constraints['ray_max_eta_list'] = ( + ray_max_eta_list # Ray pointing from path end point to assessed heliostat bottom edge, function of C. + ) + constraints['min_etaC_list'] = ( + min_etaC_list # Lower bound on required gaze angle interval, as a function of C.
+ ) + constraints['max_etaC_list'] = ( + max_etaC_list # Upper bound on required gaze angle interval, as a function of C. + ) # Return. return constraints @@ -437,9 +437,9 @@ def single_heliostat_gaze_angle_selection(section_context, constraints): constraints['shrunk_max_etaC_list'] = shrunk_max_etaC_list constraints['clipped_min_etaC_list'] = clipped_min_etaC_list constraints['clipped_max_etaC_list'] = clipped_max_etaC_list - constraints[ - 'selected_cacg_etaC' - ] = selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" + constraints['selected_cacg_etaC'] = ( + selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" + ) # Return. return constraints @@ -642,32 +642,32 @@ def single_heliostat_section_analysis( # Save the constraints. # Save before gaze angle analysis, because some gaze angle analysis routines might want to fetch contsraints. constraints = {} - constraints[ - 'h_a_idx' - ] = h_a_idx # Assessed heliostat index in assess_heliostat_name_list. + constraints['h_a_idx'] = ( + h_a_idx # Assessed heliostat index in assess_heliostat_name_list. + ) constraints['h_a_name'] = h_a_name # Assessed heliostat name. - constraints[ - 'h_t_name_list' - ] = h_t_name_list # Reflected target heliostat name list. + constraints['h_t_name_list'] = ( + h_t_name_list # Reflected target heliostat name list. + ) constraints['h_b_name'] = h_b_name # Background heliostat name. constraints['at_pq'] = at_pq # Top corner of assessed heliostat. constraints['ab_pq'] = ab_pq # Bottom corner of assessed heliostat. - constraints[ - 't_pq_list' - ] = t_pq_list # List of reflection target points. Might include a fictitious point. + constraints['t_pq_list'] = ( + t_pq_list # List of reflection target points. Might include a fictitious point. + ) constraints['bb_pq'] = bb_pq # Bottom corner of background heliostat. - constraints[ - 'nu' - ] = nu # Angle from p axis to assessed heliostat surface normal, measured ccw. 
+ constraints['nu'] = ( + nu # Angle from p axis to assessed heliostat surface normal, measured ccw. + ) constraints['abv_lb'] = abv_lb # Assessed bottom visibility, p lower bound. - constraints[ - 'abvm_lb' - ] = abvm_lb # Assessed bottom visibility margin, p lower bound. + constraints['abvm_lb'] = ( + abvm_lb # Assessed bottom visibility margin, p lower bound. + ) constraints['atv_lb'] = atv_lb # Assessed top visibility, p lower bound. constraints['atvm_lb'] = atvm_lb # Assessed top visibility margin, p lower bound. - constraints[ - 'ts_ub_list' - ] = ts_ub_list # Target reflection start list, p upper bound. + constraints['ts_ub_list'] = ( + ts_ub_list # Target reflection start list, p upper bound. + ) constraints['ts_ub'] = ts_ub # Target reflection start, p upper bound. constraints['tsm_ub'] = tsm_ub # Target reflection margin, p upper bound. constraints['sca_pq'] = sca_pq # Path start critical altitude point. @@ -680,9 +680,9 @@ def single_heliostat_section_analysis( constraints['e_locus'] = e_locus # Valid pass end points. constraints['C_start'] = C_start # Altitude of start critical point. constraints['C_end'] = C_end # Altitude of end critical point. - constraints[ - 'C_critical' - ] = C_critical # Critical altitude, considering both start and end. + constraints['C_critical'] = ( + C_critical # Critical altitude, considering both start and end. + ) # GAZE ANGLE ANALYSIS constraints = single_heliostat_gaze_angle_analysis( @@ -913,9 +913,9 @@ def multi_heliostat_gaze_angle_selection(section_context, pass_constraints): pass_constraints['clipped_max_etaC_list'] = clipped_max_etaC_list pass_constraints['shrunk_min_etaC_list'] = shrunk_min_etaC_list pass_constraints['shrunk_max_etaC_list'] = shrunk_max_etaC_list - pass_constraints[ - 'selected_cacg_etaC' - ] = selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" + pass_constraints['selected_cacg_etaC'] = ( + selected_cacg_etaC # "cacg" == "constant altitude, constant gaze" + ) # Return. 
return pass_constraints diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py index 122308a92..f68ac5b21 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_ufacet_section_construction.py @@ -109,11 +109,10 @@ def construct_ufacet_section(solar_field, best_fit_segment_xy, ufacet_scan_param # Sort by w. sort_heliostat_name_pqw_list_by_w(candidate_heliostat_name_pqw_list) # Select heliostats close to section plane, and discard close neighbors. - ( - selected_heliostat_name_pqw_list, - rejected_heliostat_name_pqw_list, - ) = select_min_w_reject_nearby_p( - candidate_heliostat_name_pqw_list, ufacet_scan_parameters + (selected_heliostat_name_pqw_list, rejected_heliostat_name_pqw_list) = ( + select_min_w_reject_nearby_p( + candidate_heliostat_name_pqw_list, ufacet_scan_parameters + ) ) # Sort in order of ascending p. sort_heliostat_name_pqw_list_by_p(selected_heliostat_name_pqw_list) diff --git a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py index 70d8802e4..08fe5e728 100644 --- a/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py +++ b/contrib/app/ufacet-s/flight_planner_ufacet/U_Code/lib/plan_scan_vanity_parameters.py @@ -69,9 +69,9 @@ def construct_vanity_scan_parameters( eta = np.deg2rad(-35.0) # Arbitrary test value. scan_parameters['n_horizontal'] = 10 # Number of horizontal passes. scan_parameters['n_vertical'] = 6 # Number of vertical passes. - scan_parameters[ - 'eta' - ] = eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). 
+ scan_parameters['eta'] = ( + eta # rad, Gaze angle, measured relative to horizontal (positive ==> up). + ) scan_parameters['relative_z'] = 20 # m. scan_parameters['speed'] = 10 # m/sec. # Check result and return. diff --git a/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py b/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py index 2d0f43452..13f04da53 100644 --- a/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py +++ b/contrib/app/ufacet-s/helio_scan/080_FramesNoDuplicates.py @@ -111,14 +111,13 @@ def filter_frames_and_write_data(self): ) # Identify duplicate frames. - ( - non_duplicate_frame_files, - duplicate_frame_files, - ) = vm.identify_duplicate_frames( - self.input_frame_dir, - self.output_frame_dir, - self.tolerance_image_size, - self.tolerance_image_pixel, + (non_duplicate_frame_files, duplicate_frame_files) = ( + vm.identify_duplicate_frames( + self.input_frame_dir, + self.output_frame_dir, + self.tolerance_image_size, + self.tolerance_image_pixel, + ) ) # Copy non-duplicate frames to frame output directory. diff --git a/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py b/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py index d840b1c23..163f9a089 100644 --- a/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py +++ b/contrib/app/ufacet-s/helio_scan/130_KeyFramesGivenManual.py @@ -177,11 +177,12 @@ def convert_and_save_original_keyinfo_file(self): heliostat_names = keyinfo[3] frame_id = instance_frame_correspondence_dict[instance_key] # Construct key frame entry. 
- ( - key_frame_id, - list_of_name_polygons, - ) = self.construct_key_frame_entry( - instance_key, keyinfo_dict, instance_frame_correspondence_dict + (key_frame_id, list_of_name_polygons) = ( + self.construct_key_frame_entry( + instance_key, + keyinfo_dict, + instance_frame_correspondence_dict, + ) ) key_frames_fnxl.add_list_of_name_xy_lists( key_frame_id, list_of_name_polygons diff --git a/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py b/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py index ae19e3dc6..bde2a9dac 100644 --- a/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py +++ b/contrib/app/ufacet-s/helio_scan/140_KeyCorners.py @@ -378,13 +378,12 @@ def search_key_frame(self, key_frame_id): # Construct a new fnxl that combines both key frame search results. # Take care to ensure that both have the same set of heliostat names. # This can return None if there are no xy_lists with a common name. - ( - pair_projected_fnxl_or_None, - mismatched, - ) = self.construct_merged_fnxl_synchronizing_heliostat_names( - local_logger, - search_result_1.projected_fnxl(), - search_result_2.projected_fnxl(), + (pair_projected_fnxl_or_None, mismatched) = ( + self.construct_merged_fnxl_synchronizing_heliostat_names( + local_logger, + search_result_1.projected_fnxl(), + search_result_2.projected_fnxl(), + ) ) # Determine whether any found heliostats were lost from one frame to the next. @@ -562,9 +561,9 @@ def save_key_corners(self, all_key_frames_corners_fnxl, key_frame_fnxls): def save_data(self, all_key_frames_corners_fnxl, mismatched_key_frame_ids): # Statistics. 
summary_dict = {} - summary_dict[ - 'n_key_frames_with_corners' - ] = all_key_frames_corners_fnxl.number_of_frames() + summary_dict['n_key_frames_with_corners'] = ( + all_key_frames_corners_fnxl.number_of_frames() + ) print('In KeyCorners.save_data(), writing key frame summary statistics...') ft.write_dict_file( 'key frame corners summary statistics', diff --git a/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py b/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py index a20a7a285..85a90b63a 100644 --- a/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py +++ b/contrib/app/ufacet-s/helio_scan/150_KeyTracks.py @@ -416,12 +416,12 @@ def search_key_track(self, key_frame_id): # Assemble result dictionary. result_dict = {} result_dict['key_frame_id'] = key_frame_id - result_dict[ - 'key_frame_projected_track_fnxl' - ] = search_result.key_frame_projected_track_fnxl - result_dict[ - 'key_frame_confirmed_track_fnxl' - ] = search_result.key_frame_confirmed_track_fnxl + result_dict['key_frame_projected_track_fnxl'] = ( + search_result.key_frame_projected_track_fnxl + ) + result_dict['key_frame_confirmed_track_fnxl'] = ( + search_result.key_frame_confirmed_track_fnxl + ) return result_dict # WRITE RESULT @@ -488,9 +488,9 @@ def save_data(self, list_of_result_dicts): key_frame_projected_track_fnxl = result_dict[ 'key_frame_projected_track_fnxl' ] - tracked_frames_per_key_frame_dict[ - key_frame_id - ] = key_frame_projected_track_fnxl.number_of_frames() + tracked_frames_per_key_frame_dict[key_frame_id] = ( + key_frame_projected_track_fnxl.number_of_frames() + ) print( 'In KeyTracks.save_data(), writing tracked_frames per key frame:', os.path.join(self.output_data_dir, self.tfpkf_body_ext), diff --git a/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py b/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py index c64fc640b..870ab0f98 100644 --- a/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py +++ b/contrib/app/ufacet-s/helio_scan/170_HeliostatTracks.py @@ -327,9 +327,9 
@@ def save_data(self, heliostat_tracks_nfxl, projected_or_confirmed_str): raise ValueError(msg) # Statistics. summary_dict = {} - summary_dict[ - 'n_heliostat_track_frames' - ] = heliostat_tracks_nfxl.number_of_frames() + summary_dict['n_heliostat_track_frames'] = ( + heliostat_tracks_nfxl.number_of_frames() + ) print( 'In HeliostatTracks.save_data(), writing key frame ' + projected_or_confirmed_str diff --git a/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py b/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py index 19ecc3879..902fe1178 100644 --- a/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py +++ b/contrib/app/ufacet-s/helio_scan/180_Heliostats3d.py @@ -150,18 +150,16 @@ def __init__( ) ) # Nearest. - ( - self.nearest_smooth_dir_body_ext, - self.nearest_design_dir_body_ext, - ) = self.construct_save_and_analyze_key_heliostats( - self.specifications.heliostat_design_name, 'Nearest' + (self.nearest_smooth_dir_body_ext, self.nearest_design_dir_body_ext) = ( + self.construct_save_and_analyze_key_heliostats( + self.specifications.heliostat_design_name, 'Nearest' + ) ) # Farthest. - ( - self.farthest_smooth_dir_body_ext, - self.farthest_design_dir_body_ext, - ) = self.construct_save_and_analyze_key_heliostats( - self.specifications.heliostat_design_name, 'Farthest' + (self.farthest_smooth_dir_body_ext, self.farthest_design_dir_body_ext) = ( + self.construct_save_and_analyze_key_heliostats( + self.specifications.heliostat_design_name, 'Farthest' + ) ) # Demonstration heliostat. 
self.demonstration_dir_body_ext = ( @@ -483,9 +481,9 @@ def construct_and_save_heliostat_corners_3d_aux( infer_dict['projected_or_confirmed_str'] = projected_or_confirmed_str infer_dict['distorted_or_undistorted_str'] = distorted_or_undistorted_str # infer_dict['output_corner_2d_trajectories_dir'] = output_corner_2d_trajectories_dir - infer_dict[ - 'output_construct_corners_3d_dir' - ] = output_construct_corners_3d_dir + infer_dict['output_construct_corners_3d_dir'] = ( + output_construct_corners_3d_dir + ) infer_dict['render_control'] = render_control list_of_infer_dicts.append(infer_dict) diff --git a/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py b/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py index 0853c24d5..43862b428 100644 --- a/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py +++ b/contrib/app/ufacet-s/helio_scan/190_TrajectoryAnalysis.py @@ -319,10 +319,9 @@ def __init__( 'In TrajectoryAnalysis.__init__(), initializing GPS-frame synchronization constants...' 
) self.synchronization_pair_list = self.initialize_synchronization_pair_list() - ( - self.synchronization_slope, - self.synchronization_intercept, - ) = self.initialize_synchronization_constants() + (self.synchronization_slope, self.synchronization_intercept) = ( + self.initialize_synchronization_constants() + ) self.print_synchronization_pair_list() self.synchronization_constants_dir_body_ext = ( self.save_synchronization_constants() @@ -579,16 +578,16 @@ def add_velocity_columns_to_flight_log_df(self): self.flight_log_df.loc[idx, 'speed_average(m/sec)'] = average_s self.flight_log_df.loc[idx, 'delta_speed(m/sec)'] = delta_speed self.flight_log_df.loc[idx, 'abs_delta_speed(m/sec)'] = abs_delta_speed - self.flight_log_df.loc[ - idx, 'velocity_angle_xy(rad)' - ] = velocity_angle_xy + self.flight_log_df.loc[idx, 'velocity_angle_xy(rad)'] = ( + velocity_angle_xy + ) self.flight_log_df.loc[idx, 'velocity_angle_z(rad)'] = velocity_angle_z - self.flight_log_df.loc[ - idx, 'delta_velocity_angle_xy(rad)' - ] = delta_velocity_angle_xy - self.flight_log_df.loc[ - idx, 'delta_velocity_angle_z(rad)' - ] = delta_velocity_angle_z + self.flight_log_df.loc[idx, 'delta_velocity_angle_xy(rad)'] = ( + delta_velocity_angle_xy + ) + self.flight_log_df.loc[idx, 'delta_velocity_angle_z(rad)'] = ( + delta_velocity_angle_z + ) def velocity_angle_xy(self, velocity_x, velocity_y): """ @@ -1750,9 +1749,9 @@ def construct_hel_gps_camera_analysis_dict(self): ) pass else: - hel_gps_camera_analysis_dict[ - hel_name - ] = list_of_gps_camera_analysis_dicts + hel_gps_camera_analysis_dict[hel_name] = ( + list_of_gps_camera_analysis_dicts + ) # Return. return hel_gps_camera_analysis_dict @@ -1824,12 +1823,10 @@ def construct_gps_camera_analysis_dict( hel_name, time_begin, time_mid, time_end ) # Compute heliostat (azimuth, elevation) that will bring camera pass into parallel alignment with GPS pass. 
- ( - azimuth_from_alignment, - elevation_from_alignment, - alignment_angle_error, - ) = self.compute_alignment_azimuth_elevation( - gps_pass, camera_pass, azimuth_from_model_mid, elevation_from_model_mid + (azimuth_from_alignment, elevation_from_alignment, alignment_angle_error) = ( + self.compute_alignment_azimuth_elevation( + gps_pass, camera_pass, azimuth_from_model_mid, elevation_from_model_mid + ) ) # # Compute heliostat (azimuth, elevation) from log. # azimuth_from_log_begin, \ @@ -1904,9 +1901,9 @@ def construct_gps_camera_analysis_dict( gps_camera_analysis_dict['time_mid'] = time_mid # (azimuth, elevation) from aim point and time. gps_camera_analysis_dict['azimuth_from_model_begin'] = azimuth_from_model_begin - gps_camera_analysis_dict[ - 'elevation_from_model_begin' - ] = elevation_from_model_begin + gps_camera_analysis_dict['elevation_from_model_begin'] = ( + elevation_from_model_begin + ) gps_camera_analysis_dict['azimuth_from_model_mid'] = azimuth_from_model_mid gps_camera_analysis_dict['elevation_from_model_mid'] = elevation_from_model_mid gps_camera_analysis_dict['azimuth_from_model_end'] = azimuth_from_model_end @@ -1929,22 +1926,21 @@ def construct_gps_camera_analysis_dict( # gps_camera_analysis_dict['azimuth_target_from_log_end'] = azimuth_target_from_log_end # gps_camera_analysis_dict['elevation_target_from_log_end'] = elevation_target_from_log_end # Corresponding point analysis. - gps_camera_analysis_dict[ - 'per_heliostat_transformed_camera_pass' - ] = per_heliostat_transformed_camera_pass - gps_camera_analysis_dict[ - 'camera_gps_point_pair_list' - ] = camera_gps_point_pair_list + gps_camera_analysis_dict['per_heliostat_transformed_camera_pass'] = ( + per_heliostat_transformed_camera_pass + ) + gps_camera_analysis_dict['camera_gps_point_pair_list'] = ( + camera_gps_point_pair_list + ) gps_camera_analysis_dict['camera_gps_distance_list'] = camera_gps_distance_list gps_camera_analysis_dict['rms_distance'] = rms_distance # Return. 
return gps_camera_analysis_dict def compute_model_azimuth_elevation(self, hel_name, time_begin, time_mid, time_end): - ( - azimuth_begin, - elevation_begin, - ) = self.compute_model_azimuth_elevation_given_time(hel_name, time_begin) + (azimuth_begin, elevation_begin) = ( + self.compute_model_azimuth_elevation_given_time(hel_name, time_begin) + ) azimuth_mid, elevation_mid = self.compute_model_azimuth_elevation_given_time( hel_name, time_mid ) @@ -2051,18 +2047,12 @@ def compute_log_azimuth_elevation(self, hel_name, time_begin, time_mid, time_end azimuth_target_begin, elevation_target_begin, ) = self.compute_log_azimuth_elevation_given_time(hel_name, time_begin) - ( - azimuth_mid, - elevation_mid, - azimuth_target_mid, - elevation_target_mid, - ) = self.compute_log_azimuth_elevation_given_time(hel_name, time_mid) - ( - azimuth_end, - elevation_end, - azimuth_target_end, - elevation_target_end, - ) = self.compute_log_azimuth_elevation_given_time(hel_name, time_end) + (azimuth_mid, elevation_mid, azimuth_target_mid, elevation_target_mid) = ( + self.compute_log_azimuth_elevation_given_time(hel_name, time_mid) + ) + (azimuth_end, elevation_end, azimuth_target_end, elevation_target_end) = ( + self.compute_log_azimuth_elevation_given_time(hel_name, time_end) + ) # Return. return ( azimuth_begin, @@ -2124,9 +2114,9 @@ def set_per_heliostat_estimates_of_camera_xyz_given_overall_time(self): ) transformed_camera_pass_list.append(transformed_camera_pass) # Add to result. 
- self.hel_transformed_camera_passes_dict[ - hel_name - ] = transformed_camera_pass_list + self.hel_transformed_camera_passes_dict[hel_name] = ( + transformed_camera_pass_list + ) def set_per_heliosat_configurations_from_gps_camera_alignment(self): """ @@ -2849,9 +2839,9 @@ def draw_and_save_solar_field_suite(self): draw_control_dict['draw_GPS_log'] = True draw_control_dict['draw_gps_scan_passes'] = False draw_control_dict['draw_trajectory_fragments'] = True - draw_control_dict[ - 'trajectory_fragment_selected_heliostats' - ] = self.synchronization_heliostat_name_list() + draw_control_dict['trajectory_fragment_selected_heliostats'] = ( + self.synchronization_heliostat_name_list() + ) draw_control_dict['connect_trajectory_fragments'] = True draw_control_dict['draw_synchronization_points'] = True draw_control_dict['include_points_with_missing_corners'] = True @@ -3282,9 +3272,9 @@ def draw_and_save_solar_field_suite(self): draw_control_dict['include_points_with_missing_corners'] = False draw_control_dict['include_non_refined_points'] = False draw_control_dict['draw_camera_passes'] = True - draw_control_dict[ - 'draw_gps_transformed_camera_pass_connections' - ] = True # False #True + draw_control_dict['draw_gps_transformed_camera_pass_connections'] = ( + True # False #True + ) self.solar_field_style = rcsf.heliostat_outlines(color='grey') self.heliostat_up_style = rch.outline( color='lightblue' @@ -4467,11 +4457,9 @@ def draw_and_save_gps_log_analysis_plots(self): def save_enhanced_flight_log(self): if not (os.path.exists(self.output_data_dir)): os.makedirs(self.output_data_dir) - ( - input_flight_log_dir, - input_flight_log_body, - input_flight_log_ext, - ) = ft.path_components(self.input_flight_log_dir_body_ext) + (input_flight_log_dir, input_flight_log_body, input_flight_log_ext) = ( + ft.path_components(self.input_flight_log_dir_body_ext) + ) output_flight_log_plus_body_ext = ( input_flight_log_body + '_plus' + input_flight_log_ext ) diff --git 
a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py index a7d9eb88c..534018d16 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py +++ b/contrib/app/ufacet-s/helio_scan/lib/DEPRECATED_utils.py @@ -368,7 +368,7 @@ def extract_frames_nopipe( str(ending_frame_id / fps), '-vf', str(fps), - str(saving_path) + str(saving_path), # '-c:v', 'ffv1', # #'-vf', 'select=eq(n\,' + str(frame_id) + ')', # #'-vsync', '0', diff --git a/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py b/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py index 01bffcf9b..818734caf 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py +++ b/contrib/app/ufacet-s/helio_scan/lib/HeliostatInfer3d.py @@ -379,15 +379,13 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( previous_error = current_overall_error for facet_idx in range(0, n_facets): if not facet_is_converged[facet_idx]: - ( - best_var_name, - best_min_error, - best_min_error_del_var, - ) = self.find_best_variable_xyz_rot_z( - search_heliostat_spec, - facet_to_adjust_idx=facet_idx, - n_steps_one_direction=n_steps_one_direction, - variable_steps=variable_steps, + (best_var_name, best_min_error, best_min_error_del_var) = ( + self.find_best_variable_xyz_rot_z( + search_heliostat_spec, + facet_to_adjust_idx=facet_idx, + n_steps_one_direction=n_steps_one_direction, + variable_steps=variable_steps, + ) ) if best_min_error_del_var == 0: facet_is_converged[facet_idx] = True @@ -466,15 +464,13 @@ def adjust_heliostat_spec_to_minimize_reprojection_error( previous_error = current_overall_error for facet_idx in range(0, n_facets): if not facet_is_converged[facet_idx]: - ( - best_var_name, - best_min_error, - best_min_error_del_var, - ) = self.find_best_variable_rot_xy( - search_heliostat_spec, - facet_to_adjust_idx=facet_idx, - n_steps_one_direction=n_steps_one_direction, - variable_steps=variable_steps, + (best_var_name, best_min_error, 
best_min_error_del_var) = ( + self.find_best_variable_rot_xy( + search_heliostat_spec, + facet_to_adjust_idx=facet_idx, + n_steps_one_direction=n_steps_one_direction, + variable_steps=variable_steps, + ) ) if best_min_error_del_var == 0: facet_is_converged[facet_idx] = True diff --git a/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py b/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py index 6d29134b3..5e09cce49 100644 --- a/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py +++ b/contrib/app/ufacet-s/helio_scan/lib/KeyFrameCornerSearch.py @@ -14,96 +14,112 @@ from numpy.lib.type_check import imag import opencsp.common.lib.geometry.geometry_2d as g2d -from DEPRECATED_utils import * # ?? SCAFFOLDING RCB -- ELIMINATE THIS -from DEPRECATED_save_read import * # ?? SCAFFOLDING RCB -- ELIMINATE THIS +from DEPRECATED_utils import * # ?? SCAFFOLDING RCB -- ELIMINATE THIS +from DEPRECATED_save_read import * # ?? SCAFFOLDING RCB -- ELIMINATE THIS import FrameNameXyList as fnxl -from opencsp.common.lib.render_control.RenderControlKeyCorners import RenderControlKeyCorners +from opencsp.common.lib.render_control.RenderControlKeyCorners import ( + RenderControlKeyCorners, +) -Component = NewType("Component", dict[str, Union[str, list[int], list[float], list[list[int]]]]) +Component = NewType( + "Component", dict[str, Union[str, list[int], list[float], list[list[int]]]] +) -class KeyFrameCornerSearch(): + +class KeyFrameCornerSearch: """ Class executing search for facet corners in a key frame of a UFACET scan video. """ # CONSTRUCTION - def __init__(self, - # Problem definition. - key_frame_id, # Numerical key frame index. Uniquely determines the frame within the video. - key_frame_id_str, # Not the same as str(key_frame_id), because this includes the proper number of leading zeros, etc. - key_frame_img: np.ndarray, # The key frame image, already loaded. 
- list_of_name_polygons: list[tuple[str, list[list[int]]]], # List of expected [hel_name, polygon] pairs. - specifications, # Solar field specifications. # ?? SCAFFOLDING RCB -- REPLACE THIS WITH MASTER INFORMATION LOADED FROM DISK FILES. - # Input/output sources. - output_construction_dir, # Where to save the detailed image processing step-by-step plots. - solvePnPtype, # how to solve PnP. Values are 'pnp' and 'calib' - # Render control. - render_control:RenderControlKeyCorners): # Flags to control rendering on this run. - """ Search the given key_frame_img for one heliostat per list_of_name_polygons. + def __init__( + self, + # Problem definition. + key_frame_id, # Numerical key frame index. Uniquely determines the frame within the video. + key_frame_id_str, # Not the same as str(key_frame_id), because this includes the proper number of leading zeros, etc. + key_frame_img: np.ndarray, # The key frame image, already loaded. + list_of_name_polygons: list[ + tuple[str, list[list[int]]] + ], # List of expected [hel_name, polygon] pairs. + specifications, # Solar field specifications. # ?? SCAFFOLDING RCB -- REPLACE THIS WITH MASTER INFORMATION LOADED FROM DISK FILES. + # Input/output sources. + output_construction_dir, # Where to save the detailed image processing step-by-step plots. + solvePnPtype, # how to solve PnP. Values are 'pnp' and 'calib' + # Render control. + render_control: RenderControlKeyCorners, + ): # Flags to control rendering on this run. + """Search the given key_frame_img for one heliostat per list_of_name_polygons. The results can be accessed by: First check successful(), then Retrieve results with projected_fnxl() """ - - print('In KeyFrameCornerSearch.__init__()...') # ?? SCAFFOLDING RCB -- TEMPORARY + + print( + 'In KeyFrameCornerSearch.__init__()...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY # Store input. 
- self.key_frame_id = key_frame_id - self.key_frame_id_str = key_frame_id_str - self.key_frame_img = key_frame_img - self.list_of_name_polygons = list_of_name_polygons - self.specifications = specifications + self.key_frame_id = key_frame_id + self.key_frame_id_str = key_frame_id_str + self.key_frame_img = key_frame_img + self.list_of_name_polygons = list_of_name_polygons + self.specifications = specifications self.output_construction_dir = output_construction_dir - self.solvePnPtype = solvePnPtype - self.render_control = render_control + self.solvePnPtype = solvePnPtype + self.render_control = render_control self.frame = { # ?? SCAFFOLDING RCB -- DO WE STILL NEED THIS FRAME DATA STRUCTURE? SHOULD WE STORE IN SELF INSTEAD? - 'key_frame_img': key_frame_img, # ?? SCAFFOLDING RCB -- DO WE STILL NEED THIS FRAME DATA STRUCTURE? SHOULD WE STORE IN SELF INSTEAD? - "output_construction_dir": output_construction_dir # ?? SCAFFOLDING RCB -- DO WE STILL NEED THIS FRAME DATA STRUCTURE? SHOULD WE STORE IN SELF INSTEAD? - } # ?? SCAFFOLDING RCB -- DO WE STILL NEED THIS FRAME DATA STRUCTURE? SHOULD WE STORE IN SELF INSTEAD? + 'key_frame_img': key_frame_img, # ?? SCAFFOLDING RCB -- DO WE STILL NEED THIS FRAME DATA STRUCTURE? SHOULD WE STORE IN SELF INSTEAD? + "output_construction_dir": output_construction_dir, # ?? SCAFFOLDING RCB -- DO WE STILL NEED THIS FRAME DATA STRUCTURE? SHOULD WE STORE IN SELF INSTEAD? + } # ?? SCAFFOLDING RCB -- DO WE STILL NEED THIS FRAME DATA STRUCTURE? SHOULD WE STORE IN SELF INSTEAD? - print('In KeyFrameCornerSearch.__init__(), performing full image analysis...') # ?? SCAFFOLDING RCB -- TEMPORARY + print( + 'In KeyFrameCornerSearch.__init__(), performing full image analysis...' + ) # ?? 
SCAFFOLDING RCB -- TEMPORARY # Input polygons self.draw_img_polygons() # Edge detection - self.frame['edges'], \ - self.frame['edges_img'] = self.canny() + self.frame['edges'], self.frame['edges_img'] = self.canny() # Sky detection # self.frame['sky'], \ # self.frame['sky_img'] = self.skyhsv() # ?? SCAFFOLDING RCB -- PREVIOUS VERSION - self.frame['sky'], \ - self.frame['sky_img'] = self.sky() + self.frame['sky'], self.frame['sky_img'] = self.sky() # Facet boundaries - self.frame['boundaries'], \ - self.frame['boundaries_img'] = self.facet_boundaries() + self.frame['boundaries'], self.frame['boundaries_img'] = self.facet_boundaries() # Connected_components - self.frame['components'], \ - self.frame['components_img'] = self.connected_components() + (self.frame['components'], self.frame['components_img']) = ( + self.connected_components() + ) # Filtered connected_components - self.frame['filt_components'], \ - self.frame['filt_components_img'] = self.filter_connected_components() + (self.frame['filt_components'], self.frame['filt_components_img']) = ( + self.filter_connected_components() + ) # TODO BGB make sure none of the components bridge the gap between mirrors # Fitted lines connected components - self.frame['fitted_lines_components'] = self.fitted_lines_connected_components() + self.frame['fitted_lines_components'] = self.fitted_lines_connected_components() # Line inliers - self.frame['fitted_lines_inliers_components'] = self.fitted_lines_inliers_components() + self.frame['fitted_lines_inliers_components'] = ( + self.fitted_lines_inliers_components() + ) # Corners - self.frame['corners'] = self.find_corners() + self.frame['corners'] = self.find_corners() # Facets - self.frame['facets'] = self.facets() + self.frame['facets'] = self.facets() # Filter facets based on polygons - self.frame['filtered_facets'], \ - self.frame['heliostats'] = self.filter_facets_polygons() # Initial setting of self.frame['heliostats'] - # Top row + (self.frame['filtered_facets'], 
self.frame['heliostats']) = ( + self.filter_facets_polygons() + ) # Initial setting of self.frame['heliostats'] + # Top row self.top_row_facets() # Updates self.frame['heliostats'] # Register top row self.classify_top_row_facets() # Updates self.frame['heliostats'] # Projected Corners - self.project_and_confirm(iterations=5, # Updates: self.frame['heliostats'], self.frame['all_projected_corners'] - canny_levels=['auto', 'light', 'lighter']) # - + self.project_and_confirm( + iterations=5, # Updates: self.frame['heliostats'], self.frame['all_projected_corners'] + canny_levels=['auto', 'light', 'lighter'], + ) # # ACCESS @@ -118,228 +134,353 @@ def confirmed_fnxl(self) -> fnxl.FrameNameXyList: def projected_fnxl(self) -> fnxl.FrameNameXyList: return self.frame['projected_fnxl'] - + def successful(self): """ Returns true if the image processing successfully produced final corners. """ - return ('all_projected_corners' in self.frame) and (len(self.frame['all_projected_corners']) > 0) - + return ('all_projected_corners' in self.frame) and ( + len(self.frame['all_projected_corners']) > 0 + ) # IMAGE PROCESSING def draw_img_polygons(self): - print('In KeyFrameCornerSearch.draw_img_polygons(), entering routine...') # ?? SCAFFOLDING RCB -- TEMPORARY + print( + 'In KeyFrameCornerSearch.draw_img_polygons(), entering routine...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY if self.render_control.draw_img_box: img = self.frame['key_frame_img'] plt.figure() plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB)) - - for name_polygon in self.list_of_name_polygons: # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - name = name_polygon[0] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - polygon = name_polygon[1] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + + for ( + name_polygon + ) in ( + self.list_of_name_polygons + ): # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + name = name_polygon[ + 0 + ] # ?? 
SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + polygon = name_polygon[ + 1 + ] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. color = 'g' # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. # Draw the polygon. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. closed_xy_list = polygon.copy() closed_xy_list.append(polygon[0]) - x_list = [pt[0] for pt in closed_xy_list] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - y_list = [pt[1] for pt in closed_xy_list] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - plt.plot(x_list, y_list, color=color) # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + x_list = [ + pt[0] for pt in closed_xy_list + ] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + y_list = [ + pt[1] for pt in closed_xy_list + ] # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + plt.plot( + x_list, y_list, color=color + ) # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. # Draw the heliostat name. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - if len(polygon) > 0: # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - label_xy = g2d.label_point(polygon) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - plt.text(label_xy[0], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - label_xy[1], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - name, # ?? 
SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - color=color, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - verticalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - fontsize=10, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - fontweight='bold') # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. - - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_00_img_box.png'), dpi=500) + if ( + len(polygon) > 0 + ): # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + label_xy = g2d.label_point( + polygon + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + plt.text( + label_xy[ + 0 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? 
SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + label_xy[ + 1 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + name, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + color=color, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + verticalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + fontsize=10, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + fontweight='bold', + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # ?? SCAFFOLDING RCB -- INTEGRATRE THIS WITH STANDARD FNXL RENDERING. + + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_00_img_box.png', + ), + dpi=500, + ) plt.close() - - + def canny(self, img=None): print('In KeyFrameCornerSearch.canny()...') # ?? SCAFFOLDING RCB -- TEMPORARY if img is None: - img = self.frame['key_frame_img'] # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. 
MUST KEEP CONSISTENT. - img = cv.GaussianBlur(img, (5,5), 0) # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. - - edges = CannyImg(img=img, canny_type='auto') # ! auto # ?? SCAFFOLDING RCB -- ORIGINAL CODE was 'light' # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. -#edges = CannyImg(img=img, canny_type='light') # ! auto # ?? SCAFFOLDING RCB -- ORIGINAL CODE + img = self.frame[ + 'key_frame_img' + ] # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. + img = cv.GaussianBlur( + img, (5, 5), 0 + ) # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. + + edges = CannyImg( + img=img, canny_type='auto' + ) # ! auto # ?? SCAFFOLDING RCB -- ORIGINAL CODE was 'light' # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. + # edges = CannyImg(img=img, canny_type='light') # ! auto # ?? SCAFFOLDING RCB -- ORIGINAL CODE row, col = np.nonzero(edges) edges = np.zeros((img.shape[0], img.shape[1])).astype('int') - edges[row,col] = 1 + edges[row, col] = 1 edge_img = 0 * self.frame['key_frame_img'] edge_img[row, col, :] = EDGE_COLOR if self.render_control.draw_edge: - save_image(img=(edges*255), imgname=self.key_frame_id_str+'_01_edge.png', path=self.frame['output_construction_dir']) # ?? SCAFFOLDING RCB -- MULTIPLIED BY 255 TO MAKE IMAGE VISIBLE IN PREVIEW. + save_image( + img=(edges * 255), + imgname=self.key_frame_id_str + '_01_edge.png', + path=self.frame['output_construction_dir'], + ) # ?? SCAFFOLDING RCB -- MULTIPLIED BY 255 TO MAKE IMAGE VISIBLE IN PREVIEW. 
if self.render_control.draw_edge_fig: - save_fig(img=edges, imgname=self.key_frame_id_str+'_01_edge_fig.png', path=self.frame['output_construction_dir'], rgb=True, dpi=1000) # ?? SCAFFOLDING RCB -- SETTING RGB=TRUE, TO SUPPRESS CV CALL WITHIN SAVE_FIG() TO CONVERT FROM BGR TO RGB. THIS CRASHES ON THIS INPUT. + save_fig( + img=edges, + imgname=self.key_frame_id_str + '_01_edge_fig.png', + path=self.frame['output_construction_dir'], + rgb=True, + dpi=1000, + ) # ?? SCAFFOLDING RCB -- SETTING RGB=TRUE, TO SUPPRESS CV CALL WITHIN SAVE_FIG() TO CONVERT FROM BGR TO RGB. THIS CRASHES ON THIS INPUT. if self.render_control.draw_edge_img: - save_image(img=edge_img, imgname=self.key_frame_id_str+'_01_edge_img.png', path=self.frame['output_construction_dir']) + save_image( + img=edge_img, + imgname=self.key_frame_id_str + '_01_edge_img.png', + path=self.frame['output_construction_dir'], + ) if self.render_control.draw_edge_img_fig: - save_fig(img=edge_img, imgname=self.key_frame_id_str+'_01_edge_img_fig.png', path=self.frame['output_construction_dir'], rgb=True, dpi=1000) - - return edges, edge_img + save_fig( + img=edge_img, + imgname=self.key_frame_id_str + '_01_edge_img_fig.png', + path=self.frame['output_construction_dir'], + rgb=True, + dpi=1000, + ) + return edges, edge_img def skyhsv(self): print('In KeyFrameCornerSearch.skyhsv()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY - img = self.frame['key_frame_img'] + img = self.frame['key_frame_img'] sky, sky_img = sky_with_hsv(img=img, rgb=False) - + # img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB) # hsv_img = cv.cvtColor(img_rgb, cv.COLOR_RGB2HSV) # light_sky = (100, 30, 100) #(100, 30, 1) # dark_sky = (150, 140, 255) #(150, 140, 255) # sky = cv.inRange(hsv_img, light_sky, dark_sky) - # sky_img = cv.bitwise_and(img_rgb, img_rgb, mask=sky) + # sky_img = cv.bitwise_and(img_rgb, img_rgb, mask=sky) if self.render_control.draw_skyhsv: - save_image(img=sky, imgname=self.key_frame_id_str+'_02_skyhsv.png', path=self.frame['output_construction_dir']) + save_image( + img=sky, + imgname=self.key_frame_id_str + '_02_skyhsv.png', + path=self.frame['output_construction_dir'], + ) if self.render_control.draw_skyhsv_fig: - save_fig(img=sky, imgname=self.key_frame_id_str+'_02_skyhsv_fig.png', path=self.frame['output_construction_dir'], rgb=True, dpi=1000) + save_fig( + img=sky, + imgname=self.key_frame_id_str + '_02_skyhsv_fig.png', + path=self.frame['output_construction_dir'], + rgb=True, + dpi=1000, + ) if self.render_control.draw_skyhsv_img: - save_image(img=sky_img, imgname=self.key_frame_id_str+'_02_skyhsv_img.png', path=self.frame['output_construction_dir']) + save_image( + img=sky_img, + imgname=self.key_frame_id_str + '_02_skyhsv_img.png', + path=self.frame['output_construction_dir'], + ) if self.render_control.draw_skyhsv_img_fig: - save_fig(img=sky_img, imgname=self.key_frame_id_str+'_02_skyhsv_img_fig.png', path=self.frame['output_construction_dir'], rgb=True, dpi=1000) + save_fig( + img=sky_img, + imgname=self.key_frame_id_str + '_02_skyhsv_img_fig.png', + path=self.frame['output_construction_dir'], + rgb=True, + dpi=1000, + ) return sky, sky_img - def sky(self): print('In KeyFrameCornerSearch.sky()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY - img = self.frame['key_frame_img'] + img = self.frame['key_frame_img'] sky_img = img.copy() - - b = sky_img[:,:,0] / 255.0 - g = sky_img[:,:,1] / 255.0 - r = sky_img[:,:,2] / 255.0 + + b = sky_img[:, :, 0] / 255.0 + g = sky_img[:, :, 1] / 255.0 + r = sky_img[:, :, 2] / 255.0 # Identify sky - sky_x, sky_y = np.nonzero(b+g+r > SKY_THRESHOLD) - sky = np.zeros((img.shape[0], img.shape[1])).astype('int') + sky_x, sky_y = np.nonzero(b + g + r > SKY_THRESHOLD) + sky = np.zeros((img.shape[0], img.shape[1])).astype('int') sky[sky_x, sky_y] = 1 - + sky_img[sky_x, sky_y, 0] = SKY_COLOR[0] sky_img[sky_x, sky_y, 1] = SKY_COLOR[1] sky_img[sky_x, sky_y, 2] = SKY_COLOR[2] - + if self.render_control.draw_sky: - save_image(img=(sky*255), imgname=self.key_frame_id_str+'_02_sky.png', path=self.frame['output_construction_dir']) # ?? SCAFFOLDING RCB -- MULTIPLIED BY 255 TO MAKE IMAGE VISIBLE IN PREVIEW. + save_image( + img=(sky * 255), + imgname=self.key_frame_id_str + '_02_sky.png', + path=self.frame['output_construction_dir'], + ) # ?? SCAFFOLDING RCB -- MULTIPLIED BY 255 TO MAKE IMAGE VISIBLE IN PREVIEW. if self.render_control.draw_sky_fig: - save_fig(img=sky, imgname=self.key_frame_id_str+'_02_sky_fig.png', path=self.frame['output_construction_dir'], rgb=True, dpi=1000) # ?? SCAFFOLDING RCB -- SETTING RGB=TRUE, TO SUPPRESS CV CALL WITHIN SAVE_FIG() TO CONVERT FROM BGR TO RGB. THIS CRASHES ON THIS INPUT. + save_fig( + img=sky, + imgname=self.key_frame_id_str + '_02_sky_fig.png', + path=self.frame['output_construction_dir'], + rgb=True, + dpi=1000, + ) # ?? SCAFFOLDING RCB -- SETTING RGB=TRUE, TO SUPPRESS CV CALL WITHIN SAVE_FIG() TO CONVERT FROM BGR TO RGB. THIS CRASHES ON THIS INPUT. 
if self.render_control.draw_sky_img: - save_image(img=sky_img, imgname=self.key_frame_id_str+'_02_sky_img.png', path=self.frame['output_construction_dir']) + save_image( + img=sky_img, + imgname=self.key_frame_id_str + '_02_sky_img.png', + path=self.frame['output_construction_dir'], + ) if self.render_control.draw_sky_img_fig: - save_fig(img=sky_img, imgname=self.key_frame_id_str+'_02_sky_img_fig.png', path=self.frame['output_construction_dir'], rgb=True, dpi=1000) - - return sky, sky_img + save_fig( + img=sky_img, + imgname=self.key_frame_id_str + '_02_sky_img_fig.png', + path=self.frame['output_construction_dir'], + rgb=True, + dpi=1000, + ) + return sky, sky_img def facet_boundaries(self): - """ Colors pixels based on if they match is_boundary_pixel(...). - + """Colors pixels based on if they match is_boundary_pixel(...). + Returns ------- boundaries: a 0 (not a boundary pixel) or 1 (boundary pixel) ndarray that is the same size as self.frame['key_frame_img'] - boundaries_img: an ndarray with with boundary pixels colored based on whether they are a top/left/right/bottom edge pixel """ - print('In KeyFrameCornerSearch.facet_boundaries()...') # ?? SCAFFOLDING RCB -- TEMPORARY - img = self.frame['key_frame_img'] + boundaries_img: an ndarray with with boundary pixels colored based on whether they are a top/left/right/bottom edge pixel + """ + print( + 'In KeyFrameCornerSearch.facet_boundaries()...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY + img = self.frame['key_frame_img'] edges = self.frame['edges'] - row_edges, col_edges = np.nonzero(edges) - print('In KeyFrameCornerSearch.facet_boundaries(), number of edge pixels len(row_edges) =', len(row_edges)) # ?? SCAFFOLDING RCB -- TEMPORARY - boundaries_img = 0*img - boundaries_rows = [] - boundaries_cols = [] + row_edges, col_edges = np.nonzero(edges) + print( + 'In KeyFrameCornerSearch.facet_boundaries(), number of edge pixels len(row_edges) =', + len(row_edges), + ) # ?? 
SCAFFOLDING RCB -- TEMPORARY + boundaries_img = 0 * img + boundaries_rows = [] + boundaries_cols = [] for row, col in zip(row_edges, col_edges): # TODO this for loop could be easily optimized by creating an intermediary image that is colored based on how many adjacent vertical or horizontal sky pixels there are # determine if such an optimization is worth doing # ~BGB20221003 is_boundary = False # Left - if (self.is_boundary_pixel(row-1, col, 'left') - and self.is_boundary_pixel(row, col, 'left') - and self.is_boundary_pixel(row+1, col, 'left')): + if ( + self.is_boundary_pixel(row - 1, col, 'left') + and self.is_boundary_pixel(row, col, 'left') + and self.is_boundary_pixel(row + 1, col, 'left') + ): boundaries_img[row, col, :] = LEFT_BOUNDARY_COLOR is_boundary = True # Right - elif (self.is_boundary_pixel(row-1, col, 'right') - and self.is_boundary_pixel(row, col, 'right') - and self.is_boundary_pixel(row+1, col, 'right')): + elif ( + self.is_boundary_pixel(row - 1, col, 'right') + and self.is_boundary_pixel(row, col, 'right') + and self.is_boundary_pixel(row + 1, col, 'right') + ): boundaries_img[row, col, :] = RIGHT_BOUNDARY_COLOR is_boundary = True # Top - elif (self.is_boundary_pixel(row, col-1, 'top') - and self.is_boundary_pixel(row, col, 'top') - and self.is_boundary_pixel(row, col+1, 'top')): + elif ( + self.is_boundary_pixel(row, col - 1, 'top') + and self.is_boundary_pixel(row, col, 'top') + and self.is_boundary_pixel(row, col + 1, 'top') + ): boundaries_img[row, col, :] = TOP_BOUNDARY_COLOR is_boundary = True # Bottom - elif (self.is_boundary_pixel(row, col-1, 'bottom') - and self.is_boundary_pixel(row, col, 'bottom') - and self.is_boundary_pixel(row, col+1, 'bottom')): + elif ( + self.is_boundary_pixel(row, col - 1, 'bottom') + and self.is_boundary_pixel(row, col, 'bottom') + and self.is_boundary_pixel(row, col + 1, 'bottom') + ): boundaries_img[row, col, :] = BOTTOM_BOUNDARY_COLOR is_boundary = True - + if is_boundary: boundaries_rows.append(row) 
boundaries_cols.append(col) - + if self.render_control.draw_boundaries: - save_image( img=boundaries_img, imgname=self.key_frame_id_str+'_03_boundaries.png', path=self.frame['output_construction_dir']) + save_image( + img=boundaries_img, + imgname=self.key_frame_id_str + '_03_boundaries.png', + path=self.frame['output_construction_dir'], + ) if self.render_control.draw_boundaries_fig: - save_fig( img=boundaries_img, imgname=self.key_frame_id_str+'_03_boundaries_fig.png', path=self.frame['output_construction_dir'], dpi=1000) + save_fig( + img=boundaries_img, + imgname=self.key_frame_id_str + '_03_boundaries_fig.png', + path=self.frame['output_construction_dir'], + dpi=1000, + ) boundaries = np.zeros((img.shape[0], img.shape[1])).astype('int') boundaries[boundaries_rows, boundaries_cols] = 1 return boundaries, boundaries_img - - def is_boundary_pixel(self, row:int, col:int, btype:str, required_sky_width:int=None, ignore_margin:int=None) -> bool: - """ Checks if the pixel at the given row/col is a mirror edge boundary pixel (it is assumed to be an edge pixel). - + def is_boundary_pixel( + self, + row: int, + col: int, + btype: str, + required_sky_width: int = None, + ignore_margin: int = None, + ) -> bool: + """Checks if the pixel at the given row/col is a mirror edge boundary pixel (it is assumed to be an edge pixel). 
+ Parameters ---------- btype: Which side of the mirror this pixel is on, one of 'left', 'top', 'right', or 'bottom' required_sky_width: How many pixels of sky must be adjacent to this pixel - ignore_margin: How many pixels of edge are assumed to be next to this pixel """ - edges = self.frame['edges'] - sky = self.frame['sky'] - img = self.frame['key_frame_img'] + ignore_margin: How many pixels of edge are assumed to be next to this pixel + """ + edges = self.frame['edges'] + sky = self.frame['sky'] + img = self.frame['key_frame_img'] if required_sky_width is None: - required_sky_width = REQUIRED_SKY_WIDTH + required_sky_width = REQUIRED_SKY_WIDTH if ignore_margin is None: - ignore_margin = IGNORE_MARGIN + ignore_margin = IGNORE_MARGIN # if required_non_sky_width is None: # required_non_sky_width = REQUIRED_NON_SKY_WIDTH - + max_row = img.shape[0] max_col = img.shape[1] if row < 0 or row >= max_row or col < 0 or col >= max_col: return False if btype == 'left': - low = col+ignore_margin - high = col+ignore_margin+required_sky_width + low = col + ignore_margin + high = col + ignore_margin + required_sky_width max_indx = max_col elif btype == 'top': - low = row+ignore_margin - high = row+ignore_margin+required_sky_width + low = row + ignore_margin + high = row + ignore_margin + required_sky_width max_indx = max_row elif btype == 'right': - low = col-(ignore_margin+required_sky_width) - high = col-ignore_margin + low = col - (ignore_margin + required_sky_width) + high = col - ignore_margin max_indx = max_col elif btype == 'bottom': - low = row-(ignore_margin+required_sky_width) - high = row-ignore_margin + low = row - (ignore_margin + required_sky_width) + high = row - ignore_margin max_indx = max_row - + for indx in range(low, high): if (indx < 0) or (indx >= max_indx): return False @@ -349,160 +490,235 @@ def is_boundary_pixel(self, row:int, col:int, btype:str, required_sky_width:int= else: is_sky = sky[indx, col] is_edge = edges[indx, col] - + if is_edge or not 
is_sky: return False return True - - def connected_components(self) -> tuple[list[Component], np.ndarray]: - """ Interpret the facet edges as "components" (groups of same-colored pixels). - + def connected_components(self) -> tuple[list[Component], np.ndarray]: + """Interpret the facet edges as "components" (groups of same-colored pixels). + Returns ------- components: the dict['original_pixels'] entries contains the list of component pixels. - component_img: the image with the components drawn on top of it. """ - def construct_component(row:int, col:int, btype:str, color:list[int], img:np.ndarray) -> Component: - """ Builds out a list of adjacent pixels that all have the same color (including diagonals). - + component_img: the image with the components drawn on top of it.""" + + def construct_component( + row: int, col: int, btype: str, color: list[int], img: np.ndarray + ) -> Component: + """Builds out a list of adjacent pixels that all have the same color (including diagonals). + Parameters ---------- btype: one of 'left', 'right', 'top', or 'bottom' color: the rgb color that corresponds to the given btype - + Returns ------- component: dict with key 'original_pixels': a list of xy pairs""" - component = { - 'color' : color, - 'boundary_type' : btype, - 'original_pixels' : [] - } + component = {'color': color, 'boundary_type': btype, 'original_pixels': []} horizon = [[row, col]] max_row = img.shape[0] max_col = img.shape[1] while len(horizon) > 0: - void_color = [0, 0, 0] - pixel_to_add = horizon.pop() + void_color = [0, 0, 0] + pixel_to_add = horizon.pop() component['original_pixels'].append(pixel_to_add) - r, c = pixel_to_add[0], pixel_to_add[1] - img[r,c, :] = void_color + r, c = pixel_to_add[0], pixel_to_add[1] + img[r, c, :] = void_color # add neighbors - if ( (r-1) >= 0) and ((c-1) >= 0) and (img[r-1,c-1,:] == color).all(axis=-1) and [r-1, c-1] not in horizon: horizon.append([r-1,c-1]) - if ( (r-1) >= 0) and (img[r-1,c,:] == color).all(axis=-1) and [r-1, 
c] not in horizon: horizon.append([r-1,c]) - if ( (r-1) >= 0) and ((c+1) < max_col) and (img[r-1,c+1,:] == color).all(axis=-1) and [r-1, c+1] not in horizon: horizon.append([r-1,c+1]) - if ((c-1) >= 0) and (img[r, c-1,:] == color).all(axis=-1) and [r, c-1] not in horizon: horizon.append([r, c-1]) - if ((c+1) < max_col) and (img[r, c+1,:] == color).all(axis=-1) and [r, c+1] not in horizon: horizon.append([r, c+1]) - if ((r+1) < max_row) and ((c-1) >= 0) and (img[r+1,c-1,:] == color).all(axis=-1) and [r+1, c-1] not in horizon: horizon.append([r+1,c-1]) - if ((r+1) < max_row) and (img[r+1,c,:] == color).all(axis=-1) and [r+1, c] not in horizon: horizon.append([r+1,c]) - if ((r+1) < max_row) and ((c+1) < max_col) and (img[r+1,c+1,:] == color).all(axis=-1) and [r+1, c+1] not in horizon: horizon.append([r+1,c+1]) + if ( + ((r - 1) >= 0) + and ((c - 1) >= 0) + and (img[r - 1, c - 1, :] == color).all(axis=-1) + and [r - 1, c - 1] not in horizon + ): + horizon.append([r - 1, c - 1]) + if ( + ((r - 1) >= 0) + and (img[r - 1, c, :] == color).all(axis=-1) + and [r - 1, c] not in horizon + ): + horizon.append([r - 1, c]) + if ( + ((r - 1) >= 0) + and ((c + 1) < max_col) + and (img[r - 1, c + 1, :] == color).all(axis=-1) + and [r - 1, c + 1] not in horizon + ): + horizon.append([r - 1, c + 1]) + if ( + ((c - 1) >= 0) + and (img[r, c - 1, :] == color).all(axis=-1) + and [r, c - 1] not in horizon + ): + horizon.append([r, c - 1]) + if ( + ((c + 1) < max_col) + and (img[r, c + 1, :] == color).all(axis=-1) + and [r, c + 1] not in horizon + ): + horizon.append([r, c + 1]) + if ( + ((r + 1) < max_row) + and ((c - 1) >= 0) + and (img[r + 1, c - 1, :] == color).all(axis=-1) + and [r + 1, c - 1] not in horizon + ): + horizon.append([r + 1, c - 1]) + if ( + ((r + 1) < max_row) + and (img[r + 1, c, :] == color).all(axis=-1) + and [r + 1, c] not in horizon + ): + horizon.append([r + 1, c]) + if ( + ((r + 1) < max_row) + and ((c + 1) < max_col) + and (img[r + 1, c + 1, :] == 
color).all(axis=-1) + and [r + 1, c + 1] not in horizon + ): + horizon.append([r + 1, c + 1]) return component def construct_component_img(components, img): - """ Draws the components on top of the given img """ - components_img = 0*img + """Draws the components on top of the given img""" + components_img = 0 * img for component in components: pixels = component['original_pixels'] - color = component['color'] + color = component['color'] for pixel in pixels: components_img[pixel[0], pixel[1], :] = color return components_img - - print('In KeyFrameCornerSearch.connected_components()...') # ?? SCAFFOLDING RCB -- TEMPORARY - img = self.frame['key_frame_img'] - boundaries = self.frame['boundaries'] - boundaries_img = self.frame['boundaries_img'] - #print('Estimating Connected Components ...') + print( + 'In KeyFrameCornerSearch.connected_components()...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY + img = self.frame['key_frame_img'] + boundaries = self.frame['boundaries'] + boundaries_img = self.frame['boundaries_img'] + + # print('Estimating Connected Components ...') components = [] rows, cols = np.nonzero(boundaries) copied_img = boundaries_img.copy() - for row, col in zip(rows,cols): - if (boundaries_img[row,col,:] == LEFT_BOUNDARY_COLOR).all(axis=-1): + for row, col in zip(rows, cols): + if (boundaries_img[row, col, :] == LEFT_BOUNDARY_COLOR).all(axis=-1): btype = 'left' color = LEFT_BOUNDARY_COLOR - elif (boundaries_img[row,col,:] == RIGHT_BOUNDARY_COLOR).all(axis=-1): + elif (boundaries_img[row, col, :] == RIGHT_BOUNDARY_COLOR).all(axis=-1): btype = 'right' color = RIGHT_BOUNDARY_COLOR - elif (boundaries_img[row,col,:] == TOP_BOUNDARY_COLOR).all(axis=-1): + elif (boundaries_img[row, col, :] == TOP_BOUNDARY_COLOR).all(axis=-1): btype = 'top' color = TOP_BOUNDARY_COLOR - elif (boundaries_img[row,col,:] == BOTTOM_BOUNDARY_COLOR).all(axis=-1): + elif (boundaries_img[row, col, :] == BOTTOM_BOUNDARY_COLOR).all(axis=-1): btype = 'bottom' color = BOTTOM_BOUNDARY_COLOR # else: 
#TODO I (BGB) think we need this catch-all case, because the copied_img is getting updated to be all black in construct_component # continue - + component = construct_component(row, col, btype, color, copied_img) components.append(component) - + # construct image - components_img = construct_component_img(components,img) + components_img = construct_component_img(components, img) if self.render_control.draw_components: - save_image( img=components_img, imgname=self.key_frame_id_str+'_04_components.png', path=self.frame['output_construction_dir']) + save_image( + img=components_img, + imgname=self.key_frame_id_str + '_04_components.png', + path=self.frame['output_construction_dir'], + ) if self.render_control.draw_components_fig: - save_fig( img=components_img, imgname=self.key_frame_id_str+'_04_components_fig.png', path=self.frame['output_construction_dir']) - + save_fig( + img=components_img, + imgname=self.key_frame_id_str + '_04_components_fig.png', + path=self.frame['output_construction_dir'], + ) + if self.render_control.write_components: - save_connected_components(components=components, filename='components.csv',path=self.frame['output_construction_dir']) - + save_connected_components( + components=components, + filename='components.csv', + path=self.frame['output_construction_dir'], + ) + return components, components_img + def filter_connected_components(self) -> tuple[list[Component], np.ndarray]: + """Filters self.frame['components'] to only include those that have at least COMPONENT_THRESHOLD pixels.""" - def filter_connected_components(self) -> tuple[list[Component], np.ndarray]: - """ Filters self.frame['components'] to only include those that have at least COMPONENT_THRESHOLD pixels. 
""" def construct_component_img(components, img): - components_img = 0*img + components_img = 0 * img for component in components: pixels = component['original_pixels'] - color = component['color'] + color = component['color'] for pixel in pixels: components_img[pixel[0], pixel[1], :] = color return components_img - - print('In KeyFrameCornerSearch.filter_connected_components()...') # ?? SCAFFOLDING RCB -- TEMPORARY - img = self.frame['key_frame_img'] + + print( + 'In KeyFrameCornerSearch.filter_connected_components()...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY + img = self.frame['key_frame_img'] components = self.frame['components'] filtered_components = [] for component in components: if len(component['original_pixels']) >= COMPONENT_THRESHOLD: filtered_components.append(component) - - if self.render_control.draw_filt_components or self.render_control.draw_filt_components_fig: + + if ( + self.render_control.draw_filt_components + or self.render_control.draw_filt_components_fig + ): filt_connected_comp_img = construct_component_img(filtered_components, img) if self.render_control.draw_filt_components: - save_image( img=filt_connected_comp_img, imgname=self.key_frame_id_str+'_05_filt_components.png', path=self.frame['output_construction_dir']) + save_image( + img=filt_connected_comp_img, + imgname=self.key_frame_id_str + '_05_filt_components.png', + path=self.frame['output_construction_dir'], + ) if self.render_control.draw_filt_components_fig: - save_fig( img=filt_connected_comp_img, imgname=self.key_frame_id_str+'_05_filt_components_fig.png', path=self.frame['output_construction_dir']) - + save_fig( + img=filt_connected_comp_img, + imgname=self.key_frame_id_str + '_05_filt_components_fig.png', + path=self.frame['output_construction_dir'], + ) + if self.render_control.write_filt_components: - save_connected_components(filtered_components, filename='filt_components.csv', path=self.frame['output_construction_dir']) + save_connected_components( + filtered_components, + 
filename='filt_components.csv', + path=self.frame['output_construction_dir'], + ) return filtered_components, filt_connected_comp_img - def fitted_lines_connected_components(self, type_fit='regression'): - """ Does an initial line fit on the pixels in the components. """ - print('In KeyFrameCornerSearch.fitted_lines_connected_components()...') # ?? SCAFFOLDING RCB -- TEMPORARY + """Does an initial line fit on the pixels in the components.""" + print( + 'In KeyFrameCornerSearch.fitted_lines_connected_components()...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY line_components = [] - components = self.frame['filt_components'] + components = self.frame['filt_components'] for component in components: new_component = fit_line_component(component=component, type_fit=type_fit) line_components.append(new_component) - + if self.render_control.write_fitted_lines_components: - save_fitted_lines_connected_components(components=line_components, - filename='fitted_lines_components.csv', - path=self.frame['output_construction_dir']) + save_fitted_lines_connected_components( + components=line_components, + filename='fitted_lines_components.csv', + path=self.frame['output_construction_dir'], + ) return line_components - def fitted_lines_inliers_components(self): - """ Updates the component to fit to the pixels that are closest to the original line fit. - + """Updates the component to fit to the pixels that are closest to the original line fit. 
+ New Component Keys ------------------ tolerance: the maximum distance that any pixel in inliers is from the fit line, float value from MIN_TOLERANCE to MAX_TOLERANCE (or 99) @@ -510,114 +726,140 @@ def fitted_lines_inliers_components(self): outliers_pixels: pixels that are outside the tolerance inliers_line_hom_coef: new A, B, and C for the new fit line Ax + By + C = 0 inliers_line_residual: numpy.polyfit residual - inliers_line_points: a pair of points [x1, y1, x2, y2] defining the line at the extremis of the component pixels """ - def find_inliers_component(component:Component): - A, B, C :float = component['original_line_hom_coef'] # from Ax + By + C = 0 - btype :str = component['boundary_type'] # 'left', 'right', 'top', or 'bottom' - original_pixels :list[list[int]] = component['original_pixels'] - required_inliers = int(round(INLIERS_THRESHOLD*len(original_pixels))) - - tolerance = MIN_TOLERANCE - max_tolerance = MAX_TOLERANCE - tol_step = TOL_STEP + inliers_line_points: a pair of points [x1, y1, x2, y2] defining the line at the extremis of the component pixels + """ + + def find_inliers_component(component: Component): + A, B, C = component['original_line_hom_coef'] # from Ax + By + C = 0 + btype: str = component[ + 'boundary_type' + ] # 'left', 'right', 'top', or 'bottom' + original_pixels: list[list[int]] = component['original_pixels'] + required_inliers = int(round(INLIERS_THRESHOLD * len(original_pixels))) + + tolerance = MIN_TOLERANCE + max_tolerance = MAX_TOLERANCE + tol_step = TOL_STEP while tolerance <= max_tolerance: - inliers = [] - inlier_cnt = 0 + inliers = [] + inlier_cnt = 0 for pixel in original_pixels: row, col = pixel[0], pixel[1] - if abs(A*col + B*row + C) <= tolerance: - inlier_cnt +=1 + if abs(A * col + B * row + C) <= tolerance: + inlier_cnt += 1 inliers.append(pixel) if inlier_cnt >= required_inliers: break tolerance += tol_step - + if tolerance > max_tolerance: # not sufficient inliers found # populate original information to inliers 
- component['tolerance'] = 99 - component['outliers_pixels'] = [] - component['inliers_pixels'] = component['original_pixels'] + component['tolerance'] = 99 + component['outliers_pixels'] = [] + component['inliers_pixels'] = component['original_pixels'] component['inliers_line_hom_coef'] = component['original_line_hom_coef'] component['inliers_line_residual'] = component['original_line_residual'] - component['inliers_line_points'] = component['original_line_points'] + component['inliers_line_points'] = component['original_line_points'] else: - row, col = np.array([a[0] for a in inliers]), np.array([a[1] for a in inliers]) + row, col = np.array([a[0] for a in inliers]), np.array( + [a[1] for a in inliers] + ) if btype == 'left' or btype == 'right': # expected horizontal line in terms of row x, y = row, col else: # expected horizontal line in terms of col - x, y = col,row - + x, y = col, row + reg_fit = np.polyfit(x, y, deg=1, full=True) m, b = reg_fit[0] residual = reg_fit[1][0] A_inl, B_inl, C_inl = -m, 1, -b norm = np.linalg.norm(np.array([A_inl, B_inl])) - A_inl, B_inl, C_inl = A_inl/norm, B_inl/norm, C_inl/norm + A_inl, B_inl, C_inl = A_inl / norm, B_inl / norm, C_inl / norm x1 = np.min(x) - y1 = (-A_inl*x1-C_inl)/B_inl + y1 = (-A_inl * x1 - C_inl) / B_inl x2 = np.max(x) - y2 = (-A_inl*x2-C_inl)/B_inl + y2 = (-A_inl * x2 - C_inl) / B_inl if btype == 'left' or btype == 'right': # swap x, y <-- why do we do this? 
~BGB A_inl, B_inl = B_inl, A_inl x1, y1 = y1, x1 x2, y2 = y2, x2 - start_point = [x1, y1] # point at first x pixel [first y for left/right], with the y [x] adjusted to lie on the fit line - A_inl, B_inl, C_inl = set_proper_hom_coef_sign(start_point, btype, A_inl, B_inl, C_inl) - outliers = [[pixel[0], pixel[1]] for pixel in original_pixels if pixel not in inliers] - component['tolerance'] = tolerance - component['outliers_pixels'] = outliers - component['inliers_pixels'] = inliers + start_point = [ + x1, + y1, + ] # point at first x pixel [first y for left/right], with the y [x] adjusted to lie on the fit line + A_inl, B_inl, C_inl = set_proper_hom_coef_sign( + start_point, btype, A_inl, B_inl, C_inl + ) + outliers = [ + [pixel[0], pixel[1]] + for pixel in original_pixels + if pixel not in inliers + ] + component['tolerance'] = tolerance + component['outliers_pixels'] = outliers + component['inliers_pixels'] = inliers component['inliers_line_hom_coef'] = [A_inl, B_inl, C_inl] component['inliers_line_residual'] = residual - component['inliers_line_points'] = [x1, y1, x2, y2] + component['inliers_line_points'] = [x1, y1, x2, y2] return component - - print('In KeyFrameCornerSearch.fitted_lines_inliers_components()...') # ?? SCAFFOLDING RCB -- TEMPORARY - components = self.frame['fitted_lines_components'] - inliers_components :list[Component] = [] + + print( + 'In KeyFrameCornerSearch.fitted_lines_inliers_components()...' + ) # ?? 
SCAFFOLDING RCB -- TEMPORARY + components = self.frame['fitted_lines_components'] + inliers_components: list[Component] = [] for component in components: new_component = find_inliers_component(component) inliers_components.append(new_component) - + if self.render_control.write_fitted_lines_inliers_components: - save_fitted_lines_inliers_connected_components(components=inliers_components, - filename='fitted_lines_inliers_components.csv', - path=self.frame['output_construction_dir']) - + save_fitted_lines_inliers_connected_components( + components=inliers_components, + filename='fitted_lines_inliers_components.csv', + path=self.frame['output_construction_dir'], + ) + return inliers_components def find_corners(self, corners_type=None): - """ Finds the corners based on the intersection points of the components in the image. - + """Finds the corners based on the intersection points of the components in the image. + Arguments --------- corners_type: one of 'top_left'/'top_right'/'bottom_right'/'bottom_left' (for debugging), or None for all four Returns ------- - corners: a list of corner dict lists [[TL],[TR],[BR],[BL]], where each dict contains the xy 'point' for the corner. """ - print('In KeyFrameCornerSearch.find_corners()...') # ?? SCAFFOLDING RCB -- TEMPORARY - top_left_corners :dict[str,Any] = [] - top_right_corners :dict[str,Any] = [] - bottom_right_corners :dict[str,Any] = [] - bottom_left_corners :dict[str,Any] = [] + corners: a list of corner dict lists [[TL],[TR],[BR],[BL]], where each dict contains the xy 'point' for the corner. + """ + print( + 'In KeyFrameCornerSearch.find_corners()...' + ) # ?? 
SCAFFOLDING RCB -- TEMPORARY + top_left_corners: dict[str, Any] = [] + top_right_corners: dict[str, Any] = [] + bottom_right_corners: dict[str, Any] = [] + bottom_left_corners: dict[str, Any] = [] output_construction_dir = self.frame['output_construction_dir'] - corners_types = [corners_type] if (corners_type != None) else ['top_left', 'top_right', 'bottom_right', 'bottom_left'] - + corners_types = ( + [corners_type] + if (corners_type != None) + else ['top_left', 'top_right', 'bottom_right', 'bottom_left'] + ) + max_row = self.frame['key_frame_img'].shape[0] max_col = self.frame['key_frame_img'].shape[1] all_corners = {} - components :list[Component] = self.frame['fitted_lines_inliers_components'] + components: list[Component] = self.frame['fitted_lines_inliers_components'] for corners_type in corners_types: # Get lists of all the counter-clockwise (tomatched) and clockwise (candidates) edges # for the given corner. Example, left and top edges for top_left. - components_tomatched :list[Component] = [] - components_candidates :list[Component] = [] + components_tomatched: list[Component] = [] + components_candidates: list[Component] = [] for component in components: btype = component['boundary_type'] if corners_type == 'top_left': @@ -640,23 +882,23 @@ def find_corners(self, corners_type=None): components_tomatched.append(component) elif btype == 'left': components_candidates.append(component) - + # Find the corners by searching through the intersecting lines that are # closest to each other. components_candidates_initial = components_candidates.copy() for component in components_tomatched: # Filter matching candidates for this component to those on the # mirror side of the line. 
- mirrorside_candidates :list[Component] = [] - A, B, C :int = component['inliers_line_hom_coef'] + mirrorside_candidates: list[Component] = [] + A, B, C = component['inliers_line_hom_coef'] components_candidates = components_candidates_initial for candidate in components_candidates: - pixels = candidate['inliers_pixels'] + pixels = candidate['inliers_pixels'] avg_row = sum([p[0] for p in pixels]) / len(pixels) avg_col = sum([p[1] for p in pixels]) / len(pixels) - if (A*avg_col + B*avg_row + C) < 0: + if (A * avg_col + B * avg_row + C) < 0: mirrorside_candidates.append(candidate) - + components_candidates = mirrorside_candidates mirrorside_candidates = [] pixels = component['inliers_pixels'] @@ -664,81 +906,128 @@ def find_corners(self, corners_type=None): avg_col = sum([p[1] for p in pixels]) / len(pixels) for candidate in components_candidates: A, B, C = candidate['inliers_line_hom_coef'] - if (A*avg_col + B*avg_row + C) < 0: + if (A * avg_col + B * avg_row + C) < 0: mirrorside_candidates.append(candidate) - + components_candidates = mirrorside_candidates if not len(components_candidates): continue - + # keep the component's representative pixels = component['inliers_pixels'] - if corners_type == 'top_left': # left edge - representative = sorted(pixels, key = lambda pix: pix[0], reverse=False)[0] # ascending - elif corners_type == 'top_right': # top edge - representative = sorted(pixels, key = lambda pix: pix[1], reverse=True)[0] # descending - elif corners_type == 'bottom_right': # right edge - representative = sorted(pixels, key = lambda pix: pix[0], reverse=True)[0] - elif corners_type == 'bottom_left': # bottom edge - representative = sorted(pixels, key = lambda pix: pix[1], reverse=False)[0] + if corners_type == 'top_left': # left edge + representative = sorted( + pixels, key=lambda pix: pix[0], reverse=False + )[ + 0 + ] # ascending + elif corners_type == 'top_right': # top edge + representative = sorted( + pixels, key=lambda pix: pix[1], reverse=True + )[ 
+ 0 + ] # descending + elif corners_type == 'bottom_right': # right edge + representative = sorted( + pixels, key=lambda pix: pix[0], reverse=True + )[0] + elif corners_type == 'bottom_left': # bottom edge + representative = sorted( + pixels, key=lambda pix: pix[1], reverse=False + )[0] representative_tomatched = representative # keep the closest ones representatives_candidates = [] for candidate in components_candidates: pixels = candidate['inliers_pixels'] - if corners_type == 'top_left': # top edge - representative = sorted(pixels, key = lambda pix: pix[1], reverse=False)[0] - elif corners_type == 'top_right': # right edge - representative = sorted(pixels, key = lambda pix: pix[0], reverse=False)[0] - elif corners_type == 'bottom_right': # bottom edge - representative = sorted(pixels, key = lambda pix: pix[1], reverse=True)[0] - elif corners_type == 'bottom_left': # left edge - representative = sorted(pixels, key = lambda pix: pix[0], reverse=True)[0] - + if corners_type == 'top_left': # top edge + representative = sorted( + pixels, key=lambda pix: pix[1], reverse=False + )[0] + elif corners_type == 'top_right': # right edge + representative = sorted( + pixels, key=lambda pix: pix[0], reverse=False + )[0] + elif corners_type == 'bottom_right': # bottom edge + representative = sorted( + pixels, key=lambda pix: pix[1], reverse=True + )[0] + elif corners_type == 'bottom_left': # left edge + representative = sorted( + pixels, key=lambda pix: pix[0], reverse=True + )[0] + representatives_candidates.append(representative) - - distances = [euclidean_distance(representative_tomatched, candidate) for candidate in representatives_candidates] + + distances = [ + euclidean_distance(representative_tomatched, candidate) + for candidate in representatives_candidates + ] found_candidate_indx = np.argsort(np.array(distances))[0] - found_candidate = components_candidates[found_candidate_indx] - + found_candidate = components_candidates[found_candidate_indx] + component_points = 
component['inliers_line_points'] - component_start_point = [component_points[0], component_points[1]] # col, row + component_start_point = [ + component_points[0], + component_points[1], + ] # col, row component_end_point = [component_points[2], component_points[3]] found_candidate_points = found_candidate['inliers_line_points'] - found_candidate_start_point = [found_candidate_points[0], found_candidate_points[1]] - found_candidate_end_point = [found_candidate_points[2], found_candidate_points[3]] - - corner = intersection_point(component_start_point[0],component_start_point[1], - component_end_point[0], component_end_point[1], - found_candidate_start_point[0], found_candidate_start_point[1], - found_candidate_end_point[0], found_candidate_end_point[1]) - + found_candidate_start_point = [ + found_candidate_points[0], + found_candidate_points[1], + ] + found_candidate_end_point = [ + found_candidate_points[2], + found_candidate_points[3], + ] + + corner = intersection_point( + component_start_point[0], + component_start_point[1], + component_end_point[0], + component_end_point[1], + found_candidate_start_point[0], + found_candidate_start_point[1], + found_candidate_end_point[0], + found_candidate_end_point[1], + ) + # Filter corners # Looking for distance conditions - pixels = component['inliers_pixels'] + pixels = component['inliers_pixels'] avg_row = sum([p[0] for p in pixels]) / len(pixels) avg_col = sum([p[1] for p in pixels]) / len(pixels) - - distance1 = euclidean_distance([corner[1], corner[0]], [avg_row, avg_col]) - - pixels = found_candidate['inliers_pixels'] + + distance1 = euclidean_distance( + [corner[1], corner[0]], [avg_row, avg_col] + ) + + pixels = found_candidate['inliers_pixels'] avg_row = sum([p[0] for p in pixels]) / len(pixels) avg_col = sum([p[1] for p in pixels]) / len(pixels) - distance2 = euclidean_distance([corner[1], corner[0]], [avg_row, avg_col]) - + distance2 = euclidean_distance( + [corner[1], corner[0]], [avg_row, avg_col] + ) + 
distance = max([distance1, distance2]) - - if distance >= SIDE_FACET_DISTANCE / 2: - continue - + + if distance >= SIDE_FACET_DISTANCE / 2: + continue + # if (euclidean_distance([corner[1], corner[0]], representative_tomatched) # > euclidean_distance(representative_tomatched, [avg_row, avg_col]) - # or (euclidean_distance([corner[1], corner[0]], [avg_row, avg_col]) + # or (euclidean_distance([corner[1], corner[0]], [avg_row, avg_col]) # < euclidean_distance(representative_tomatched, [avg_row, avg_col]))): # continue - if corner[1] >= 0 and corner[1] < max_row and corner[0] >= 0 and corner[0] < max_col: + if ( + corner[1] >= 0 + and corner[1] < max_row + and corner[0] >= 0 + and corner[0] < max_col + ): corner_structure = {} key1 = 'edge_coeff' key2 = 'edge_pixels' @@ -759,24 +1048,36 @@ def find_corners(self, corners_type=None): prefix1 = 'bottom_' prefix2 = 'left_' corners = bottom_left_corners - + corner_structure = { 'corner_type': corners_type, 'point': corner, prefix1 + key1: component['inliers_line_hom_coef'], prefix1 + key2: component['inliers_pixels'], - prefix1 + key3: [component_start_point[0],component_start_point[1], component_end_point[0], component_end_point[1]], + prefix1 + + key3: [ + component_start_point[0], + component_start_point[1], + component_end_point[0], + component_end_point[1], + ], prefix2 + key1: found_candidate['inliers_line_hom_coef'], prefix2 + key2: found_candidate['inliers_pixels'], - prefix2 + key3: [found_candidate_start_point[0], found_candidate_start_point[1], found_candidate_end_point[0], found_candidate_end_point[1]] + prefix2 + + key3: [ + found_candidate_start_point[0], + found_candidate_start_point[1], + found_candidate_end_point[0], + found_candidate_end_point[1], + ], } corners.append(corner_structure) all_corners[tuple(corner)] = corners_type - - top_left = [] - top_right = [] - bottom_right = [] - bottom_left = [] + + top_left = [] + top_right = [] + bottom_right = [] + bottom_left = [] for key, val in 
all_corners.items(): if val == 'top_left': top_left.append(list(key)) @@ -785,46 +1086,105 @@ def find_corners(self, corners_type=None): elif val == 'bottom_right': bottom_right.append(list(key)) elif val == 'bottom_left': - bottom_left.append(list(key)) - + bottom_left.append(list(key)) + if self.render_control.draw_corners: plt.figure() plt.imshow(self.frame['edges_img']) - plt.scatter([x[0] for x in top_left], [x[1] for x in top_left], marker='o', facecolor=PLT_TOP_LEFT_COLOR, s=5) - plt.scatter([x[0] for x in top_right], [x[1] for x in top_right], marker='o', facecolor=PLT_TOP_RIGHT_COLOR, s=5) - plt.scatter([x[0] for x in bottom_right], [x[1] for x in bottom_right], marker='o', facecolor=PLT_BOTTOM_RIGHT_COLOR, s=5) - plt.scatter([x[0] for x in bottom_left], [x[1] for x in bottom_left], marker='o', facecolor=PLT_BOTTOM_LEFT_COLOR, s=5) - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_08_corners.png'), dpi=200) + plt.scatter( + [x[0] for x in top_left], + [x[1] for x in top_left], + marker='o', + facecolor=PLT_TOP_LEFT_COLOR, + s=5, + ) + plt.scatter( + [x[0] for x in top_right], + [x[1] for x in top_right], + marker='o', + facecolor=PLT_TOP_RIGHT_COLOR, + s=5, + ) + plt.scatter( + [x[0] for x in bottom_right], + [x[1] for x in bottom_right], + marker='o', + facecolor=PLT_BOTTOM_RIGHT_COLOR, + s=5, + ) + plt.scatter( + [x[0] for x in bottom_left], + [x[1] for x in bottom_left], + marker='o', + facecolor=PLT_BOTTOM_LEFT_COLOR, + s=5, + ) + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_08_corners.png', + ), + dpi=200, + ) plt.close() - + if self.render_control.write_top_left_corners: - save_corners_facets(corners=top_left_corners, filename='top_left_corners.csv',path=self.frame['output_construction_dir'], corners_type='top_left') + save_corners_facets( + corners=top_left_corners, + filename='top_left_corners.csv', + path=self.frame['output_construction_dir'], + 
corners_type='top_left', + ) if self.render_control.write_top_right_corners: - save_corners_facets(corners=top_right_corners, filename='top_right_corners.csv',path=self.frame['output_construction_dir'], corners_type='top_right') + save_corners_facets( + corners=top_right_corners, + filename='top_right_corners.csv', + path=self.frame['output_construction_dir'], + corners_type='top_right', + ) if self.render_control.write_bottom_right_corners: - save_corners_facets(corners=bottom_right_corners, filename='bottom_right_corners.csv',path=self.frame['output_construction_dir'], corners_type='bottom_right') + save_corners_facets( + corners=bottom_right_corners, + filename='bottom_right_corners.csv', + path=self.frame['output_construction_dir'], + corners_type='bottom_right', + ) if self.render_control.write_bottom_left_corners: - save_corners_facets(corners=bottom_left_corners, filename='bottom_left_corners.csv',path=self.frame['output_construction_dir'], corners_type='bottom_left') - - return [top_left_corners, top_right_corners, bottom_right_corners, bottom_left_corners] - + save_corners_facets( + corners=bottom_left_corners, + filename='bottom_left_corners.csv', + path=self.frame['output_construction_dir'], + corners_type='bottom_left', + ) + + return [ + top_left_corners, + top_right_corners, + bottom_right_corners, + bottom_left_corners, + ] def facets(self): print('In KeyFrameCornerSearch.facets()...') # ?? 
SCAFFOLDING RCB -- TEMPORARY - top_left_corners, \ - top_right_corners, \ - bottom_right_corners, \ - bottom_left_corners = self.frame['corners'] + ( + top_left_corners, + top_right_corners, + bottom_right_corners, + bottom_left_corners, + ) = self.frame['corners'] already_matched_corners = [] - facets = [] + facets = [] # For each Top Left corner for top_left_corner in top_left_corners: # Finding Top Right Corner - top_left_point = top_left_corner['point'] - left_edge_points = top_left_corner['left_edge_points'] - A_left, B_left, C_left = top_left_corner['left_edge_coeff'] + top_left_point = top_left_corner['point'] + left_edge_points = top_left_corner['left_edge_points'] + A_left, B_left, C_left = top_left_corner['left_edge_coeff'] # distances with top right corners - top_right_distances = [euclidean_distance(top_left_point, top_right_corner['point']) for top_right_corner in top_right_corners] + top_right_distances = [ + euclidean_distance(top_left_point, top_right_corner['point']) + for top_right_corner in top_right_corners + ] indices = np.argsort(np.array(top_right_distances)) flag = True for indice in indices: @@ -832,110 +1192,192 @@ def facets(self): top_right_point = top_right_corner['point'] top_edge_points = top_right_corner['top_edge_points'] # confirm intersection point is top left corner - interpoint = intersection_point(left_edge_points[0], left_edge_points[1], - left_edge_points[2], left_edge_points[3], - top_edge_points[0], top_edge_points[1], - top_edge_points[2], top_edge_points[3]) - + interpoint = intersection_point( + left_edge_points[0], + left_edge_points[1], + left_edge_points[2], + left_edge_points[3], + top_edge_points[0], + top_edge_points[1], + top_edge_points[2], + top_edge_points[3], + ) + distance = euclidean_distance(top_left_point, interpoint) - if ((A_left*top_right_point[0] + B_left*top_right_point[1] + C_left < 0) # on the right side - and (distance <= INTER_POINT_DISTANCE)): # ?? 
MAGIC NUMBER + if ( + A_left * top_right_point[0] + B_left * top_right_point[1] + C_left + < 0 + ) and ( # on the right side + distance <= INTER_POINT_DISTANCE + ): # ?? MAGIC NUMBER flag = False - break - - if flag or top_left_corner in already_matched_corners or top_right_corner in already_matched_corners: + break + + if ( + flag + or top_left_corner in already_matched_corners + or top_right_corner in already_matched_corners + ): continue - + # Finding Bottom Right corner - top_right_point = top_right_corner['point'] - top_edge_points = top_right_corner['top_edge_points'] + top_right_point = top_right_corner['point'] + top_edge_points = top_right_corner['top_edge_points'] A_top, B_top, C_top = top_right_corner['top_edge_coeff'] # distances with bottom right corners - bottom_right_distances = [euclidean_distance(top_right_point, bottom_right_corner['point']) for bottom_right_corner in bottom_right_corners] + bottom_right_distances = [ + euclidean_distance(top_right_point, bottom_right_corner['point']) + for bottom_right_corner in bottom_right_corners + ] indices = np.argsort(np.array(bottom_right_distances)) flag = True for indice in indices: bottom_right_corner = bottom_right_corners[indice] - bottom_right_point = bottom_right_corner['point'] - right_edge_points = bottom_right_corner['right_edge_points'] + bottom_right_point = bottom_right_corner['point'] + right_edge_points = bottom_right_corner['right_edge_points'] # confirm intersection point is top right corner - interpoint = intersection_point(top_edge_points[0], top_edge_points[1], - top_edge_points[2], top_edge_points[3], - right_edge_points[0], right_edge_points[1], - right_edge_points[2], right_edge_points[3]) - + interpoint = intersection_point( + top_edge_points[0], + top_edge_points[1], + top_edge_points[2], + top_edge_points[3], + right_edge_points[0], + right_edge_points[1], + right_edge_points[2], + right_edge_points[3], + ) + distance = euclidean_distance(top_right_point, interpoint) - if 
((A_top*bottom_right_point[0] + B_top*bottom_right_point[1] + C_top < 0) # on the right side - and (A_left*bottom_right_point[0] + B_left*bottom_right_point[1] + C_left < 0) # on the right side - and (distance <= INTER_POINT_DISTANCE)): # ?? MAGIC NUMBER + if ( + ( + A_top * bottom_right_point[0] + + B_top * bottom_right_point[1] + + C_top + < 0 + ) # on the right side + and ( + A_left * bottom_right_point[0] + + B_left * bottom_right_point[1] + + C_left + < 0 + ) # on the right side + and (distance <= INTER_POINT_DISTANCE) + ): # ?? MAGIC NUMBER flag = False break - + if flag or bottom_right_corner in already_matched_corners: continue - + # Finding Bottom Left corner bottom_right_point = bottom_right_corner['point'] - right_edge_points = bottom_right_corner['right_edge_points'] + right_edge_points = bottom_right_corner['right_edge_points'] A_right, B_right, C_right = bottom_right_corner['right_edge_coeff'] # distances with bottom left corners - bottom_left_distances = [euclidean_distance(bottom_right_point, bottom_left_corner['point']) for bottom_left_corner in bottom_left_corners] + bottom_left_distances = [ + euclidean_distance(bottom_right_point, bottom_left_corner['point']) + for bottom_left_corner in bottom_left_corners + ] indices = np.argsort(bottom_left_distances) - + flag = True for indice in indices: - bottom_left_corner = bottom_left_corners[indice] - bottom_left_point = bottom_left_corner['point'] - bottom_edge_points = bottom_left_corner['bottom_edge_points'] + bottom_left_corner = bottom_left_corners[indice] + bottom_left_point = bottom_left_corner['point'] + bottom_edge_points = bottom_left_corner['bottom_edge_points'] # confirm intersection point is bottom right corner - interpoint = intersection_point(right_edge_points[0], right_edge_points[1], - right_edge_points[2], right_edge_points[3], - bottom_edge_points[0], bottom_edge_points[1], - bottom_edge_points[2], bottom_edge_points[3]) + interpoint = intersection_point( + right_edge_points[0], + 
right_edge_points[1], + right_edge_points[2], + right_edge_points[3], + bottom_edge_points[0], + bottom_edge_points[1], + bottom_edge_points[2], + bottom_edge_points[3], + ) # confirm intersection point is top left corner - interpoint_alt = intersection_point(left_edge_points[0], left_edge_points[1], - left_edge_points[2], left_edge_points[3], - bottom_edge_points[0], bottom_edge_points[1], - bottom_edge_points[2], bottom_edge_points[3]) - - distance = euclidean_distance(bottom_right_point, interpoint) - distance_alt = euclidean_distance(interpoint_alt, bottom_left_point) - if ((A_right*bottom_left_point[0] + B_right*bottom_left_point[1] + C_right < 0) # on the right side - and (A_top*bottom_left_point[0] + B_top*bottom_left_point[1] + C_top < 0) # on the below side - and (distance <= INTER_POINT_DISTANCE) # ?? MAGIC NUMBER - and (distance_alt <= INTER_POINT_DISTANCE)): # ?? MAGIC NUMBER + interpoint_alt = intersection_point( + left_edge_points[0], + left_edge_points[1], + left_edge_points[2], + left_edge_points[3], + bottom_edge_points[0], + bottom_edge_points[1], + bottom_edge_points[2], + bottom_edge_points[3], + ) + + distance = euclidean_distance(bottom_right_point, interpoint) + distance_alt = euclidean_distance(interpoint_alt, bottom_left_point) + if ( + ( + A_right * bottom_left_point[0] + + B_right * bottom_left_point[1] + + C_right + < 0 + ) # on the right side + and ( + A_top * bottom_left_point[0] + + B_top * bottom_left_point[1] + + C_top + < 0 + ) # on the below side + and (distance <= INTER_POINT_DISTANCE) # ?? MAGIC NUMBER + and (distance_alt <= INTER_POINT_DISTANCE) + ): # ?? 
MAGIC NUMBER flag = False break - + if flag or bottom_left_corner in already_matched_corners: continue - - center = intersection_point(top_left_point[0], top_left_point[1], - bottom_right_point[0], bottom_right_point[1], - top_right_point[0], top_right_point[1], - bottom_left_point[0], bottom_left_point[1]) - + + center = intersection_point( + top_left_point[0], + top_left_point[1], + bottom_right_point[0], + bottom_right_point[1], + top_right_point[0], + top_right_point[1], + bottom_left_point[0], + bottom_left_point[1], + ) + # center should satisfy boundary constraints - hom_coeff = [top_left_corner['left_edge_coeff'], top_right_corner['top_edge_coeff'], - bottom_right_corner['right_edge_coeff'], bottom_left_corner['left_edge_coeff']] - + hom_coeff = [ + top_left_corner['left_edge_coeff'], + top_right_corner['top_edge_coeff'], + bottom_right_corner['right_edge_coeff'], + bottom_left_corner['left_edge_coeff'], + ] + flag = True - for A,B,C in hom_coeff: - if not (A*center[0] + B*center[1] + C < 0): + for A, B, C in hom_coeff: + if not (A * center[0] + B * center[1] + C < 0): flag = False if not flag: continue - + c = int(center[0]) r = int(center[1]) flag = False - - required_sky_width = int(REQUIRED_SKY_WIDTH/4) - if (self.is_boundary_pixel(r, c, 'left', required_sky_width=required_sky_width) - and self.is_boundary_pixel(r,c,'top', required_sky_width=required_sky_width) - and self.is_boundary_pixel(r,c,'right', required_sky_width=required_sky_width) - and self.is_boundary_pixel(r,c,'bottom', required_sky_width=required_sky_width)): - flag = True + + required_sky_width = int(REQUIRED_SKY_WIDTH / 4) + if ( + self.is_boundary_pixel( + r, c, 'left', required_sky_width=required_sky_width + ) + and self.is_boundary_pixel( + r, c, 'top', required_sky_width=required_sky_width + ) + and self.is_boundary_pixel( + r, c, 'right', required_sky_width=required_sky_width + ) + and self.is_boundary_pixel( + r, c, 'bottom', required_sky_width=required_sky_width + ) + ): + flag = 
True if not flag: continue facet = { @@ -943,103 +1385,179 @@ def facets(self): "top_right": top_right_corner, "bottom_right": bottom_right_corner, "bottom_left": bottom_left_corner, - "center":center + "center": center, } already_matched_corners.append(top_left_corner) already_matched_corners.append(top_right_corner) already_matched_corners.append(bottom_right_corner) already_matched_corners.append(bottom_left_corner) facets.append(facet) - + if self.render_control.draw_facets: plt.figure() plt.imshow(self.frame['edges_img']) for facet in facets: - top_left_corner = facet['top_left']['point'] - top_right_corner = facet['top_right']['point'] + top_left_corner = facet['top_left']['point'] + top_right_corner = facet['top_right']['point'] bottom_right_corner = facet['bottom_right']['point'] - bottom_left_corner = facet['bottom_left']['point'] - center = facet['center'] - plt.scatter(top_left_corner[0], top_left_corner[1], facecolor=PLT_TOP_LEFT_COLOR,s=1) - plt.scatter(top_right_corner[0], top_right_corner[1], facecolor=PLT_TOP_RIGHT_COLOR,s=1) - plt.scatter(bottom_right_corner[0], bottom_right_corner[1], facecolor=PLT_BOTTOM_RIGHT_COLOR,s=1) - plt.scatter(bottom_left_corner[0], bottom_left_corner[1], facecolor=PLT_BOTTOM_LEFT_COLOR,s=1) + bottom_left_corner = facet['bottom_left']['point'] + center = facet['center'] + plt.scatter( + top_left_corner[0], + top_left_corner[1], + facecolor=PLT_TOP_LEFT_COLOR, + s=1, + ) + plt.scatter( + top_right_corner[0], + top_right_corner[1], + facecolor=PLT_TOP_RIGHT_COLOR, + s=1, + ) + plt.scatter( + bottom_right_corner[0], + bottom_right_corner[1], + facecolor=PLT_BOTTOM_RIGHT_COLOR, + s=1, + ) + plt.scatter( + bottom_left_corner[0], + bottom_left_corner[1], + facecolor=PLT_BOTTOM_LEFT_COLOR, + s=1, + ) plt.scatter(center[0], center[1], facecolor=PLT_CENTER_COLOR, s=1) - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_09_facets.png'), dpi=200) + plt.savefig( + os.path.join( + 
self.frame['output_construction_dir'], + self.key_frame_id_str + '_09_facets.png', + ), + dpi=200, + ) plt.close() - + if self.render_control.write_facets: - save_corners_facets(facets=facets, filename='facets.csv', path=self.frame['output_construction_dir']) + save_corners_facets( + facets=facets, + filename='facets.csv', + path=self.frame['output_construction_dir'], + ) return facets - def filter_facets_polygons(self): - print('In KeyFrameCornerSearch.filter_facets_polygons()...') # ?? SCAFFOLDING RCB -- TEMPORARY - all_facets = self.frame['facets'] - + print( + 'In KeyFrameCornerSearch.filter_facets_polygons()...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY + all_facets = self.frame['facets'] + # filter the facets filtered_facets = [] for facet in all_facets: center = facet['center'] in_polygon = False for name_polygon in self.list_of_name_polygons: - name = name_polygon[0] + name = name_polygon[0] polygon = name_polygon[1] - x_list = [pt[0] for pt in polygon] - y_list = [pt[1] for pt in polygon] + x_list = [pt[0] for pt in polygon] + y_list = [pt[1] for pt in polygon] x_min = min(x_list) x_max = max(x_list) y_min = min(y_list) y_max = max(y_list) - if (x_min < center[0]) and (center[0] < x_max) and \ - (y_min < center[1]) and (center[1] < y_max): + if ( + (x_min < center[0]) + and (center[0] < x_max) + and (y_min < center[1]) + and (center[1] < y_max) + ): in_polygon = True break if in_polygon: filtered_facets.append(facet) - + # heliostats as many as the boxes heliostats = [] assigned_centers = [] for name_polygon in self.list_of_name_polygons: - name = name_polygon[0] + name = name_polygon[0] polygon = name_polygon[1] - x_list = [pt[0] for pt in polygon] # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. - y_list = [pt[1] for pt in polygon] # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. - x_min = min(x_list) # ?? 
SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. - x_max = max(x_list) # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. - y_min = min(y_list) # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. - y_max = max(y_list) # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. + x_list = [ + pt[0] for pt in polygon + ] # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. + y_list = [ + pt[1] for pt in polygon + ] # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. + x_min = min( + x_list + ) # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. + x_max = max( + x_list + ) # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. + y_min = min( + y_list + ) # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. + y_max = max( + y_list + ) # ?? SCAFFOLDING RCB -- REDUNDANT AND INEFFIICENT; IMPLEMENT POINT IN POLYGON TEST, OR SIMILAR. 
heliostat = {} heliostat['name'] = name heliostat['facets'] = [] for facet in filtered_facets: center = facet['center'] - if (center not in assigned_centers) and \ - ((x_min < center[0]) and (center[0] < x_max) and \ - (y_min < center[1]) and (center[1] < y_max)): + if (center not in assigned_centers) and ( + (x_min < center[0]) + and (center[0] < x_max) + and (y_min < center[1]) + and (center[1] < y_max) + ): assigned_centers.append(center) heliostat['facets'].append(facet) heliostats.append(heliostat) - + if self.render_control.draw_filtered_facets: plt.figure() plt.imshow(self.frame['edges_img']) for facet in filtered_facets: - top_left_corner = facet['top_left']['point'] - top_right_corner = facet['top_right']['point'] + top_left_corner = facet['top_left']['point'] + top_right_corner = facet['top_right']['point'] bottom_right_corner = facet['bottom_right']['point'] - bottom_left_corner = facet['bottom_left']['point'] - center = facet['center'] - plt.scatter(top_left_corner[0], top_left_corner[1], facecolor=PLT_TOP_LEFT_COLOR,s=1) - plt.scatter(top_right_corner[0], top_right_corner[1], facecolor=PLT_TOP_RIGHT_COLOR,s=1) - plt.scatter(bottom_right_corner[0], bottom_right_corner[1], facecolor=PLT_BOTTOM_RIGHT_COLOR,s=1) - plt.scatter(bottom_left_corner[0], bottom_left_corner[1], facecolor=PLT_BOTTOM_LEFT_COLOR,s=1) + bottom_left_corner = facet['bottom_left']['point'] + center = facet['center'] + plt.scatter( + top_left_corner[0], + top_left_corner[1], + facecolor=PLT_TOP_LEFT_COLOR, + s=1, + ) + plt.scatter( + top_right_corner[0], + top_right_corner[1], + facecolor=PLT_TOP_RIGHT_COLOR, + s=1, + ) + plt.scatter( + bottom_right_corner[0], + bottom_right_corner[1], + facecolor=PLT_BOTTOM_RIGHT_COLOR, + s=1, + ) + plt.scatter( + bottom_left_corner[0], + bottom_left_corner[1], + facecolor=PLT_BOTTOM_LEFT_COLOR, + s=1, + ) plt.scatter(center[0], center[1], facecolor=PLT_CENTER_COLOR, s=1) - plt.savefig(os.path.join(self.frame['output_construction_dir'], 
self.key_frame_id_str+'_10_filtered_facets.png'), dpi=200) + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_10_filtered_facets.png', + ), + dpi=200, + ) plt.close() - + if self.render_control.draw_filtered_heliostats: colors = ['c', 'r', 'g', 'y', 'p'] plt.figure() @@ -1050,43 +1568,59 @@ def filter_facets_polygons(self): for facet in heliostat['facets']: center = facet['center'] plt.scatter(center[0], center[1], facecolor=color, s=1) - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_11_filtered_heliostats.png'), dpi=200) + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_11_filtered_heliostats.png', + ), + dpi=200, + ) plt.close() - - return filtered_facets, heliostats + return filtered_facets, heliostats def top_row_facets(self): """ Assumption: We trust first row in terms of correct found centers """ - print('In KeyFrameCornerSearch.top_row_facets()...') # ?? SCAFFOLDING RCB -- TEMPORARY + print( + 'In KeyFrameCornerSearch.top_row_facets()...' + ) # ?? 
SCAFFOLDING RCB -- TEMPORARY for heliostat in self.frame['heliostats']: - facets = heliostat['facets'] # this is a list - facets = sorted(facets, key = lambda f: f['center'][1]) # sort in terms of rows - top_row_facets = facets[:self.specifications.facets_per_row] # top row facets - keys = [[['bottom_right', 'bottom_edge_coeff'], ['bottom_left', 'bottom_edge_coeff']], - [['top_left', 'top_edge_coeff'],['top_right', 'top_edge_coeff']]] + facets = heliostat['facets'] # this is a list + facets = sorted( + facets, key=lambda f: f['center'][1] + ) # sort in terms of rows + top_row_facets = facets[ + : self.specifications.facets_per_row + ] # top row facets + keys = [ + [ + ['bottom_right', 'bottom_edge_coeff'], + ['bottom_left', 'bottom_edge_coeff'], + ], + [['top_left', 'top_edge_coeff'], ['top_right', 'top_edge_coeff']], + ] for type_of_keys in keys: coeff = [] for key_list in type_of_keys: key1, key2 = key_list for facet in top_row_facets: coeff.append(facet[key1][key2]) - - facet_indx = len(top_row_facets) - 1 - cnt = len(top_row_facets) - while facet_indx >= 0: - facet = top_row_facets[facet_indx] - center = facet['center'] - flag = True + + facet_indx = len(top_row_facets) - 1 + cnt = len(top_row_facets) + while facet_indx >= 0: + facet = top_row_facets[facet_indx] + center = facet['center'] + flag = True for c in coeff: - flag *= (center[0]*c[0] + center[1]*c[1] + c[2] < 0) - + flag *= center[0] * c[0] + center[1] * c[1] + c[2] < 0 + if not flag: cnt -= 1 facet_indx -= 1 - + top_row_facets = top_row_facets[:cnt] heliostat['top_row_facets'] = top_row_facets @@ -1098,10 +1632,17 @@ def top_row_facets(self): for heliostat in self.frame['heliostats']: top_row_facets = heliostat['top_row_facets'] for facet in top_row_facets: - plt.scatter(facet['center'][0], facet['center'][1], s=1, facecolor='m') - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_14_top_row_facets.png'), dpi=200) + plt.scatter( + facet['center'][0], 
facet['center'][1], s=1, facecolor='m' + ) + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_14_top_row_facets.png', + ), + dpi=200, + ) plt.close() - def classify_top_row_facets(self): def find_combinations(inp, out): @@ -1109,7 +1650,7 @@ def find_combinations(inp, out): if len(out) != 0: all_combinations.append(out) return - + find_combinations(inp[1:], out[:]) if len(out) == 0: find_combinations(inp[1:], inp[:1]) @@ -1117,12 +1658,18 @@ def find_combinations(inp, out): out.append(inp[0]) find_combinations(inp[1:], out[:]) - print('In KeyFrameCornerSearch.classify_top_row_facets()...') # ?? SCAFFOLDING RCB -- TEMPORARY + print( + 'In KeyFrameCornerSearch.classify_top_row_facets()...' + ) # ?? SCAFFOLDING RCB -- TEMPORARY for heliostat in self.frame['heliostats']: top_row_facets = heliostat['top_row_facets'] - top_row_facets = sorted(top_row_facets, key = lambda f: f['center'][0]) # sort in terms of column - ids = [i for i in range(0, self.specifications.facets_per_row)] - if len(top_row_facets) == self.specifications.facets_per_row: # all facets have been identified + top_row_facets = sorted( + top_row_facets, key=lambda f: f['center'][0] + ) # sort in terms of column + ids = [i for i in range(0, self.specifications.facets_per_row)] + if ( + len(top_row_facets) == self.specifications.facets_per_row + ): # all facets have been identified for facet_indx in range(0, len(top_row_facets)): facet = top_row_facets[facet_indx] facet['id'] = facet_indx @@ -1131,7 +1678,9 @@ def find_combinations(inp, out): # all diferent combinations, brute-force - Complexity O(self.specifications.facets_per_row!) 
all_combinations = [] find_combinations(ids, []) - combinations = [x for x in all_combinations if len(x) == len(top_row_facets)] + combinations = [ + x for x in all_combinations if len(x) == len(top_row_facets) + ] # image points img_centers2d = [] @@ -1141,36 +1690,43 @@ def find_combinations(inp, out): for key in CLOCKWISE_DIR: img_corners2d.append(facet[key]['point']) points2d = np.array(img_corners2d + img_centers2d).astype('float32') - + centers3d = self.specifications.facets_centroids corners3d = self.specifications.facets_corners -#centers3d = read_centers3d(facet_centoids_csv) -#corners3d = centers3d_to_corners3d(centers3d) - rank = [] - proj_errors = [] + # centers3d = read_centers3d(facet_centoids_csv) + # corners3d = centers3d_to_corners3d(centers3d) + rank = [] + proj_errors = [] for combination in combinations: # object points - flat heliostat obj_centers3d = [] obj_corners3d = [] for i in combination: obj_centers3d.append(centers3d[i]) - corners_indx = i*self.specifications.corners_per_facet - for indx in range(corners_indx, corners_indx + self.specifications.corners_per_facet): + corners_indx = i * self.specifications.corners_per_facet + for indx in range( + corners_indx, + corners_indx + self.specifications.corners_per_facet, + ): obj_corners3d.append(corners3d[indx]) ## Projection points3d = np.array(obj_corners3d + obj_centers3d).astype('float32') h, w = self.frame['key_frame_img'].shape[:2] - _, _, _, _, error = solvePNP(points3d, points2d, h, w, pnptype=self.solvePnPtype) + _, _, _, _, error = solvePNP( + points3d, points2d, h, w, pnptype=self.solvePnPtype + ) proj_errors.append(error) # select based on projected error best_indx = np.argsort(np.array(proj_errors))[0] selected_combination = combinations[best_indx] - for facet_indx, i in zip(range(0,len(top_row_facets)), selected_combination): + for facet_indx, i in zip( + range(0, len(top_row_facets)), selected_combination + ): facet = top_row_facets[facet_indx] facet['id'] = i 
top_row_facets[facet_indx] = facet - + if self.render_control.draw_top_row_facets_labels: edge_img = self.frame['edges_img'] plt.figure() @@ -1179,68 +1735,106 @@ def find_combinations(inp, out): top_row_facets = heliostat['top_row_facets'] for facet in top_row_facets: center = facet['center'] - label = facet['id'] + label = facet['id'] plt.scatter(center[0], center[1], s=1, facecolor='m') - plt.annotate(str(label), (center[0], center[1]), color='c', fontsize=5) - - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_15_top_row_facets_labels.png'), dpi=200) + plt.annotate( + str(label), (center[0], center[1]), color='c', fontsize=5 + ) + + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_15_top_row_facets_labels.png', + ), + dpi=200, + ) plt.close() - - def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterations=5): - print('In KeyFrameCornerSearch.project_and_confirm()...') # ?? SCAFFOLDING RCB -- TEMPORARY - edge_img = self.frame['edges_img'] # demonstration - h, w = self.frame['key_frame_img'].shape[:2] -#facet_centoids_csv = self.facet_centroids_dir_body_ext + 'csv_files/' + 'Facets_Centroids.csv' + def project_and_confirm( + self, canny_levels=['tight', 'normal', 'light'], iterations=5 + ): + print( + 'In KeyFrameCornerSearch.project_and_confirm()...' + ) # ?? 
SCAFFOLDING RCB -- TEMPORARY + edge_img = self.frame['edges_img'] # demonstration + h, w = self.frame['key_frame_img'].shape[:2] + # facet_centoids_csv = self.facet_centroids_dir_body_ext + 'csv_files/' + 'Facets_Centroids.csv' centers3d = self.specifications.facets_centroids corners3d = self.specifications.facets_corners -#centers3d = read_centers3d(facet_centoids_csv) -#corners3d = centers3d_to_corners3d(centers3d) + # centers3d = read_centers3d(facet_centoids_csv) + # corners3d = centers3d_to_corners3d(centers3d) for heliostat in self.frame['heliostats']: top_row_facets = heliostat['top_row_facets'] top_row_facets = sorted(top_row_facets, key=lambda x: x['id']) # Image Points - imgcorners = [] - imgcenters = [] + imgcorners = [] + imgcenters = [] for facet in top_row_facets: imgcenters.append(facet['center']) for key in CLOCKWISE_DIR: imgcorners.append(facet[key]['point']) - - points2d = np.array(imgcorners + imgcenters).astype('float32') + + points2d = np.array(imgcorners + imgcenters).astype('float32') # Object Points - labels = [f['id'] for f in top_row_facets] - objcorners = [] - objcenters = [] + labels = [f['id'] for f in top_row_facets] + objcorners = [] + objcenters = [] for label in labels: objcenters.append(centers3d[label]) - corner_indx = label*self.specifications.corners_per_facet - for indx in range(corner_indx, corner_indx + self.specifications.corners_per_facet): + corner_indx = label * self.specifications.corners_per_facet + for indx in range( + corner_indx, corner_indx + self.specifications.corners_per_facet + ): objcorners.append(corners3d[indx]) - + points3d = np.array(objcorners + objcenters).astype('float32') if len(points3d) != len(points2d): - msg = 'In KeyFrameCornerSearch.project_and_confirm(), len(points3d)='+str(len(points3d))+' does not equal len(points3d)='+str(len(points3d)) + msg = ( + 'In KeyFrameCornerSearch.project_and_confirm(), len(points3d)=' + + str(len(points3d)) + + ' does not equal len(points3d)=' + + str(len(points3d)) 
+ ) print('ERROR: ' + msg) raise ValueError(msg) if len(points3d) < 4: - msg = 'In KeyFrameCornerSearch.project_and_confirm(), len(points3d)='+str(len(points3d))+' is not at least 4. Not expected here.' + msg = ( + 'In KeyFrameCornerSearch.project_and_confirm(), len(points3d)=' + + str(len(points3d)) + + ' is not at least 4. Not expected here.' + ) print('ERROR: ' + msg) raise ValueError(msg) - print('In KeyFrameCornerSearch.project_and_confirm(), calling solvePNP(); len(points3d) =', len(points3d), '; len(points2d) =', len(points2d)) # ?? SCAFFOLDING RCB -- TEMPORARY - mtx, dist, \ - rvec, tvec, \ - pnp_error = solvePNP(points3d, points2d, h, w, pnptype=self.solvePnPtype) - - proj_corners, _ = cv.projectPoints(np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist) - proj_corners = proj_corners.reshape(-1, 2) - proj_corners = proj_corners.tolist() - - confirmed_corners, projected_corners = self.confirm(proj_corners, corners3d, canny_levels=canny_levels,iterations=iterations) - - if not projected_corners: # no corner was confirmed, discard this heliostat # ?? SCAFFOLDING RCB -- DOES THIS EVER WORK, GIVEN THAT WE ARE LOOKING AT OUTPUT OF EXPECTED CORNERS ROUTINE? + print( + 'In KeyFrameCornerSearch.project_and_confirm(), calling solvePNP(); len(points3d) =', + len(points3d), + '; len(points2d) =', + len(points2d), + ) # ?? SCAFFOLDING RCB -- TEMPORARY + mtx, dist, rvec, tvec, pnp_error = solvePNP( + points3d, points2d, h, w, pnptype=self.solvePnPtype + ) + + proj_corners, _ = cv.projectPoints( + np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist + ) + proj_corners = proj_corners.reshape(-1, 2) + proj_corners = proj_corners.tolist() + + confirmed_corners, projected_corners = self.confirm( + proj_corners, + corners3d, + canny_levels=canny_levels, + iterations=iterations, + ) + + if ( + not projected_corners + ): # no corner was confirmed, discard this heliostat # ?? 
SCAFFOLDING RCB -- DOES THIS EVER WORK, GIVEN THAT WE ARE LOOKING AT OUTPUT OF EXPECTED CORNERS ROUTINE? continue - heliostat['confirmed_corners'] = self.convert_None_corners(confirmed_corners) # Replace None entries with [-1,-1] + heliostat['confirmed_corners'] = self.convert_None_corners( + confirmed_corners + ) # Replace None entries with [-1,-1] heliostat['projected_corners'] = projected_corners final_heliostats = [] @@ -1255,15 +1849,27 @@ def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterati list_of_name_confirmed_corners = [] list_of_name_projected_corners = [] for heliostat in final_heliostats: - all_confirmed_corners += heliostat['confirmed_corners'] # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. - all_projected_corners += heliostat['projected_corners'] # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. - list_of_name_confirmed_corners.append([heliostat['name'], heliostat['confirmed_corners']]) - list_of_name_projected_corners.append([heliostat['name'], heliostat['projected_corners']]) + all_confirmed_corners += heliostat[ + 'confirmed_corners' + ] # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. + all_projected_corners += heliostat[ + 'projected_corners' + ] # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. + list_of_name_confirmed_corners.append( + [heliostat['name'], heliostat['confirmed_corners']] + ) + list_of_name_projected_corners.append( + [heliostat['name'], heliostat['projected_corners']] + ) # FrameNameXyList objects. 
confirmed_fnxl = fnxl.FrameNameXyList() projected_fnxl = fnxl.FrameNameXyList() - confirmed_fnxl.add_list_of_name_xy_lists(self.key_frame_id, list_of_name_confirmed_corners) - projected_fnxl.add_list_of_name_xy_lists(self.key_frame_id, list_of_name_projected_corners) + confirmed_fnxl.add_list_of_name_xy_lists( + self.key_frame_id, list_of_name_confirmed_corners + ) + projected_fnxl.add_list_of_name_xy_lists( + self.key_frame_id, list_of_name_projected_corners + ) # Store in this class object. self.frame['all_confirmed_corners'] = all_confirmed_corners self.frame['all_projected_corners'] = all_projected_corners @@ -1271,13 +1877,33 @@ def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterati self.frame['projected_fnxl'] = projected_fnxl # Write to disk. if self.render_control.write_all_confirmed_corners: - save_corners(corners=all_confirmed_corners, filename='all_confirmed_corners.csv', path=self.frame['output_construction_dir']) # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. + save_corners( + corners=all_confirmed_corners, + filename='all_confirmed_corners.csv', + path=self.frame['output_construction_dir'], + ) # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. if self.render_control.write_all_projected_corners: - save_corners(corners=all_projected_corners, filename='all_projected_corners.csv', path=self.frame['output_construction_dir']) # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. + save_corners( + corners=all_projected_corners, + filename='all_projected_corners.csv', + path=self.frame['output_construction_dir'], + ) # ?? SCAFFOLDING RCB -- AFTER THIS REFACTOR IS COMPLETE, MAYBE THESE ARE NO LONGER NEEDED. if self.render_control.write_confirmed_fnxl: - confirmed_fnxl.save(os.path.join(self.frame['output_construction_dir'], 'csv_files', (self.key_frame_id_str + '_confirmed_fnxl.csv'))) # ?? 
SCAFFOLDING RCB -- INSTEAD OF ADDING "CSV_FILES" HERE, SHOULD BE PASSED IN THAT WAY. SAVE TO ANSWER DIRECTORY? -- PROBABLY NOT; INSEAD SAVE FROM CALLER? + confirmed_fnxl.save( + os.path.join( + self.frame['output_construction_dir'], + 'csv_files', + (self.key_frame_id_str + '_confirmed_fnxl.csv'), + ) + ) # ?? SCAFFOLDING RCB -- INSTEAD OF ADDING "CSV_FILES" HERE, SHOULD BE PASSED IN THAT WAY. SAVE TO ANSWER DIRECTORY? -- PROBABLY NOT; INSEAD SAVE FROM CALLER? if self.render_control.write_projected_fnxl: - projected_fnxl.save(os.path.join(self.frame['output_construction_dir'], 'csv_files', (self.key_frame_id_str + '_projected_fnxl.csv'))) # ?? SCAFFOLDING RCB -- INSTEAD OF ADDING "CSV_FILES" HERE, SHOULD BE PASSED IN THAT WAY. SAVE TO ANSWER DIRECTORY? -- PROBABLY NOT; INSEAD SAVE FROM CALLER? + projected_fnxl.save( + os.path.join( + self.frame['output_construction_dir'], + 'csv_files', + (self.key_frame_id_str + '_projected_fnxl.csv'), + ) + ) # ?? SCAFFOLDING RCB -- INSTEAD OF ADDING "CSV_FILES" HERE, SHOULD BE PASSED IN THAT WAY. SAVE TO ANSWER DIRECTORY? -- PROBABLY NOT; INSEAD SAVE FROM CALLER? if self.render_control.draw_confirmed_corners: # Confirmed corners. @@ -1285,25 +1911,45 @@ def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterati plt.figure() plt.imshow(edge_img) for final_heliostat in final_heliostats: - found_confirmed_corners = self.filter_not_found_corners(final_heliostat['confirmed_corners']) + found_confirmed_corners = self.filter_not_found_corners( + final_heliostat['confirmed_corners'] + ) # Draw the heliostat name. - if len(found_confirmed_corners) > 0: # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - label_xy = g2d.label_point(found_confirmed_corners) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - plt.text(label_xy[0], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. 
- label_xy[1], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - final_heliostat['name'], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - color='c', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - verticalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontsize=5, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontweight='bold') # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + if ( + len(found_confirmed_corners) > 0 + ): # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + label_xy = g2d.label_point( + found_confirmed_corners + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + plt.text( + label_xy[ + 0 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + label_xy[ + 1 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + final_heliostat[ + 'name' + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + color='c', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + verticalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. 
+ fontsize=5, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontweight='bold', + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # Draw confirmed corners. for corner in found_confirmed_corners: if corner is not None: plt.scatter(corner[0], corner[1], s=1, facecolor='c') # Save the figure. - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_16_confirmed_corners.png'), dpi=200) + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_16_confirmed_corners.png', + ), + dpi=200, + ) plt.close() if self.render_control.draw_projected_corners: @@ -1314,17 +1960,29 @@ def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterati for final_heliostat in final_heliostats: projected_corners = final_heliostat['projected_corners'] # Draw the heliostat name. - if len(projected_corners) > 0: # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - label_xy = g2d.label_point(projected_corners) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - plt.text(label_xy[0], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - label_xy[1], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - final_heliostat['name'], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - color='m', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - verticalalignment='center', # ?? 
SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontsize=5, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontweight='bold') # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + if ( + len(projected_corners) > 0 + ): # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + label_xy = g2d.label_point( + projected_corners + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + plt.text( + label_xy[ + 0 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + label_xy[ + 1 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + final_heliostat[ + 'name' + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + color='m', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + verticalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontsize=5, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontweight='bold', + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # Draw projected corners. 
for corner in projected_corners: if corner is None: @@ -1333,7 +1991,13 @@ def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterati raise ValueError(msg) plt.scatter(corner[0], corner[1], s=1, facecolor='m') # Save the figure. - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_17_projected_corners.png'), dpi=200) + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_17_projected_corners.png', + ), + dpi=200, + ) plt.close() if self.render_control.draw_projected_and_confirmed_corners: @@ -1343,19 +2007,33 @@ def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterati plt.imshow(edge_img) for final_heliostat in final_heliostats: projected_corners = final_heliostat['projected_corners'] - found_confirmed_corners = self.filter_not_found_corners(final_heliostat['confirmed_corners']) + found_confirmed_corners = self.filter_not_found_corners( + final_heliostat['confirmed_corners'] + ) # Draw the heliostat name. - if len(projected_corners) > 0: # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - label_xy = g2d.label_point(projected_corners) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - plt.text(label_xy[0], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - label_xy[1], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - final_heliostat['name'], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - color='m', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - verticalalignment='center', # ?? 
SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontsize=5, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. - fontweight='bold') # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + if ( + len(projected_corners) > 0 + ): # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + label_xy = g2d.label_point( + projected_corners + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + plt.text( + label_xy[ + 0 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + label_xy[ + 1 + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + final_heliostat[ + 'name' + ], # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + color='m', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + horizontalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + verticalalignment='center', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontsize=5, # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontstyle='normal', # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. + fontweight='bold', + ) # ?? SCAFFOLDING RCB -- MAKE THIS INTEGRATED WITH STANDARD PLOTTING AND RENDER CONTROL ROUTINES. # Draw projected corners. 
for corner in projected_corners: if corner is None: @@ -1368,27 +2046,53 @@ def project_and_confirm(self, canny_levels=['tight', 'normal', 'light'], iterati if corner is not None: plt.scatter(corner[0], corner[1], s=1, facecolor='c') # Save the figure. - plt.savefig(os.path.join(self.frame['output_construction_dir'], self.key_frame_id_str+'_18_projected_and_confirmed_corners.png'), dpi=200) + plt.savefig( + os.path.join( + self.frame['output_construction_dir'], + self.key_frame_id_str + '_18_projected_and_confirmed_corners.png', + ), + dpi=200, + ) plt.close() - - # Sort heliostats left to right # ?? SCAFFOLDING RCB -- MAY NOT DO ANYTHING, IN WHICH CASE WE SHOULD DELETE. WILL BE INCORRECT IF BOXES ARE NOT SORTED LEFT TO RIGHT? - final_heliostats = sorted(final_heliostats, key=lambda x: np.mean(np.array(x['projected_corners']), axis=0)[0]) # ?? SCAFFOLDING RCB -- MAY NOT DO ANYTHING, IN WHICH CASE WE SHOULD DELETE. WILL BE INCORRECT IF BOXES ARE NOT SORTED LEFT TO RIGHT? - - def confirm(self, expected_corners, corners3d, canny_levels=['tight', 'normal', 'light'], tolerance=3, pixels=100, iterations=5): - h, w = self.frame['key_frame_img'].shape[:2] - max_row = self.frame['key_frame_img'].shape[0] - max_col = self.frame['key_frame_img'].shape[1] + # Sort heliostats left to right # ?? SCAFFOLDING RCB -- MAY NOT DO ANYTHING, IN WHICH CASE WE SHOULD DELETE. WILL BE INCORRECT IF BOXES ARE NOT SORTED LEFT TO RIGHT? + final_heliostats = sorted( + final_heliostats, + key=lambda x: np.mean(np.array(x['projected_corners']), axis=0)[0], + ) # ?? SCAFFOLDING RCB -- MAY NOT DO ANYTHING, IN WHICH CASE WE SHOULD DELETE. WILL BE INCORRECT IF BOXES ARE NOT SORTED LEFT TO RIGHT? 
+ + def confirm( + self, + expected_corners, + corners3d, + canny_levels=['tight', 'normal', 'light'], + tolerance=3, + pixels=100, + iterations=5, + ): + h, w = self.frame['key_frame_img'].shape[:2] + max_row = self.frame['key_frame_img'].shape[0] + max_col = self.frame['key_frame_img'].shape[1] def confirm_facets(expected_corners, edges, tolerance, pixels): confirmed_facets = {} - for indx in range(0, len(expected_corners), self.specifications.corners_per_facet): - facet_id = indx // self.specifications.corners_per_facet - corners = [expected_corners[indx + i] for i in range(0, self.specifications.corners_per_facet)] - for corner_indx in range(0,len(corners)): + for indx in range( + 0, len(expected_corners), self.specifications.corners_per_facet + ): + facet_id = indx // self.specifications.corners_per_facet + corners = [ + expected_corners[indx + i] + for i in range(0, self.specifications.corners_per_facet) + ] + for corner_indx in range(0, len(corners)): corner = corners[corner_indx] - if corner[0] >= max_col or corner[0] < 0 or corner[1] >= max_row or corner[1] < 0: - corners[corner_indx]=None + if ( + corner[0] >= max_col + or corner[0] < 0 + or corner[1] >= max_row + or corner[1] < 0 + ): + corners[corner_indx] = None confirmed_facets[facet_id] = { 'edges': confirm_facet_edges(corners, edges, tolerance, pixels) @@ -1397,10 +2101,10 @@ def confirm_facets(expected_corners, edges, tolerance, pixels): def confirm_facet_edges(corners, edges, tolerance, pixels): confirmed_edges = [] - corners.append(corners[0]) # cyclic - for indx in range(0, len(corners)-1): + corners.append(corners[0]) # cyclic + for indx in range(0, len(corners) - 1): corner1 = corners[indx] - corner2 = corners[indx+1] + corner2 = corners[indx + 1] if corner1 is None or corner2 is None: confirmed_edges.append(None) continue @@ -1408,117 +2112,137 @@ def confirm_facet_edges(corners, edges, tolerance, pixels): A, B, C = find_hom_line_2points(corner1, corner2) if A is None: continue - min_col, 
max_col, min_row, max_row = min_max_col_row(edges, corner1, corner2) + min_col, max_col, min_row, max_row = min_max_col_row( + edges, corner1, corner2 + ) edge_pixels = [] # confirming if indx % 2 == 0: for row in range(min_row, max_row): for col in range(min_col, max_col): - dist = abs(A*col + B*row +C) + dist = abs(A * col + B * row + C) if edges[row][col] and dist <= tolerance: cnt = 0 edge_pixels.append([col, row]) else: for col in range(min_col, max_col): for row in range(min_row, max_row): - dist = abs(A*col + B*row +C) + dist = abs(A * col + B * row + C) if edges[row][col] and dist <= tolerance: cnt = 0 edge_pixels.append([col, row]) if len(edge_pixels) < pixels: - confirmed_edges.append(None) # edge was not confirmed + confirmed_edges.append(None) # edge was not confirmed continue - + # confirmed edge edge_coeff = fit_line_pixels(edge_pixels) edge_inliers_coeff = fit_line_inliers_pixels(edge_pixels, edge_coeff) confirmed_edges.append(edge_inliers_coeff) - + return confirmed_edges - + def find_corners(confirmed_facets): - hel_corners = [None for _ in range(0, self.specifications.corners_per_heliostat)] + hel_corners = [ + None for _ in range(0, self.specifications.corners_per_heliostat) + ] for facet_indx, facet in confirmed_facets.items(): corners = [] - edges = facet['edges'] - edges.append(edges[0]) # cyclic - for edge_indx in range(0, len(edges)-1): + edges = facet['edges'] + edges.append(edges[0]) # cyclic + for edge_indx in range(0, len(edges) - 1): edge0 = edges[edge_indx] - edge1 = edges[edge_indx+1] + edge1 = edges[edge_indx + 1] if edge0 is not None and edge1 is not None: corners.append(findIntersectionLines(edge0, edge1)) else: corners.append(None) corners.insert(0, corners.pop()) indx = facet_indx * self.specifications.corners_per_facet - for i, j in zip(range(indx, indx+self.specifications.corners_per_facet), range(0, self.specifications.corners_per_facet)): + for i, j in zip( + range(indx, indx + self.specifications.corners_per_facet), + 
range(0, self.specifications.corners_per_facet), + ): hel_corners[i] = corners[j] return hel_corners def construct_points(confirmed_corners, corners3d): imgcorners = [] - objcorners = [] + objcorners = [] for indx in range(0, len(confirmed_corners)): if confirmed_corners[indx] is not None: imgcorners.append(confirmed_corners[indx]) objcorners.append(corners3d[indx]) - + points3d = np.array(objcorners).astype('float32') points2d = np.array(imgcorners).astype('float32') - + return points3d, points2d - + canny_types = canny_levels for i in range(0, iterations): - flag_break=False + flag_break = False for canny_type in canny_types: - img = self.frame['key_frame_img'] # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. # ?? SCAFFOLDING RCB -- REDUNDANT IMAGE LOAD, BLUR, AND EDGE FINDING. COMPUTE ONCE, CACHE AND COMMUNICATE. - img = cv.GaussianBlur(img, (5,5), 0) # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. # ?? SCAFFOLDING RCB -- REDUNDANT IMAGE LOAD, BLUR, AND EDGE FINDING. COMPUTE ONCE, CACHE AND COMMUNICATE. - edges = CannyImg(img, canny_type=canny_type) # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. # ?? SCAFFOLDING RCB -- REDUNDANT IMAGE LOAD, BLUR, AND EDGE FINDING. COMPUTE ONCE, CACHE AND COMMUNICATE. -#edges = CannyImg(self.frame['sky'], canny_type=canny_type) # ?? SCAFFOLDING RCB -- ORIGINAL CODE, MULTIPLE FAILURE IMPLICATIONS: (1) USING SKY, WHEN SKY WAS NOT USERED PREVIOUSLY. (2) CAUSES OPENCV TO CRASH. (THANKFULLY; OTHERWISE I WOULDN'T HAVE FOUND THE OTHER BUG.) - confirmed_facets = confirm_facets(expected_corners, edges, tolerance, pixels) + img = self.frame[ + 'key_frame_img' + ] # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. # ?? 
SCAFFOLDING RCB -- REDUNDANT IMAGE LOAD, BLUR, AND EDGE FINDING. COMPUTE ONCE, CACHE AND COMMUNICATE. + img = cv.GaussianBlur( + img, (5, 5), 0 + ) # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. # ?? SCAFFOLDING RCB -- REDUNDANT IMAGE LOAD, BLUR, AND EDGE FINDING. COMPUTE ONCE, CACHE AND COMMUNICATE. + edges = CannyImg( + img, canny_type=canny_type + ) # ?? SCAFFOLDING RCB -- SEE CANNY() AND CONFIRM() ROUTINES FOR DUPLICATE PLACES WHERE THIS CODE IS PLACED. MUST KEEP CONSISTENT. # ?? SCAFFOLDING RCB -- REDUNDANT IMAGE LOAD, BLUR, AND EDGE FINDING. COMPUTE ONCE, CACHE AND COMMUNICATE. + # edges = CannyImg(self.frame['sky'], canny_type=canny_type) # ?? SCAFFOLDING RCB -- ORIGINAL CODE, MULTIPLE FAILURE IMPLICATIONS: (1) USING SKY, WHEN SKY WAS NOT USERED PREVIOUSLY. (2) CAUSES OPENCV TO CRASH. (THANKFULLY; OTHERWISE I WOULDN'T HAVE FOUND THE OTHER BUG.) + confirmed_facets = confirm_facets( + expected_corners, edges, tolerance, pixels + ) confirmed_corners = find_corners(confirmed_facets) flag_break = True for corner in confirmed_corners: - flag_break *= (corner is None) + flag_break *= corner is None if flag_break: expected_corners = [] break - # if not enough corners were confirmed + # if not enough corners were confirmed points3d, points2d = construct_points(confirmed_corners, corners3d) if len(points3d) != len(points2d): - msg = 'In KeyFrameCornerSearch.confirm(), len(points3d)='+str(len(points3d))+' does not equal len(points2d)='+str(len(points2d)) + msg = ( + 'In KeyFrameCornerSearch.confirm(), len(points3d)=' + + str(len(points3d)) + + ' does not equal len(points2d)=' + + str(len(points2d)) + ) print('ERROR: ' + msg) raise ValueError(msg) if len(points3d) < 4: # Four points needed for solvePNP(). 
expected_corners = [] break - mtx, dist, \ - rvec, tvec, \ - pnp_error = solvePNP(points3d, points2d, h, w, pnptype=self.solvePnPtype) - expected_corners, _ = cv.projectPoints(np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist) - expected_corners = expected_corners.reshape(-1, 2) - expected_corners = expected_corners.tolist() + mtx, dist, rvec, tvec, pnp_error = solvePNP( + points3d, points2d, h, w, pnptype=self.solvePnPtype + ) + expected_corners, _ = cv.projectPoints( + np.array(corners3d).astype('float32'), rvec, tvec, mtx, dist + ) + expected_corners = expected_corners.reshape(-1, 2) + expected_corners = expected_corners.tolist() if flag_break: break - - projected_corners = expected_corners - return confirmed_corners, projected_corners + projected_corners = expected_corners + return confirmed_corners, projected_corners def convert_None_corners(self, input_corners): corners_with_None_entries_converted = [] for corner_entry in input_corners: if corner_entry is None: - corners_with_None_entries_converted.append([-1,-1]) + corners_with_None_entries_converted.append([-1, -1]) else: corners_with_None_entries_converted.append(corner_entry) return corners_with_None_entries_converted - def filter_not_found_corners(self, input_corners): found_corners = [] for corner_entry in input_corners: - if corner_entry != [-1,-1]: + if corner_entry != [-1, -1]: found_corners.append(corner_entry) return found_corners diff --git a/contrib/scripts/SensitiveStringMatcher.py b/contrib/scripts/SensitiveStringMatcher.py index 9ea63487e..2cf9c7f63 100644 --- a/contrib/scripts/SensitiveStringMatcher.py +++ b/contrib/scripts/SensitiveStringMatcher.py @@ -118,7 +118,7 @@ def check_lines(self, lines: list[str]): matching: dict[re.Pattern | str, list[int]] = {} for pattern in possible_matching: span = possible_matching[pattern] - line_part = iline[span[0]: span[1]] + line_part = iline[span[0] : span[1]] if len(self._search_patterns(line_part, self.neg_patterns)) == 0: matching[pattern] 
= span @@ -130,9 +130,9 @@ def check_lines(self, lines: list[str]): line_part = line[start:end] line_context = f"`{line_part}`" if start > 0: - line_context = line[max(start - 5, 0): start] + line_context + line_context = line[max(start - 5, 0) : start] + line_context if end < len(line): - line_context = line_context + line[end: min(end + 5, len(line))] + line_context = line_context + line[end : min(end + 5, len(line))] match = Match(lineno + 1, start, end, line, line_part, self) self.set_match_msg(match, pattern, line_context) diff --git a/contrib/scripts/sensitive_strings.py b/contrib/scripts/sensitive_strings.py index 1da9e2ba6..ca9b7188d 100644 --- a/contrib/scripts/sensitive_strings.py +++ b/contrib/scripts/sensitive_strings.py @@ -83,13 +83,17 @@ def norm_path(self, file_path, file_name_ext: str): ) def _is_file_in_cleared_cache(self, file_path: str, file_name_ext: str): - cache_entry = fc.FileCache.for_file(self.root_search_dir, file_path, file_name_ext) + cache_entry = fc.FileCache.for_file( + self.root_search_dir, file_path, file_name_ext + ) if cache_entry in self.cached_cleared_files: return True return False def _register_file_in_cleared_cache(self, file_path: str, file_name_ext: str): - cache_entry = fc.FileCache.for_file(self.root_search_dir, file_path, file_name_ext) + cache_entry = fc.FileCache.for_file( + self.root_search_dir, file_path, file_name_ext + ) self.new_cached_cleared_files.append(cache_entry) def _is_binary_file(self, file_path: str, file_name_ext: str): @@ -101,8 +105,9 @@ def _is_binary_file(self, file_path: str, file_name_ext: str): if self._is_img_ext(ext): if ext in self._text_file_extensions: is_binary_file = False - elif (f"{file_path}/{file_name_ext}" in self._text_file_path_name_exts) or \ - (file_name_ext in self._text_file_path_name_exts): + elif (f"{file_path}/{file_name_ext}" in self._text_file_path_name_exts) or ( + file_name_ext in self._text_file_path_name_exts + ): is_binary_file = False else: is_binary_file = True @@ 
-116,7 +121,9 @@ def _is_binary_file(self, file_path: str, file_name_ext: str): return is_binary_file - def _enqueue_binary_file_for_later_processing(self, file_path: str, file_name_ext: str): + def _enqueue_binary_file_for_later_processing( + self, file_path: str, file_name_ext: str + ): file_ff = ff.FileFingerprint.for_file( self.root_search_dir, file_path, file_name_ext ) @@ -311,7 +318,9 @@ def interactive_image_sign_off( description=f"{file_ff.relative_path}/{file_ff.name_ext}", ) else: - lt.info("Unknown image file failed to open. Do you want to sign off on this file anyways (y/n)?") + lt.info( + "Unknown image file failed to open. Do you want to sign off on this file anyways (y/n)?" + ) val = input("")[0] lt.info(f" User responded '{val}'") if val.lower() == 'y': @@ -403,13 +412,26 @@ def search_files(self): git = st.get_executable_path("git", "mobaxterm") git_committed = st.run( f"\"{git}\" ls-tree --full-tree --name-only -r HEAD", - cwd=self.root_search_dir, stdout="collect", stderr="print") + cwd=self.root_search_dir, + stdout="collect", + stderr="print", + ) git_added = st.run( f"\"{git}\" diff --name-only --cached --diff-filter=A", - cwd=self.root_search_dir, stdout="collect", stderr="print") + cwd=self.root_search_dir, + stdout="collect", + stderr="print", + ) files = [line.val for line in git_committed + git_added] # don't include "git rm"'d files - files = list(filter(lambda file: ft.file_exists(os.path.join(self.root_search_dir, file)), files)) + files = list( + filter( + lambda file: ft.file_exists( + os.path.join(self.root_search_dir, file) + ), + files, + ) + ) lt.info(f"Searching for sensitive strings in {len(files)} tracked files") else: files = ft.files_in_directory( @@ -430,7 +452,9 @@ def search_files(self): # need to check this file if self._is_binary_file(file_path, file_name_ext): # deal with non-parseable binary files as a group, below - self._enqueue_binary_file_for_later_processing(file_path, file_name_ext) + 
self._enqueue_binary_file_for_later_processing( + file_path, file_name_ext + ) else: # check text files for sensitive strings file_matches = self.search_file(file_path, file_name_ext) @@ -454,9 +478,7 @@ def search_files(self): lt.info("") lt.error(os.path.join(file_ff.relative_path, file_ff.name_ext)) if len(self.unknown_binary_files) > 0: - lt.warn( - f"Found {len(self.unknown_binary_files)} unexpected binary files:" - ) + lt.warn(f"Found {len(self.unknown_binary_files)} unexpected binary files:") # Deal with unknown binary files if len(self.unknown_binary_files) > 0: @@ -478,12 +500,16 @@ def search_files(self): # First, make a backup copy of the allowed list csv file if num_signed_binary_files == 0: - path, name, ext = ft.path_components(self.allowed_binary_files_csv) + path, name, ext = ft.path_components( + self.allowed_binary_files_csv + ) backup_name_ext = f"{name}_backup_{self.date_time_str}{ext}" backup_path_name_ext = os.path.join(path, backup_name_ext) if ft.file_exists(backup_path_name_ext): ft.delete_file(backup_path_name_ext) - ft.copy_file(self.allowed_binary_files_csv, path, backup_name_ext) + ft.copy_file( + self.allowed_binary_files_csv, path, backup_name_ext + ) # Overwrite the allowed list csv file with the updated allowed_binary_files path, name, ext = ft.path_components(self.allowed_binary_files_csv) @@ -514,10 +540,14 @@ def search_files(self): if num_signed_binary_files > 0: path, name, ext = ft.path_components(self.allowed_binary_files_csv) abfc_stamped_name_ext = f"{name}_{self.date_time_str}{ext}" - abfc_stamped_path_name_ext = os.path.join(path, abfc_stamped_name_ext) + abfc_stamped_path_name_ext = os.path.join( + path, abfc_stamped_name_ext + ) if ft.file_exists(abfc_stamped_path_name_ext): ft.delete_file(abfc_stamped_path_name_ext) - ft.copy_file(self.allowed_binary_files_csv, path, abfc_stamped_name_ext) + ft.copy_file( + self.allowed_binary_files_csv, path, abfc_stamped_name_ext + ) # for file_ff in unknowns_copy # if 
len(self.unknown_binary_files) > 0: @@ -525,9 +555,12 @@ def search_files(self): for file_ff in self.allowed_binary_files + self.unfound_allowed_binary_files: for file_cf in self.new_cached_cleared_files: if file_ff.eq_aff(file_cf): - lt.error_and_raise(RuntimeError, "Programmer error in sensitive_strings.search_files(): " + - "No binary files should be in the cache, but at least 1 such file was found: " + - f"\"{file_cf.relative_path}/{file_cf.name_ext}\"") + lt.error_and_raise( + RuntimeError, + "Programmer error in sensitive_strings.search_files(): " + + "No binary files should be in the cache, but at least 1 such file was found: " + + f"\"{file_cf.relative_path}/{file_cf.name_ext}\"", + ) # Save the cleared files cache for file_ff in self.unknown_binary_files: @@ -555,9 +588,13 @@ def search_files(self): info_or_warn("<<>>" if ret == 0 else "<<>>") info_or_warn(f"Found {len(matches)} sensitive string matches") if len(self.unfound_allowed_binary_files) > 0: - info_or_warn(f"Did not find {len(self.unfound_allowed_binary_files)} expected binary files") + info_or_warn( + f"Did not find {len(self.unfound_allowed_binary_files)} expected binary files" + ) else: - info_or_warn(f"Found {len(self.allowed_binary_files)} expected binary files") + info_or_warn( + f"Found {len(self.allowed_binary_files)} expected binary files" + ) info_or_warn(f"Found {len(self.unknown_binary_files)} unexpected binary files") # Add a 'match' for any unfound or unknown binary files @@ -583,8 +620,12 @@ def search_files(self): parser = argparse.ArgumentParser( prog=__file__.rstrip(".py"), description='Sensitive strings searcher' ) - parser.add_argument('--no-interactive', action='store_true', dest="ninteractive", - help="Don't interactively ask the user about unknown binary files. Simply fail instead.") + parser.add_argument( + '--no-interactive', + action='store_true', + dest="ninteractive", + help="Don't interactively ask the user about unknown binary files. 
Simply fail instead.", + ) args = parser.parse_args() not_interactive: bool = args.ninteractive diff --git a/contrib/scripts/test/test_FileCache.py b/contrib/scripts/test/test_FileCache.py index fe072b2ef..64be9ee40 100644 --- a/contrib/scripts/test/test_FileCache.py +++ b/contrib/scripts/test/test_FileCache.py @@ -21,7 +21,7 @@ def setUp(self) -> None: ft.create_directories_if_necessary(self.out_dir) def _delay_1_second(self): - """ sleeps up to 1 second so that the file modification time looks different """ + """sleeps up to 1 second so that the file modification time looks different""" ts1 = tdt.current_time_string_forfile() while ts1 == tdt.current_time_string_forfile(): time.sleep(0.05) diff --git a/contrib/scripts/test/test_FileFingerprint.py b/contrib/scripts/test/test_FileFingerprint.py index a48a14e43..61a76a36c 100644 --- a/contrib/scripts/test/test_FileFingerprint.py +++ b/contrib/scripts/test/test_FileFingerprint.py @@ -26,8 +26,12 @@ def test_equal(self): f2 = "equal_file" contents = "%0.10f" % random.Random().random() - ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False) - ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False) + ft.write_text_file( + f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False + ) + ft.write_text_file( + f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False + ) ff1 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d1}", "", f1 + ".txt") ff2 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d2}", "", f2 + ".txt") @@ -40,8 +44,12 @@ def test_not_equal_relpath(self): f2 = "equal_file" contents = "%0.10f" % random.Random().random() - ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False) - ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False) + ft.write_text_file( + f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False + ) 
+ ft.write_text_file( + f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False + ) ff1 = ff.FileFingerprint.for_file(self.out_dir, d1, f1 + ".txt") ff2 = ff.FileFingerprint.for_file(self.out_dir, d2, f2 + ".txt") @@ -54,8 +62,12 @@ def test_not_equal_filename(self): f2 = "equal_file2" contents = "%0.10f" % random.Random().random() - ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False) - ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False) + ft.write_text_file( + f1, f"{self.out_dir}/{d1}", f1, [contents], error_if_dir_not_exist=False + ) + ft.write_text_file( + f2, f"{self.out_dir}/{d2}", f2, [contents], error_if_dir_not_exist=False + ) ff1 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d1}", "", f1 + ".txt") ff2 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d2}", "", f2 + ".txt") @@ -70,8 +82,12 @@ def test_not_equal_hash(self): contents1 = contents + " " contents2 = " " + contents - ft.write_text_file(f1, f"{self.out_dir}/{d1}", f1, [contents1], error_if_dir_not_exist=False) - ft.write_text_file(f2, f"{self.out_dir}/{d2}", f2, [contents2], error_if_dir_not_exist=False) + ft.write_text_file( + f1, f"{self.out_dir}/{d1}", f1, [contents1], error_if_dir_not_exist=False + ) + ft.write_text_file( + f2, f"{self.out_dir}/{d2}", f2, [contents2], error_if_dir_not_exist=False + ) ff1 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d1}", "", f1 + ".txt") ff2 = ff.FileFingerprint.for_file(f"{self.out_dir}/{d2}", "", f2 + ".txt") diff --git a/contrib/scripts/test/test_SensitiveStringMatcher.py b/contrib/scripts/test/test_SensitiveStringMatcher.py index 100c4cd65..0b5552f36 100644 --- a/contrib/scripts/test/test_SensitiveStringMatcher.py +++ b/contrib/scripts/test/test_SensitiveStringMatcher.py @@ -54,18 +54,24 @@ def test_matches(self): self.assertEqual(3, matches[2].lineno) def test_dont_match(self): - matcher = ssm.SensitiveStringMatcher("Basic Matcher", "foo", 
"**dont_match", "foo") + matcher = ssm.SensitiveStringMatcher( + "Basic Matcher", "foo", "**dont_match", "foo" + ) matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(0, len(matches)) def test_case_sensitive(self): matcher = ssm.SensitiveStringMatcher("Basic Matcher", "**case_sensitive", "foo") - matches = matcher.check_lines(["foO", "fOo", "fOO", "Foo", "FoO", "FOo", "FOO", "foo"]) + matches = matcher.check_lines( + ["foO", "fOo", "fOO", "Foo", "FoO", "FOo", "FOO", "foo"] + ) self.assertEqual(1, len(matches)) self.assertEqual(8, matches[0].lineno) def test_single_regex(self): - matcher = ssm.SensitiveStringMatcher("Basic Matcher", "**next_is_regex", r"[a-z]a[a-z]") + matcher = ssm.SensitiveStringMatcher( + "Basic Matcher", "**next_is_regex", r"[a-z]a[a-z]" + ) matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(2, len(matches)) self.assertEqual(2, matches[0].lineno) @@ -74,26 +80,34 @@ def test_single_regex(self): self.assertEqual('baz', matches[1].line_part) def test_partial_single_regex(self): - matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**next_is_regex", r"[a-z]o[a-z]") + matcher = ssm.SensitiveStringMatcher( + "Regex Matcher", "**next_is_regex", r"[a-z]o[a-z]" + ) matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(1, len(matches)) self.assertEqual(0, matches[0].colno) self.assertEqual('foo', matches[0].line_part) - matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**next_is_regex", r"[a-z]{2}r") + matcher = ssm.SensitiveStringMatcher( + "Regex Matcher", "**next_is_regex", r"[a-z]{2}r" + ) matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(1, len(matches)) self.assertEqual(3, matches[0].colno) self.assertEqual('bar', matches[0].line_part) - matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**next_is_regex", r"[a-z]{2}z") + matcher = ssm.SensitiveStringMatcher( + "Regex Matcher", "**next_is_regex", r"[a-z]{2}z" + ) matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(1, 
len(matches)) self.assertEqual(6, matches[0].colno) self.assertEqual('baz', matches[0].line_part) def test_partial_multiple_regex(self): - matcher = ssm.SensitiveStringMatcher("Regex Matcher", "**all_regex", r"[a-z]o[a-z]", r"[a-z]{2}r", r"[a-z]{2}z") + matcher = ssm.SensitiveStringMatcher( + "Regex Matcher", "**all_regex", r"[a-z]o[a-z]", r"[a-z]{2}r", r"[a-z]{2}z" + ) matches = matcher.check_lines(["foobarbaz"]) self.assertEqual(3, len(matches)) self.assertEqual(0, matches[0].colno) @@ -104,7 +118,9 @@ def test_partial_multiple_regex(self): self.assertEqual('baz', matches[2].line_part) def test_mixed_plain_regex(self): - matcher = ssm.SensitiveStringMatcher("Basic Matcher", "foo", "**next_is_regex", r"[a-z]{2}r", "baz") + matcher = ssm.SensitiveStringMatcher( + "Basic Matcher", "foo", "**next_is_regex", r"[a-z]{2}r", "baz" + ) matches = matcher.check_lines(["foobarbaz"]) self.assertLessEqual(1, len(matches)) @@ -127,12 +143,20 @@ def test_mixed_plain_regex(self): self.assertEqual('baz', matches[0].line_part) def test_regex_dont_match(self): - matcher = ssm.SensitiveStringMatcher("Basic Matcher", "foo", "**dont_match", "**next_is_regex", r"[a-z]o[a-z]") + matcher = ssm.SensitiveStringMatcher( + "Basic Matcher", "foo", "**dont_match", "**next_is_regex", r"[a-z]o[a-z]" + ) matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(0, len(matches)) - matcher = ssm.SensitiveStringMatcher("Basic Matcher", "**all_regex", "foo.?", - "**dont_match", "**next_is_regex", r"[a-z]{4}") + matcher = ssm.SensitiveStringMatcher( + "Basic Matcher", + "**all_regex", + "foo.?", + "**dont_match", + "**next_is_regex", + r"[a-z]{4}", + ) matches = matcher.check_lines(["foo", "bar", "baz"]) self.assertEqual(1, len(matches)) matches = matcher.check_lines(["foobarbaz"]) diff --git a/contrib/scripts/test/test_sensitive_strings.py b/contrib/scripts/test/test_sensitive_strings.py index 7078445c7..ca740f09d 100644 --- a/contrib/scripts/test/test_sensitive_strings.py +++ 
b/contrib/scripts/test/test_sensitive_strings.py @@ -20,48 +20,52 @@ def setUp(self) -> None: self.root_search_dir = os.path.join(self.data_dir, "root_search_dir") self.ss_dir = os.path.join(self.data_dir, "per_test_sensitive_strings") - self.allowed_binaries_dir = os.path.join(self.data_dir, "per_test_allowed_binaries") + self.allowed_binaries_dir = os.path.join( + self.data_dir, "per_test_allowed_binaries" + ) self.all_binaries = os.path.join(self.allowed_binaries_dir, "all_binaries.csv") self.no_binaries = os.path.join(self.allowed_binaries_dir, "no_binaries.csv") def test_no_matches(self): sensitive_strings_csv = os.path.join(self.ss_dir, "no_matches.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - self.all_binaries) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, self.all_binaries + ) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 0) def test_single_matcher(self): # based on file name sensitive_strings_csv = os.path.join(self.ss_dir, "test_single_matcher.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - self.all_binaries) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, self.all_binaries + ) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) # based on file content - sensitive_strings_csv = os.path.join(self.ss_dir, "test_single_matcher_content.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - self.all_binaries) + sensitive_strings_csv = os.path.join( + self.ss_dir, "test_single_matcher_content.csv" + ) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, self.all_binaries + ) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) def test_directory_matcher(self): sensitive_strings_csv = os.path.join(self.ss_dir, 
"test_directory_matcher.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - self.all_binaries) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, self.all_binaries + ) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) def test_all_matches(self): sensitive_strings_csv = os.path.join(self.ss_dir, "test_all_matches.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - self.all_binaries) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, self.all_binaries + ) searcher.git_files_only = False # 6 matches: # files: a.txt, b/b.txt, c/d/e.txt @@ -72,27 +76,29 @@ def test_all_matches(self): def test_single_unknown_binary(self): sensitive_strings_csv = os.path.join(self.ss_dir, "no_matches.csv") single_binary_csv = os.path.join(self.allowed_binaries_dir, "single_binary.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - single_binary_csv) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, single_binary_csv + ) searcher.git_files_only = False self.assertEqual(searcher.search_files(), 1) def test_single_expected_not_found_binary(self): sensitive_strings_csv = os.path.join(self.ss_dir, "no_matches.csv") - single_binary_csv = os.path.join(self.allowed_binaries_dir, "single_expected_not_found_binary.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - single_binary_csv) + single_binary_csv = os.path.join( + self.allowed_binaries_dir, "single_expected_not_found_binary.csv" + ) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, single_binary_csv + ) searcher.git_files_only = False # 2 unknown binaries, and 1 expected not found self.assertEqual(searcher.search_files(), 3) def test_hdf5_match(self): sensitive_strings_csv = 
os.path.join(self.ss_dir, "h5_match.csv") - searcher = ss.SensitiveStringsSearcher(self.root_search_dir, - sensitive_strings_csv, - self.all_binaries) + searcher = ss.SensitiveStringsSearcher( + self.root_search_dir, sensitive_strings_csv, self.all_binaries + ) searcher.git_files_only = False # 2 unknown binaries, and 1 expected not found self.assertEqual(searcher.search_files(), 1) diff --git a/contrib/test_data_generation/downsample_data_general.py b/contrib/test_data_generation/downsample_data_general.py index 5764083de..53e80beb9 100644 --- a/contrib/test_data_generation/downsample_data_general.py +++ b/contrib/test_data_generation/downsample_data_general.py @@ -1,5 +1,6 @@ """Library of functions to downsample high-res image and other data """ + import numpy as np from scipy.signal import convolve2d diff --git a/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py b/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py index e89c1d8a6..cd5fb4425 100644 --- a/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py +++ b/contrib/test_data_generation/scene_reconstruction/generate_downsampled_dataset.py @@ -1,6 +1,7 @@ """Generates downsampled dataset used for calibrating the 3d locations of fixed pattern dots. 
""" + from glob import glob from os.path import join, basename, exists from os import mkdir @@ -17,11 +18,11 @@ def generate_data(): - """Downsamples and saves files - """ + """Downsamples and saves files""" # Define file locations dir_sample_data = join( - opencsp_code_dir(), '../../sample_data/scene_reconstruction/data_measurement') + opencsp_code_dir(), '../../sample_data/scene_reconstruction/data_measurement' + ) files_images = glob(join(dir_sample_data, 'aruco_marker_images/*.JPG')) file_alignment_points = join(dir_sample_data, 'alignment_points.csv') @@ -29,8 +30,9 @@ def generate_data(): file_point_pair_dists = join(dir_sample_data, 'point_pair_distances.csv') file_camera_cal = join(dir_sample_data, 'camera.h5') - dir_save = join(opencsp_code_dir(), - 'app/scene_reconstruction/test/data/data_measurement') + dir_save = join( + opencsp_code_dir(), 'app/scene_reconstruction/test/data/data_measurement' + ) # Downsample marker/dot images n_downsample = 5 diff --git a/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py b/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py index 69d441aa4..0a525a1f4 100644 --- a/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py +++ b/contrib/test_data_generation/sofast_fixed/generate_downsampled_calibration_data.py @@ -1,6 +1,7 @@ """Generates downsampled dataset used for calibrating the 3d locations of fixed pattern dots. 
""" + from glob import glob from os.path import join, basename import shutil @@ -16,11 +17,12 @@ def generate_data(): - """Downsamples and saves files - """ + """Downsamples and saves files""" # Define file locations dir_sample_data = join( - opencsp_code_dir(), '../../sample_data/deflectometry/calibration_dot_locations/data_measurement') + opencsp_code_dir(), + '../../sample_data/deflectometry/calibration_dot_locations/data_measurement', + ) files_images = glob(join(dir_sample_data, 'images/*.JPG')) file_camera_cal = join(dir_sample_data, 'camera_image_calibration.h5') @@ -28,8 +30,10 @@ def generate_data(): file_camera_def = join(dir_sample_data, 'camera_deflectometry.h5') file_image_def = join(dir_sample_data, 'image_deflectometry_camera.png') - dir_save = join(opencsp_code_dir(), - 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements') + dir_save = join( + opencsp_code_dir(), + 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements', + ) # Downsample marker/dot images n_downsample = 4 diff --git a/contrib/test_data_generation/sofast_fringe/downsample_data.py b/contrib/test_data_generation/sofast_fringe/downsample_data.py index 74a1f2724..9f0a7fc54 100644 --- a/contrib/test_data_generation/sofast_fringe/downsample_data.py +++ b/contrib/test_data_generation/sofast_fringe/downsample_data.py @@ -1,9 +1,12 @@ """Contains functions to save downsampled sofast measurement file """ + import os import sys -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.common.lib.opencsp_path.opencsp_root_path import opencsp_code_dir sys.path.append(os.path.join(opencsp_code_dir(), '..')) diff --git a/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py b/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py index 
4dc777c8c..1ff0c3064 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py +++ b/contrib/test_data_generation/sofast_fringe/generate_downsampled_calibration_data.py @@ -3,6 +3,7 @@ measurement files (and associated equivalent camera definition file) to the Sofast test data suite. """ + from os.path import join, basename, exists, abspath import sys @@ -31,14 +32,22 @@ def downsample_dataset_1(base_dir): # Define location of sample data file_measurement_facet = abspath(join(base_dir, 'sofast/measurement_facet.h5')) - file_measurement_ensemble = abspath(join(base_dir, 'sofast/measurement_facet_ensemble.h5')) + file_measurement_ensemble = abspath( + join(base_dir, 'sofast/measurement_facet_ensemble.h5') + ) file_calibration = abspath(join(base_dir, 'sofast/image_calibration.h5')) file_camera = abspath(join(base_dir, 'calibration_files/camera.h5')) - file_display_1 = abspath(join(base_dir, 'calibration_files/display_distorted_2d.h5')) - file_display_2 = abspath(join(base_dir, 'calibration_files/display_distorted_3d.h5')) + file_display_1 = abspath( + join(base_dir, 'calibration_files/display_distorted_2d.h5') + ) + file_display_2 = abspath( + join(base_dir, 'calibration_files/display_distorted_3d.h5') + ) file_display_3 = abspath(join(base_dir, 'calibration_files/display_rectangular.h5')) - dir_dataset_out = abspath(join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe')) + dir_dataset_out = abspath( + join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') + ) if not exists(dir_dataset_out): raise FileNotFoundError(f'Output directory {dir_dataset_out} does not exist.') @@ -62,8 +71,12 @@ def downsample_dataset_1(base_dir): plt.title('Ensemble Mask Image') # Save data - measurement_facet.save_to_hdf(join(dir_dataset_out, basename(file_measurement_facet))) - measurement_ensemble.save_to_hdf(join(dir_dataset_out, basename(file_measurement_ensemble))) + measurement_facet.save_to_hdf( + join(dir_dataset_out, 
basename(file_measurement_facet)) + ) + measurement_ensemble.save_to_hdf( + join(dir_dataset_out, basename(file_measurement_ensemble)) + ) camera.save_to_hdf(join(dir_dataset_out, basename(file_camera))) display_1.save_to_hdf(join(dir_dataset_out, basename(file_display_1))) display_2.save_to_hdf(join(dir_dataset_out, basename(file_display_2))) @@ -76,7 +89,6 @@ def downsample_dataset_1(base_dir): if __name__ == '__main__': # Create downsample dataset 1 (NSTTF Optics Lab data) dir_sample_data = join( - opencsp_code_dir(), - '../../sample_data/sofast/measurement_set_1' + opencsp_code_dir(), '../../sample_data/sofast/measurement_set_1' ) downsample_dataset_1(dir_sample_data) diff --git a/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py b/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py index 6d0246707..8a78acb7f 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py +++ b/contrib/test_data_generation/sofast_fringe/generate_downsampled_dataset.py @@ -2,6 +2,7 @@ datasets from the Sofast examples, downsamples, then saves downsampled files to this test data suite. """ + import os from os.path import join import shutil diff --git a/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py b/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py index c7bd370f4..44a447c08 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py +++ b/contrib/test_data_generation/sofast_fringe/generate_test_data_multi_facet.py @@ -1,12 +1,15 @@ """Generates test data from measurement file for mirror type 'multi_facet'. 
""" + from os.path import join, dirname, exists import matplotlib.pyplot as plt import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.app.sofast.lib.DefinitionEnsemble import DefinitionEnsemble @@ -27,7 +30,9 @@ def generate_dataset( """Generates and saves test data""" # Check output file exists if not exists(dirname(file_dataset_out)): - raise FileNotFoundError(f'Output directory {file_dataset_out:s} does not exist.') + raise FileNotFoundError( + f'Output directory {file_dataset_out:s} does not exist.' + ) # Load components camera = Camera.load_from_hdf(file_camera) diff --git a/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py b/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py index cfaa08ad1..dd35c4212 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py +++ b/contrib/test_data_generation/sofast_fringe/generate_test_data_single_facet.py @@ -1,6 +1,7 @@ """Generates test data from measurement file for mirror type 'single_facet'. Multiple combinations of display and surface types are iterated over. 
""" + from os.path import join, dirname, exists from typing import Literal @@ -8,7 +9,9 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.app.sofast.lib.DefinitionFacet import DefinitionFacet @@ -29,7 +32,9 @@ def generate_dataset( """Generates and saves dataset""" # Check output file exists if not exists(dirname(file_dataset_out)): - raise FileNotFoundError(f'Output directory {file_dataset_out:s} does not exist.') + raise FileNotFoundError( + f'Output directory {file_dataset_out:s} does not exist.' + ) # Load components camera = Camera.load_from_hdf(file_camera) diff --git a/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py b/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py index 190191905..9c18ec634 100644 --- a/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py +++ b/contrib/test_data_generation/sofast_fringe/generate_test_data_undefined.py @@ -1,12 +1,15 @@ """Generates test data from measurement file for mirror type 'undefined'. 
""" + from os.path import join, dirname, exists import matplotlib.pyplot as plt import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.common.lib.camera.Camera import Camera @@ -23,7 +26,9 @@ def generate_dataset( """Generates and saves dataset""" # Check output file exists if not exists(dirname(file_dataset_out)): - raise FileNotFoundError(f'Output directory {file_dataset_out:s} does not exist.') + raise FileNotFoundError( + f'Output directory {file_dataset_out:s} does not exist.' + ) # Load components camera = Camera.load_from_hdf(file_camera) diff --git a/example/camera_io/live_view_color_Basler.py b/example/camera_io/live_view_color_Basler.py index 5f61c0c7d..dbf26f243 100644 --- a/example/camera_io/live_view_color_Basler.py +++ b/example/camera_io/live_view_color_Basler.py @@ -3,6 +3,7 @@ 12 bit Basler color camera. """ + import argparse from opencsp.common.lib.camera.ImageAcquisition_DCAM_color import ( diff --git a/example/camera_io/live_view_mono_Basler.py b/example/camera_io/live_view_mono_Basler.py index f32715538..f42e77b6a 100644 --- a/example/camera_io/live_view_mono_Basler.py +++ b/example/camera_io/live_view_mono_Basler.py @@ -3,6 +3,7 @@ 8 bit Basler monochrome camera. 
""" + import argparse from opencsp.common.lib.camera.ImageAcquisition_DCAM_mono import ( diff --git a/example/camera_io/run_and_save_images_Basler_color.py b/example/camera_io/run_and_save_images_Basler_color.py index a000072ff..15941cc35 100644 --- a/example/camera_io/run_and_save_images_Basler_color.py +++ b/example/camera_io/run_and_save_images_Basler_color.py @@ -3,6 +3,7 @@ camera and saves the images in TIFF format. """ + import argparse import imageio.v3 as imageio diff --git a/example/csp/example_optics_and_ray_tracing.py b/example/csp/example_optics_and_ray_tracing.py index 538e6d576..88a9c7610 100644 --- a/example/csp/example_optics_and_ray_tracing.py +++ b/example/csp/example_optics_and_ray_tracing.py @@ -6,6 +6,7 @@ 4) Plots ensquared energy plot """ + import datetime import os diff --git a/example/mirror/example_MirrorOutput.py b/example/mirror/example_MirrorOutput.py index 9a8837819..82fe3273e 100644 --- a/example/mirror/example_MirrorOutput.py +++ b/example/mirror/example_MirrorOutput.py @@ -477,9 +477,7 @@ def example_heliostat_05W01_and_14W01(self) -> None: # Construct heliostat objects and solar field object. 
def fn_5W01(x, y): - return (x**2) / (4 * focal_length_5W01) + (y**2) / ( - 4 * focal_length_5W01 - ) + return (x**2) / (4 * focal_length_5W01) + (y**2) / (4 * focal_length_5W01) h_5W01 = helio.h_from_facet_centroids( name_5W01, @@ -497,9 +495,7 @@ def fn_5W01(x, y): h_5W01.set_canting_from_equation(fn_5W01) def fn_14W01(x, y): - return (x**2) / (4 * focal_length_14W01) + (y**2) / ( - 4 * focal_length_14W01 - ) + return (x**2) / (4 * focal_length_14W01) + (y**2) / (4 * focal_length_14W01) h_14W01 = helio.h_from_facet_centroids( "NSTTF Heliostat 14W01", diff --git a/example/scene_reconstruction/example_annotate_aruco_markers.py b/example/scene_reconstruction/example_annotate_aruco_markers.py index b1489a3c3..71856831a 100644 --- a/example/scene_reconstruction/example_annotate_aruco_markers.py +++ b/example/scene_reconstruction/example_annotate_aruco_markers.py @@ -89,9 +89,11 @@ def example_annotate_aruco_markers(): """Example script that annotates aruco markers found in imput images matching the given source_pattern. Markers are outlined in red and labeled in blue text. 
""" - source_pattern = join(opencsp_code_dir(), - 'app/scene_reconstruction/test/data', - 'data_measurement/aruco_marker_images/DSC0365*.JPG') + source_pattern = join( + opencsp_code_dir(), + 'app/scene_reconstruction/test/data', + 'data_measurement/aruco_marker_images/DSC0365*.JPG', + ) save_dir = join(dirname(__file__), 'data/output/annotated_aruco_markers') ft.create_directories_if_necessary(save_dir) diff --git a/example/scene_reconstruction/example_make_aruco_markers.py b/example/scene_reconstruction/example_make_aruco_markers.py index 56b77db63..abe63f2e7 100644 --- a/example/scene_reconstruction/example_make_aruco_markers.py +++ b/example/scene_reconstruction/example_make_aruco_markers.py @@ -1,6 +1,7 @@ """ Script that generates Aruco marker PNG files of given size and number """ + from os.path import dirname, join import cv2 as cv @@ -10,10 +11,7 @@ import opencsp.common.lib.tool.file_tools as ft -def make_aruco_images(save_path: str, - number: str, - size: int = 500, - padding: int = 50): +def make_aruco_images(save_path: str, number: str, size: int = 500, padding: int = 50): """Generates aruco marker images and saves images as PNG files Parameters diff --git a/example/scene_reconstruction/example_scene_reconstruction.py b/example/scene_reconstruction/example_scene_reconstruction.py index 22da2abeb..f17975d2b 100644 --- a/example/scene_reconstruction/example_scene_reconstruction.py +++ b/example/scene_reconstruction/example_scene_reconstruction.py @@ -13,8 +13,7 @@ def example_scene_reconstruction(): """Example script that reconstructs the XYZ locations of Aruco markers in a scene.""" # Define input directory dir_input = join( - opencsp_code_dir(), - 'app/scene_reconstruction/test/data/data_measurement', + opencsp_code_dir(), 'app/scene_reconstruction/test/data/data_measurement' ) # Define output directory diff --git a/example/sofast_fixed/calculate_dot_locations_from_display_object.py b/example/sofast_fixed/calculate_dot_locations_from_display_object.py 
index f7f7488f7..260a8793b 100644 --- a/example/sofast_fixed/calculate_dot_locations_from_display_object.py +++ b/example/sofast_fixed/calculate_dot_locations_from_display_object.py @@ -2,6 +2,7 @@ file using a previously created Display object. This is only when displaying a fixed dot pattern on a screen. """ + import os from os.path import join, dirname, exists diff --git a/example/sofast_fixed/find_blobs_in_image.py b/example/sofast_fixed/find_blobs_in_image.py index 524c1f018..2a0c88a80 100644 --- a/example/sofast_fixed/find_blobs_in_image.py +++ b/example/sofast_fixed/find_blobs_in_image.py @@ -1,5 +1,6 @@ """Example script that finds blobs in image and saves annotated image. """ + import os from os.path import join, dirname, exists diff --git a/example/sofast_fixed/physical_target_dot_calibration.py b/example/sofast_fixed/physical_target_dot_calibration.py index 6d65e8f8f..b569ecaa5 100644 --- a/example/sofast_fixed/physical_target_dot_calibration.py +++ b/example/sofast_fixed/physical_target_dot_calibration.py @@ -1,5 +1,6 @@ """Example script that performs dot location calibration using photogrammetry. 
""" + import os from os.path import join, dirname, exists @@ -8,8 +9,9 @@ from opencsp.app.sofast.lib.CalibrateSofastFixedDots import CalibrateSofastFixedDots from opencsp.common.lib.camera.Camera import Camera -from opencsp.common.lib.deflectometry.CalibrationCameraPosition import \ - CalibrationCameraPosition +from opencsp.common.lib.deflectometry.CalibrationCameraPosition import ( + CalibrationCameraPosition, +) from opencsp.app.sofast.lib.SpatialOrientation import SpatialOrientation from opencsp.common.lib.geometry.Vxy import Vxy from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -21,15 +23,19 @@ def example_perform_calibration(): """Performs a dot-location calibration using photogrammetry""" # Define dot location images and origins - base_dir = join(opencsp_code_dir(), - 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements') + base_dir = join( + opencsp_code_dir(), + 'test/data/measurements_sofast_fixed/dot_location_calibration/measurements', + ) files_cal_images = [ join(base_dir, 'images/DSC03965.JPG'), join(base_dir, 'images/DSC03967.JPG'), join(base_dir, 'images/DSC03970.JPG'), join(base_dir, 'images/DSC03972.JPG'), ] - origins = np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 + origins = ( + np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 + ) origins = Vxy(origins.astype(int)) # Define other files @@ -59,12 +65,20 @@ def example_perform_calibration(): # Perform dot location calibration cal_dot_locs = CalibrateSofastFixedDots( - files_cal_images, origins, camera_marker, pts_xyz_corners, ids_corners, -32, 31, -31, 32 + files_cal_images, + origins, + camera_marker, + pts_xyz_corners, + ids_corners, + -32, + 31, + -31, + 32, ) cal_dot_locs.plot = True - cal_dot_locs.blob_search_threshold = 3. - cal_dot_locs.blob_detector.minArea = 3. - cal_dot_locs.blob_detector.maxArea = 30. 
+ cal_dot_locs.blob_search_threshold = 3.0 + cal_dot_locs.blob_detector.minArea = 3.0 + cal_dot_locs.blob_detector.maxArea = 30.0 cal_dot_locs.run() # Perform camera position calibration diff --git a/example/sofast_fixed/process_fixed_pattern_data.py b/example/sofast_fixed/process_fixed_pattern_data.py index f63acfd5d..728d089fa 100644 --- a/example/sofast_fixed/process_fixed_pattern_data.py +++ b/example/sofast_fixed/process_fixed_pattern_data.py @@ -1,5 +1,6 @@ """Example script that runs fixed pattern deflectometry analysis on saved data """ + from os.path import join, dirname from opencsp.app.sofast.lib.DotLocationsFixedPattern import DotLocationsFixedPattern @@ -64,9 +65,7 @@ def example_process_fixed_pattern_printed_target(): """Example function that processes a fixed pattern data collect using a physical, printed dot target. """ - dir_base = join( - opencsp_code_dir(), '../../sample_data/deflectometry/sandia_lab' - ) + dir_base = join(opencsp_code_dir(), '../../sample_data/deflectometry/sandia_lab') file_camera = join(dir_base, "calibration_files/camera.h5") file_facet = join(dir_base, "calibration_files/Facet_NSTTF.json") @@ -95,9 +94,7 @@ def example_process_fixed_pattern_printed_target(): def example_process_fixed_pattern_screen_target(): """Loads data and calls processing function""" - dir_base = join( - opencsp_code_dir(), '../../sample_data/deflectometry/sandia_lab' - ) + dir_base = join(opencsp_code_dir(), '../../sample_data/deflectometry/sandia_lab') # Define files file_camera = join(dir_base, "calibration_files/camera.h5") diff --git a/example/sofast_fixed/run_and_characterize_fixed_pattern.py b/example/sofast_fixed/run_and_characterize_fixed_pattern.py index b54a6dc7a..c79361bab 100644 --- a/example/sofast_fixed/run_and_characterize_fixed_pattern.py +++ b/example/sofast_fixed/run_and_characterize_fixed_pattern.py @@ -6,6 +6,7 @@ NOTE: The user must have a complete deflectometry setup in place. 
This includes a camera, dot grid, and system calibration files. """ + import os import matplotlib.pyplot as plt diff --git a/example/sofast_fixed/run_fixed_pattern_projection.py b/example/sofast_fixed/run_fixed_pattern_projection.py index 1a3c84187..a8cae6d12 100644 --- a/example/sofast_fixed/run_fixed_pattern_projection.py +++ b/example/sofast_fixed/run_fixed_pattern_projection.py @@ -2,6 +2,7 @@ NOTE: This example requires a computer screen """ + from os.path import join import pytest diff --git a/example/sofast_fringe/example_calibration_camera_pose.py b/example/sofast_fringe/example_calibration_camera_pose.py index fd3876b5b..26bc13a3b 100644 --- a/example/sofast_fringe/example_calibration_camera_pose.py +++ b/example/sofast_fringe/example_calibration_camera_pose.py @@ -2,7 +2,9 @@ import numpy as np -from opencsp.common.lib.deflectometry.CalibrationCameraPosition import CalibrationCameraPosition +from opencsp.common.lib.deflectometry.CalibrationCameraPosition import ( + CalibrationCameraPosition, +) from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -22,8 +24,7 @@ def example_run_camera_position_calibration(): # Define directory where screen shape calibration data is saved base_dir_sofast_cal = join( - opencsp_code_dir(), - 'common/lib/deflectometry/test/data/data_measurement', + opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_measurement' ) # Define inputs diff --git a/example/sofast_fringe/example_calibration_save_DisplayShape_file.py b/example/sofast_fringe/example_calibration_save_DisplayShape_file.py index d00bab619..3df949012 100644 --- a/example/sofast_fringe/example_calibration_save_DisplayShape_file.py +++ b/example/sofast_fringe/example_calibration_save_DisplayShape_file.py @@ -11,8 +11,7 @@ def example_save_display_shape_file(): - """Example script that saves a DisplayShape file from its components - """ + """Example script that saves a DisplayShape file from its components""" # 
Define save directory save_dir = join(dirname(__file__), 'data/output/save_DisplayShape_file') ft.create_directories_if_necessary(save_dir) @@ -21,7 +20,7 @@ def example_save_display_shape_file(): file_screen_distortion_data = join( opencsp_code_dir(), 'app/sofast/test/data/data_expected', - 'screen_distortion_data_100_100.h5' + 'screen_distortion_data_100_100.h5', ) datasets = ['pts_xy_screen_fraction', 'pts_xyz_screen_coords'] data = load_hdf5_datasets(datasets, file_screen_distortion_data) @@ -34,7 +33,7 @@ def example_save_display_shape_file(): file_rvec_tvec = join( opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_expected', - 'camera_rvec_tvec.csv' + 'camera_rvec_tvec.csv', ) pose_data = np.loadtxt(file_rvec_tvec, delimiter=',') rvec = pose_data[0] diff --git a/example/sofast_fringe/example_calibration_screen_shape.py b/example/sofast_fringe/example_calibration_screen_shape.py index 869370e22..43bae362b 100644 --- a/example/sofast_fringe/example_calibration_screen_shape.py +++ b/example/sofast_fringe/example_calibration_screen_shape.py @@ -16,8 +16,7 @@ def example_run_screen_shape_calibration(): - """Runs screen shape calibration. Saves data to ./data/output/screen_shape - """ + """Runs screen shape calibration. 
Saves data to ./data/output/screen_shape""" # Define save directory save_dir = join(dirname(__file__), 'data/output/screen_shape') ft.create_directories_if_necessary(save_dir) @@ -26,7 +25,7 @@ def example_run_screen_shape_calibration(): file_pts_data = join( opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_measurement', - 'point_locations.csv' + 'point_locations.csv', ) pts_marker_data = np.loadtxt(file_pts_data, delimiter=',', skiprows=1) pts_xyz_marker = Vxyz(pts_marker_data[:, 2:].T) @@ -37,12 +36,13 @@ def example_run_screen_shape_calibration(): # Define directory where screen shape calibration data is saved base_dir_sofast_cal = join( - opencsp_code_dir(), - 'app/sofast/test/data/data_measurement', + opencsp_code_dir(), 'app/sofast/test/data/data_measurement' ) # Define input files - file_screen_cal_point_pairs = join(base_dir_sofast_cal, 'screen_calibration_point_pairs.csv') + file_screen_cal_point_pairs = join( + base_dir_sofast_cal, 'screen_calibration_point_pairs.csv' + ) file_camera_distortion = join(base_dir_sofast_cal, 'camera_screen_shape.h5') file_image_projection = join(base_dir_sofast_cal, 'image_projection.h5') files_screen_shape_measurement = glob( @@ -53,10 +53,7 @@ def example_run_screen_shape_calibration(): camera = Camera.load_from_hdf(file_camera_distortion) image_projection_data = ImageProjection.load_from_hdf(file_image_projection) screen_cal_point_pairs = np.loadtxt( - file_screen_cal_point_pairs, - delimiter=',', - skiprows=1, - dtype=int + file_screen_cal_point_pairs, delimiter=',', skiprows=1, dtype=int ) # Store input data in data class @@ -67,7 +64,10 @@ def example_run_screen_shape_calibration(): pts_xyz_marker, camera, image_projection_data, - [MeasurementSofastFringe.load_from_hdf(f) for f in files_screen_shape_measurement], + [ + MeasurementSofastFringe.load_from_hdf(f) + for f in files_screen_shape_measurement + ], ) # Perform screen shape calibration @@ -78,7 +78,7 @@ def example_run_screen_shape_calibration(): 
cal.save_data_as_hdf(join(save_dir, 'screen_distortion_data.h5')) # Save calibration figures - for fig in (cal.figures): + for fig in cal.figures: fig.savefig(join(save_dir, fig.get_label() + '.png')) diff --git a/example/sofast_fringe/example_multi_facet_data_process.py b/example/sofast_fringe/example_multi_facet_data_process.py index 511f6d1a4..0f2f78e85 100644 --- a/example/sofast_fringe/example_multi_facet_data_process.py +++ b/example/sofast_fringe/example_multi_facet_data_process.py @@ -4,7 +4,9 @@ import matplotlib from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.visualize_setup import visualize_setup from opencsp.common.lib.camera.Camera import Camera @@ -31,9 +33,7 @@ def example_driver(): """ # Define sample data directory - sample_data_dir = join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe/' - ) + sample_data_dir = join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe/') # Directory setup file_measurement = join(sample_data_dir, 'measurement_ensemble.h5') diff --git a/example/sofast_fringe/example_single_facet_data_process.py b/example/sofast_fringe/example_single_facet_data_process.py index 3616baa58..fc062b9a1 100644 --- a/example/sofast_fringe/example_single_facet_data_process.py +++ b/example/sofast_fringe/example_single_facet_data_process.py @@ -4,7 +4,9 @@ import matplotlib from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from 
opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.app.sofast.lib.visualize_setup import visualize_setup from opencsp.common.lib.camera.Camera import Camera @@ -28,9 +30,7 @@ def example_driver(): 4. Plots slope magnitude, physical setup """ # Define sample data directory - sample_data_dir = join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe/' - ) + sample_data_dir = join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe/') # Directory Setup file_measurement = join(sample_data_dir, 'measurement_facet.h5') diff --git a/example/sofast_fringe/example_standard_mirror_plot_output.py b/example/sofast_fringe/example_standard_mirror_plot_output.py index f2717326d..f0036a7b8 100644 --- a/example/sofast_fringe/example_standard_mirror_plot_output.py +++ b/example/sofast_fringe/example_standard_mirror_plot_output.py @@ -126,9 +126,7 @@ def example_driver(): """ # Define measured and reference data - sample_data_dir = join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe/' - ) + sample_data_dir = join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe/') save_dir = join(os.path.dirname(__file__), 'data/output/standard_output') diff --git a/example/sofast_fringe/example_undefined_facet_data_process.py b/example/sofast_fringe/example_undefined_facet_data_process.py index b2462ff77..cb8c71f90 100644 --- a/example/sofast_fringe/example_undefined_facet_data_process.py +++ b/example/sofast_fringe/example_undefined_facet_data_process.py @@ -3,7 +3,9 @@ from opencsp.app.sofast.lib.visualize_setup import visualize_setup from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from 
opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.app.sofast.lib.SpatialOrientation import SpatialOrientation @@ -25,9 +27,7 @@ def example_driver(): 4. Plots slope magnitude, physical setup """ # Define sample data directory - sample_data_dir = join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe/' - ) + sample_data_dir = join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe/') # Directory Setup file_measurement = join(sample_data_dir, 'measurement_facet.h5') diff --git a/opencsp/app/camera_calibration/CameraCalibration.py b/opencsp/app/camera_calibration/CameraCalibration.py index cd05362ad..f499a4a50 100644 --- a/opencsp/app/camera_calibration/CameraCalibration.py +++ b/opencsp/app/camera_calibration/CameraCalibration.py @@ -2,6 +2,7 @@ GUI used to load previously captured calibration images and calibrate a machine vision camera. """ + import os import tkinter from tkinter.filedialog import askopenfilename, asksaveasfilename diff --git a/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py b/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py index dce8202ec..1be76e700 100644 --- a/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py +++ b/opencsp/app/camera_calibration/lib/ViewAnnotatedImages.py @@ -1,6 +1,7 @@ """ GUI used to view annotated checkerboard corners. """ + import tkinter from numpy import ndarray diff --git a/opencsp/app/camera_calibration/lib/calibration_camera.py b/opencsp/app/camera_calibration/lib/calibration_camera.py index 76dfcd98a..35a5b0f21 100644 --- a/opencsp/app/camera_calibration/lib/calibration_camera.py +++ b/opencsp/app/camera_calibration/lib/calibration_camera.py @@ -1,6 +1,7 @@ """Library of functions used to calibation a machine vision camera. Functions are based off OpenCV library. 
""" + from typing import Iterable import cv2 as cv diff --git a/opencsp/app/camera_calibration/lib/image_processing.py b/opencsp/app/camera_calibration/lib/image_processing.py index d9c3302be..efd6b449a 100644 --- a/opencsp/app/camera_calibration/lib/image_processing.py +++ b/opencsp/app/camera_calibration/lib/image_processing.py @@ -1,5 +1,6 @@ """Library of image processing functions used for camera calibration """ + import cv2 as cv import numpy as np diff --git a/opencsp/app/camera_calibration/test/test_camera_calibration.py b/opencsp/app/camera_calibration/test/test_camera_calibration.py index d0025aade..3b13feb9e 100644 --- a/opencsp/app/camera_calibration/test/test_camera_calibration.py +++ b/opencsp/app/camera_calibration/test/test_camera_calibration.py @@ -2,6 +2,7 @@ Change the boolean below to True and run to regenerate all test data """ + from glob import glob import os @@ -62,12 +63,9 @@ def setup_class(cls, regenerate=False): img_size = images[0].shape # Calibrate camera - ( - camera, - r_cam_object, - v_cam_object_cam, - calibration_error, - ) = cc.calibrate_camera(p_object, p_image, img_size, cam_name) + (camera, r_cam_object, v_cam_object_cam, calibration_error) = ( + cc.calibrate_camera(p_object, p_image, img_size, cam_name) + ) # Calculate reprojection errors errors = [] diff --git a/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py b/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py index 12fa6c34d..db7b07339 100644 --- a/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py +++ b/opencsp/app/scene_reconstruction/lib/SceneReconstruction.py @@ -1,5 +1,6 @@ """Photogrammetric reconstruction class based on images of Aruco markers """ + from glob import glob from os.path import join from typing import Iterable diff --git a/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py b/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py index e04e0a51d..3794b632c 100644 --- 
a/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py +++ b/opencsp/app/scene_reconstruction/test/generate_downsampled_dataset.py @@ -4,6 +4,7 @@ To update the expected data, run the corresponding example file. """ + from glob import glob import os from os.path import join @@ -58,8 +59,7 @@ def downsample_dataset(dir_input: str, dir_output: str) -> None: # Downsample aruco marker camera print('Downsampling camera...') - camera_aruco_ds = dd.downsample_camera( - join(dir_input, 'camera.h5'), n_aruco) + camera_aruco_ds = dd.downsample_camera(join(dir_input, 'camera.h5'), n_aruco) camera_aruco_ds.save_to_hdf(join(dir_output, 'camera.h5')) diff --git a/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py b/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py index dc9546c3c..da6bc0834 100644 --- a/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py +++ b/opencsp/app/scene_reconstruction/test/test_SceneReconstruction.py @@ -4,6 +4,7 @@ To create new test data, uncomment/comment the lines below. """ + import os from os.path import join import unittest diff --git a/opencsp/app/select_image_points/SelectImagePoints.py b/opencsp/app/select_image_points/SelectImagePoints.py index 683d91771..c6b196d7c 100644 --- a/opencsp/app/select_image_points/SelectImagePoints.py +++ b/opencsp/app/select_image_points/SelectImagePoints.py @@ -6,6 +6,7 @@ 's' key saves data. """ + import tkinter as tk from tkinter.filedialog import askopenfilename import os diff --git a/opencsp/app/sofast/SofastGUI.py b/opencsp/app/sofast/SofastGUI.py index 914822244..316c7a9f5 100644 --- a/opencsp/app/sofast/SofastGUI.py +++ b/opencsp/app/sofast/SofastGUI.py @@ -1,6 +1,7 @@ """GUI to control SOFAST image projection, image acquisition, and data capture. Can capture datasets and save to HDF format. 
""" + import datetime as dt import tkinter from tkinter import messagebox, simpledialog @@ -586,7 +587,9 @@ def _load_system_elements(self) -> None: """ if self.image_acquisition is not None and self.image_projection is not None: - self.system = SystemSofastFringe(self.image_projection, self.image_acquisition) + self.system = SystemSofastFringe( + self.image_projection, self.image_acquisition + ) def _save_measurement_data(self, file: str) -> None: """Saves last measurement to HDF file""" diff --git a/opencsp/app/sofast/lib/BlobIndex.py b/opencsp/app/sofast/lib/BlobIndex.py index 7da395da6..886524927 100644 --- a/opencsp/app/sofast/lib/BlobIndex.py +++ b/opencsp/app/sofast/lib/BlobIndex.py @@ -62,7 +62,9 @@ def __init__( idx_x_vec = np.arange(x_min, x_max + 1) # index idx_y_vec = np.arange(y_min, y_max + 1) # index self._idx_x_mat, self._idx_y_mat = np.meshgrid(idx_x_vec, idx_y_vec) # index - self._points_mat = np.zeros((y_max - y_min + 1, x_max - x_min + 1, 2)) * np.nan # pixels + self._points_mat = ( + np.zeros((y_max - y_min + 1, x_max - x_min + 1, 2)) * np.nan + ) # pixels self._point_indices_mat = ( np.zeros((y_max - y_min + 1, x_max - x_min + 1)) * np.nan # index ) @@ -122,8 +124,7 @@ def _nearest_unassigned_idx_from_xy_point_direction( dists_perp = np.abs(v_perp.dot(points_rel)) # Distance of points from line # Make mask of valid points mask = np.logical_and( - dists_axis > 0, - dists_perp / dists_axis <= self.search_perp_axis_ratio + dists_axis > 0, dists_perp / dists_axis <= self.search_perp_axis_ratio ) # Check there are points to find if mask.sum() == 0: @@ -359,19 +360,20 @@ def _extend_data(self, direction: Literal['x', 'y'], step: Literal[1, -1]) -> No pt_cur = pts[is_b == i_b] pt_prev = pts[is_b == idx_b_prev] if (len(pt_cur) > 1) or (len(pt_prev) > 1): - raise ValueError(f'Point index {idx_a:.0f}, {i_b:.0f} ' - 'was assigned more than once. 
' - 'Try tightening dot search settings.') + raise ValueError( + f'Point index {idx_a:.0f}, {i_b:.0f} ' + 'was assigned more than once. ' + 'Try tightening dot search settings.' + ) else: # Next iterations, use new points pt_prev = pt_cur pt_cur = self._points[idx_new] # Calculate deltas pt_exp = self._exp_pt_from_pt_pair(pt_cur, pt_prev) - success, ( - idx_new, - dist, - ) = self._nearest_unassigned_idx_from_xy_point_direction( - pt_cur, pt_exp + success, (idx_new, dist) = ( + self._nearest_unassigned_idx_from_xy_point_direction( + pt_cur, pt_exp + ) ) if not success: break diff --git a/opencsp/app/sofast/lib/CalibrateDisplayShape.py b/opencsp/app/sofast/lib/CalibrateDisplayShape.py index 049a28712..fd24a6244 100644 --- a/opencsp/app/sofast/lib/CalibrateDisplayShape.py +++ b/opencsp/app/sofast/lib/CalibrateDisplayShape.py @@ -1,6 +1,7 @@ """Class containing all screen distortion calibration routines. Saves distortion data and calibrated markers for camera position calibration. """ + from dataclasses import dataclass import os from warnings import warn @@ -15,7 +16,9 @@ from scipy.spatial.transform import Rotation from tqdm import tqdm -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.common.lib.camera.Camera import Camera import opencsp.app.sofast.lib.image_processing as ip from opencsp.common.lib.deflectometry.ImageProjection import CalParams @@ -548,10 +551,9 @@ def run_calibration(self, verbose: int = 0) -> None: self.visualize_xyz_screen_maps() -def interp_xy_screen_positions(im_x: np.ndarray, - im_y: np.ndarray, - x_sc: np.ndarray, - y_sc: np.ndarray) -> Vxy: +def interp_xy_screen_positions( + im_x: np.ndarray, im_y: np.ndarray, x_sc: np.ndarray, y_sc: np.ndarray +) -> Vxy: """ Calculates the interpolated XY screen positions given X/Y fractional screen maps and X/Y interpolation vectors. 
diff --git a/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py b/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py index 3f41245ec..b4cb230a9 100644 --- a/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py +++ b/opencsp/app/sofast/lib/CalibrateSofastFixedDots.py @@ -1,5 +1,6 @@ """Fixed pattern dot location calibration. """ + import os import cv2 as cv @@ -188,7 +189,8 @@ def _calculate_camera_poses(self) -> None: ret = self._images[cam_idx].attempt_calculate_pose(True) if ret == -1: lt.critical_and_raise( - ValueError, f'Camera pose {cam_idx:d} not calculated successfully') + ValueError, f'Camera pose {cam_idx:d} not calculated successfully' + ) self._rots_cams.append(Rotation.from_rotvec(self._images[cam_idx].rvec)) self._vecs_cams.append(Vxyz(self._images[cam_idx].tvec)) @@ -196,11 +198,18 @@ def _calculate_camera_poses(self) -> None: # Calculate reproj error errors = self._images[cam_idx].calc_reprojection_errors() # Log errors - lt.info(f'Camera {cam_idx:d} mean corner reprojection error: {errors.mean():.2f} pixels') - lt.info(f'Camera {cam_idx:d} min corner reprojection error: {errors.min():.2f} pixels') - lt.info(f'Camera {cam_idx:d} max corner reprojection error: {errors.mean():.2f} pixels') lt.info( - f'Camera {cam_idx:d} STDEV corner reprojection error: {errors.mean():.2f} pixels') + f'Camera {cam_idx:d} mean corner reprojection error: {errors.mean():.2f} pixels' + ) + lt.info( + f'Camera {cam_idx:d} min corner reprojection error: {errors.min():.2f} pixels' + ) + lt.info( + f'Camera {cam_idx:d} max corner reprojection error: {errors.mean():.2f} pixels' + ) + lt.info( + f'Camera {cam_idx:d} STDEV corner reprojection error: {errors.mean():.2f} pixels' + ) def _intersect_rays(self) -> None: """Intersects camera rays to find dot xyz locations""" @@ -224,21 +233,31 @@ def _intersect_rays(self) -> None: self._dot_points_xyz_mat[idx_y, idx_x, :] = point.data.squeeze() self._dot_intersection_dists = np.array(int_dists) - lt.info('Dot ray intersections mean 
intersection error: ' - f'{self._dot_intersection_dists.mean() * 1000:.1f} mm') - lt.info('Dot ray intersections min intersection error: ' - f'{self._dot_intersection_dists.min() * 1000:.1f} mm') - lt.info('Dot ray intersections max intersection error: ' - f'{self._dot_intersection_dists.max() * 1000:.1f} mm') - lt.info('Dot ray intersections STDEV of intersection error: ' - f'{self._dot_intersection_dists.std() * 1000:.1f} mm') + lt.info( + 'Dot ray intersections mean intersection error: ' + f'{self._dot_intersection_dists.mean() * 1000:.1f} mm' + ) + lt.info( + 'Dot ray intersections min intersection error: ' + f'{self._dot_intersection_dists.min() * 1000:.1f} mm' + ) + lt.info( + 'Dot ray intersections max intersection error: ' + f'{self._dot_intersection_dists.max() * 1000:.1f} mm' + ) + lt.info( + 'Dot ray intersections STDEV of intersection error: ' + f'{self._dot_intersection_dists.std() * 1000:.1f} mm' + ) def _plot_common_dots(self) -> None: """Plots common dots on images""" for idx_image in range(self._num_images): fig = plt.figure(f'image_{idx_image:d}_annotated_dots') plt.imshow(self._images[idx_image].image, cmap='gray') - plt.scatter(*self._dot_image_points_xy[idx_image].data, marker='.', color='red') + plt.scatter( + *self._dot_image_points_xy[idx_image].data, marker='.', color='red' + ) self.figures.append(fig) def _plot_marker_corners(self) -> None: diff --git a/opencsp/app/sofast/lib/DefinitionEnsemble.py b/opencsp/app/sofast/lib/DefinitionEnsemble.py index 78e86ce28..c8b1a0b17 100644 --- a/opencsp/app/sofast/lib/DefinitionEnsemble.py +++ b/opencsp/app/sofast/lib/DefinitionEnsemble.py @@ -1,5 +1,6 @@ """Data class to store facet ensemble optic definition for sofast """ + from copy import deepcopy import json @@ -166,7 +167,9 @@ def load_from_hdf(cls, file: str, prefix: str) -> 'DefinitionEnsemble': r_facet_ensemble = [Rotation.from_rotvec(r) for r in data['r_facet_ensemble']] ensemble_perimeter = data['ensemble_perimeter'] v_centroid_ensemble = 
Vxyz(data['v_centroid_ensemble']) - return cls(v_facet_locations, r_facet_ensemble, ensemble_perimeter, v_centroid_ensemble) + return cls( + v_facet_locations, r_facet_ensemble, ensemble_perimeter, v_centroid_ensemble + ) def _Vxyz_to_dict(V: Vxyz) -> dict: diff --git a/opencsp/app/sofast/lib/DefinitionFacet.py b/opencsp/app/sofast/lib/DefinitionFacet.py index 35420f46d..9337c16b7 100644 --- a/opencsp/app/sofast/lib/DefinitionFacet.py +++ b/opencsp/app/sofast/lib/DefinitionFacet.py @@ -1,5 +1,6 @@ """Data class to store single facet optic definition """ + import json from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -27,7 +28,9 @@ def __init__(self, v_facet_corners: Vxyz, v_facet_centroid: Vxyz): def copy(self) -> 'DefinitionFacet': """Returns copy of class""" - return DefinitionFacet(self.v_facet_corners.copy(), self.v_facet_centroid.copy()) + return DefinitionFacet( + self.v_facet_corners.copy(), self.v_facet_centroid.copy() + ) @classmethod def load_from_json(cls, file: str) -> 'DefinitionFacet': @@ -81,10 +84,7 @@ def save_to_hdf(self, file: str, prefix: str = '') -> None: prefix : str Prefix to append to folder path within HDF file (folders must be separated by "/") """ - data = [ - self.v_facet_corners.data, - self.v_facet_centroid.data, - ] + data = [self.v_facet_corners.data, self.v_facet_centroid.data] datasets = [ prefix + 'DefinitionFacet/v_facet_corners', prefix + 'DefinitionFacet/v_facet_centroid', diff --git a/opencsp/app/sofast/lib/DisplayShape.py b/opencsp/app/sofast/lib/DisplayShape.py index 91625dfb3..7fb79fe8e 100644 --- a/opencsp/app/sofast/lib/DisplayShape.py +++ b/opencsp/app/sofast/lib/DisplayShape.py @@ -123,9 +123,7 @@ def _init_interp_func(self): self.interp_func = lambda Vuv: self._interp_func_3D(Vuv, func_xyz) else: - raise ValueError( - f'Unknown screen model: {self.grid_data["screen_model"]}' - ) + raise ValueError(f'Unknown screen model: {self.grid_data["screen_model"]}') def _interp_func_rectangular2D(self, uv_display_pts: Vxy) -> 
Vxyz: """ @@ -228,9 +226,7 @@ def load_from_hdf(cls, file: str): grid_data['xyz_screen_coords'] = Vxyz(grid_data['xyz_screen_coords']) else: - raise ValueError( - f'Model, {grid_data["screen_model"]}, not supported.' - ) + raise ValueError(f'Model, {grid_data["screen_model"]}, not supported.') # Load display parameters datasets = [ @@ -265,9 +261,8 @@ def save_to_hdf(self, file: str): data = [] for dataset in self.grid_data.keys(): datasets.append('DisplayShape/' + dataset) - if ( - isinstance(self.grid_data[dataset], Vxy) - or isinstance(self.grid_data[dataset], Vxyz) + if isinstance(self.grid_data[dataset], Vxy) or isinstance( + self.grid_data[dataset], Vxyz ): data.append(self.grid_data[dataset].data) else: diff --git a/opencsp/app/sofast/lib/ImageCalibrationAbstract.py b/opencsp/app/sofast/lib/ImageCalibrationAbstract.py index f8133ef6b..d5c042bd3 100644 --- a/opencsp/app/sofast/lib/ImageCalibrationAbstract.py +++ b/opencsp/app/sofast/lib/ImageCalibrationAbstract.py @@ -183,7 +183,9 @@ def load_from_hdf(cls, file) -> 'ImageCalibrationAbstract': calibration_name = cls.get_calibration_name() if data['calibration_type'] != calibration_name: - raise ValueError(f'ImageCalibration file is not of type {calibration_name:s}') + raise ValueError( + f'ImageCalibration file is not of type {calibration_name:s}' + ) # Load grid data datasets = ['ImageCalibration/camera_values', 'ImageCalibration/display_values'] diff --git a/opencsp/app/sofast/lib/ImageCalibrationGlobal.py b/opencsp/app/sofast/lib/ImageCalibrationGlobal.py index 2406e66a3..63eac4bfd 100644 --- a/opencsp/app/sofast/lib/ImageCalibrationGlobal.py +++ b/opencsp/app/sofast/lib/ImageCalibrationGlobal.py @@ -1,7 +1,9 @@ from numpy import ndarray from opencsp.app.sofast.lib.ImageCalibrationAbstract import ImageCalibrationAbstract -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as 
Measurement, +) class ImageCalibrationGlobal(ImageCalibrationAbstract): diff --git a/opencsp/app/sofast/lib/ImageCalibrationScaling.py b/opencsp/app/sofast/lib/ImageCalibrationScaling.py index 8dbcb9a98..86268a3e1 100644 --- a/opencsp/app/sofast/lib/ImageCalibrationScaling.py +++ b/opencsp/app/sofast/lib/ImageCalibrationScaling.py @@ -2,7 +2,9 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationAbstract import ImageCalibrationAbstract -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) class ImageCalibrationScaling(ImageCalibrationAbstract): diff --git a/opencsp/app/sofast/lib/MeasurementSofastFringe.py b/opencsp/app/sofast/lib/MeasurementSofastFringe.py index c809d3fc4..e7ecfbbad 100644 --- a/opencsp/app/sofast/lib/MeasurementSofastFringe.py +++ b/opencsp/app/sofast/lib/MeasurementSofastFringe.py @@ -1,5 +1,6 @@ """Measurement class for SofastFringe """ + import datetime as dt import numpy as np @@ -92,7 +93,7 @@ def fringe_images_y(self) -> np.ndarray: @property def fringe_images_x(self) -> np.ndarray: """Returns raw x-only fringes""" - return self.fringe_images[..., self.num_y_ims:] + return self.fringe_images[..., self.num_y_ims :] @property def fringe_images_calibrated(self) -> np.ndarray: @@ -110,7 +111,7 @@ def fringe_images_y_calibrated(self) -> np.ndarray: @property def fringe_images_x_calibrated(self) -> np.ndarray: """Returns calibrated x-only fringes""" - return self.fringe_images_calibrated[..., self.num_y_ims:] + return self.fringe_images_calibrated[..., self.num_y_ims :] def calibrate_fringe_images( self, calibration: ImageCalibrationAbstract, **kwargs diff --git a/opencsp/app/sofast/lib/ParamsSofastFixed.py b/opencsp/app/sofast/lib/ParamsSofastFixed.py index 3defeb2b0..031508912 100644 --- a/opencsp/app/sofast/lib/ParamsSofastFixed.py +++ b/opencsp/app/sofast/lib/ParamsSofastFixed.py 
@@ -1,7 +1,6 @@ """Parameters class for FixedPatternScreen class""" -from opencsp.app.sofast.lib.ParamsOpticGeometry import ( - ParamsOpticGeometry, -) + +from opencsp.app.sofast.lib.ParamsOpticGeometry import ParamsOpticGeometry from opencsp.app.sofast.lib.DebugOpticsGeometry import DebugOpticsGeometry from opencsp.common.lib.deflectometry.SlopeSolverDataDebug import SlopeSolverDataDebug diff --git a/opencsp/app/sofast/lib/ParamsSofastFringe.py b/opencsp/app/sofast/lib/ParamsSofastFringe.py index a7e126f32..4015b1e05 100644 --- a/opencsp/app/sofast/lib/ParamsSofastFringe.py +++ b/opencsp/app/sofast/lib/ParamsSofastFringe.py @@ -1,11 +1,10 @@ """Parameter dataclass for SofastFringe """ + from dataclasses import dataclass, field -from opencsp.app.sofast.lib.ParamsOpticGeometry import ( - ParamsOpticGeometry, -) +from opencsp.app.sofast.lib.ParamsOpticGeometry import ParamsOpticGeometry from opencsp.app.sofast.lib.DebugOpticsGeometry import DebugOpticsGeometry from opencsp.common.lib.deflectometry.SlopeSolverDataDebug import SlopeSolverDataDebug import opencsp.common.lib.tool.hdf5_tools as hdf5_tools @@ -20,15 +19,15 @@ class ParamsSofastFringe: mask_filt_thresh: int = 4 mask_thresh_active_pixels: float = 0.05 mask_keep_largest_area: bool = False - geometry_params: ParamsOpticGeometry = field( - default_factory=ParamsOpticGeometry - ) + geometry_params: ParamsOpticGeometry = field(default_factory=ParamsOpticGeometry) # Debug objects slope_solver_data_debug: SlopeSolverDataDebug = field( default_factory=SlopeSolverDataDebug ) - geometry_data_debug: DebugOpticsGeometry = field(default_factory=DebugOpticsGeometry) + geometry_data_debug: DebugOpticsGeometry = field( + default_factory=DebugOpticsGeometry + ) def save_to_hdf(self, file: str, prefix: str = ''): """Saves data to given HDF5 file. Data is stored in PREFIX + ParamsSofastFringe/... 
diff --git a/opencsp/app/sofast/lib/ProcessSofastFixed.py b/opencsp/app/sofast/lib/ProcessSofastFixed.py index e95cb1800..49ad035aa 100644 --- a/opencsp/app/sofast/lib/ProcessSofastFixed.py +++ b/opencsp/app/sofast/lib/ProcessSofastFixed.py @@ -1,5 +1,6 @@ """Class that handles the processing of fixed pattern deflectometry data. """ + from typing import Literal import cv2 as cv diff --git a/opencsp/app/sofast/lib/ProcessSofastFringe.py b/opencsp/app/sofast/lib/ProcessSofastFringe.py index f91d6bfe1..c8790bd0f 100644 --- a/opencsp/app/sofast/lib/ProcessSofastFringe.py +++ b/opencsp/app/sofast/lib/ProcessSofastFringe.py @@ -1,12 +1,15 @@ """Controls the processing of Sofast measurement data to calculate surface slopes. """ + from typing import Literal import warnings import numpy as np -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ParamsSofastFringe import ParamsSofastFringe from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.csp.Facet import Facet @@ -194,7 +197,9 @@ def __init__( self.data_geometry_general: cdc.CalculationDataGeometryGeneral = None self.data_image_processing_general: cdc.CalculationImageProcessingGeneral = None self.data_geometry_facet: list[cdc.CalculationDataGeometryFacet] = None - self.data_image_processing_facet: list[cdc.CalculationImageProcessingFacet] = None + self.data_image_processing_facet: list[cdc.CalculationImageProcessingFacet] = ( + None + ) self.data_error: cdc.CalculationError = None self.data_characterization_facet: list[SlopeSolverData] = None @@ -502,17 +507,17 @@ def _process_display(self) -> None: screen_ys = 1.0 - screen_ys # Store screen points in Vxy v_screen_points_fractional_screens = Vxy((screen_xs, screen_ys)) - self.data_geometry_facet[ - idx_facet - ].v_screen_points_fractional_screens = 
v_screen_points_fractional_screens + self.data_geometry_facet[idx_facet].v_screen_points_fractional_screens = ( + v_screen_points_fractional_screens + ) # Undistort screen points (display coordinates) v_screen_points_screen = self.display.interp_func( v_screen_points_fractional_screens ) # meters, display coordinates - self.data_geometry_facet[ - idx_facet - ].v_screen_points_screen = v_screen_points_screen + self.data_geometry_facet[idx_facet].v_screen_points_screen = ( + v_screen_points_screen + ) # Check for nans returning from screen point calculation nan_mask = np.isnan(v_screen_points_screen.data).sum(0).astype(bool) @@ -532,15 +537,13 @@ def _process_display(self) -> None: ].v_screen_points_fractional_screens = v_screen_points_fractional_screens[ np.logical_not(nan_mask) ] - self.data_geometry_facet[ - idx_facet - ].v_screen_points_screen = v_screen_points_screen[ - np.logical_not(nan_mask) - ] + self.data_geometry_facet[idx_facet].v_screen_points_screen = ( + v_screen_points_screen[np.logical_not(nan_mask)] + ) # Save bad pixel mask - self.data_image_processing_facet[ - idx_facet - ].mask_bad_pixels = mask_bad_pixels + self.data_image_processing_facet[idx_facet].mask_bad_pixels = ( + mask_bad_pixels + ) # Calculate pixel pointing directions (camera coordinates) u_pixel_pointing_cam = ip.calculate_active_pixels_vectors( @@ -548,17 +551,17 @@ def _process_display(self) -> None: ) # Convert to optic coordinates u_pixel_pointing_facet = u_pixel_pointing_cam.rotate(ori.r_cam_optic) - self.data_geometry_facet[ - idx_facet - ].u_pixel_pointing_facet = u_pixel_pointing_facet + self.data_geometry_facet[idx_facet].u_pixel_pointing_facet = ( + u_pixel_pointing_facet + ) # Convert to optic coordinates v_screen_points_facet = ori.trans_screen_optic.apply( self.data_geometry_facet[idx_facet].v_screen_points_screen ) - self.data_geometry_facet[ - idx_facet - ].v_screen_points_facet = v_screen_points_facet + self.data_geometry_facet[idx_facet].v_screen_points_facet = ( + 
v_screen_points_facet + ) def _solve_slopes(self, surface_data: list[dict]) -> None: """ @@ -769,7 +772,9 @@ def get_optic( facet = Facet(mirror) # Locate facet if self.optic_type == 'multi': - trans: TransformXYZ = self.data_characterization_ensemble[idx_mirror].trans_facet_ensemble + trans: TransformXYZ = self.data_characterization_ensemble[ + idx_mirror + ].trans_facet_ensemble facet.set_position_in_space(trans.V, trans.R) # Save facets facets.append(facet) @@ -794,7 +799,9 @@ def save_to_hdf(self, file: str) -> None: if self.data_error is not None: self.data_error.save_to_hdf(file, 'DataSofastCalculation/general/') self.data_geometry_general.save_to_hdf(file, 'DataSofastCalculation/general/') - self.data_image_processing_general.save_to_hdf(file, 'DataSofastCalculation/general/') + self.data_image_processing_general.save_to_hdf( + file, 'DataSofastCalculation/general/' + ) # Sofast parameters self.params.save_to_hdf(file, 'DataSofastInput/') @@ -803,11 +810,14 @@ def save_to_hdf(self, file: str) -> None: if self.data_facet_def is not None: for idx_facet, facet_data in enumerate(self.data_facet_def): facet_data.save_to_hdf( - file, f'DataSofastInput/optic_definition/facet_{idx_facet:03d}/') + file, f'DataSofastInput/optic_definition/facet_{idx_facet:03d}/' + ) # Ensemble definition if self.data_ensemble_def is not None: - self.data_ensemble_def.save_to_hdf(file, 'DataSofastInput/optic_definition/') + self.data_ensemble_def.save_to_hdf( + file, 'DataSofastInput/optic_definition/' + ) # Surface definition # TODO: make surface_params a data class @@ -815,22 +825,29 @@ def save_to_hdf(self, file: str) -> None: data = list(surface_params.values()) datasets = list(surface_params.keys()) datasets = [ - f'DataSofastInput/optic_definition/facet_{idx_facet:03d}/surface_definition/' + d for d in datasets] + f'DataSofastInput/optic_definition/facet_{idx_facet:03d}/surface_definition/' + + d + for d in datasets + ] save_hdf5_datasets(data, datasets, file) # Calculations, one 
per facet for idx_facet in range(self.num_facets): # Save facet slope data self.data_characterization_facet[idx_facet].save_to_hdf( - file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) # Save facet geometry data self.data_geometry_facet[idx_facet].save_to_hdf( - file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) # Save facet image processing data self.data_image_processing_facet[idx_facet].save_to_hdf( - file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) if self.data_characterization_ensemble: # Save ensemle data self.data_characterization_ensemble[idx_facet].save_to_hdf( - file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/') + file, f'DataSofastCalculation/facet/facet_{idx_facet:03d}/' + ) diff --git a/opencsp/app/sofast/lib/SpatialOrientation.py b/opencsp/app/sofast/lib/SpatialOrientation.py index 719fa189f..1020b5457 100644 --- a/opencsp/app/sofast/lib/SpatialOrientation.py +++ b/opencsp/app/sofast/lib/SpatialOrientation.py @@ -60,14 +60,12 @@ def __init__( def __copy__(self) -> 'SpatialOrientation': """Returns a copy of spatial orientation""" - r_cam_screen = Rotation.from_rotvec( - self.r_cam_screen.as_rotvec().copy()) + r_cam_screen = Rotation.from_rotvec(self.r_cam_screen.as_rotvec().copy()) v_cam_screen_cam = self.v_cam_screen_cam.copy() ori = SpatialOrientation(r_cam_screen, v_cam_screen_cam) if self.optic_oriented: - r_cam_optic = Rotation.from_rotve( - self.r_cam_optic.as_rotvec().copy()) + r_cam_optic = Rotation.from_rotve(self.r_cam_optic.as_rotvec().copy()) v_cam_optic_cam = self.v_cam_optic_cam.copy() ori.orient_optic_cam(r_cam_optic, v_cam_optic_cam) diff --git a/opencsp/app/sofast/lib/SystemSofastFringe.py b/opencsp/app/sofast/lib/SystemSofastFringe.py index f63359b93..91dbe4a04 100644 --- 
a/opencsp/app/sofast/lib/SystemSofastFringe.py +++ b/opencsp/app/sofast/lib/SystemSofastFringe.py @@ -1,5 +1,6 @@ """Class for controlling displaying Sofast patterns and capturing images """ + import datetime as dt from typing import Callable from warnings import warn @@ -8,7 +9,9 @@ import numpy as np from opencsp.app.sofast.lib.Fringes import Fringes -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.common.lib.camera.ImageAcquisitionAbstract import ImageAcquisitionAbstract from opencsp.common.lib.deflectometry.ImageProjection import ImageProjection from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -226,7 +229,7 @@ def load_fringes(self, fringes: Fringes, min_display_value: int) -> None: for idx in range(fringe_images_base.shape[2]): # Create image self.fringe_images_to_display.append( - np.concatenate([fringe_images_base[:, :, idx: idx + 1]] * 3, axis=2) + np.concatenate([fringe_images_base[:, :, idx : idx + 1]] * 3, axis=2) ) def check_saturation( diff --git a/opencsp/app/sofast/lib/process_optics_geometry.py b/opencsp/app/sofast/lib/process_optics_geometry.py index 66f64f1ab..ed2597954 100644 --- a/opencsp/app/sofast/lib/process_optics_geometry.py +++ b/opencsp/app/sofast/lib/process_optics_geometry.py @@ -1,4 +1,5 @@ """Library of functions used to process the geometry of a deflectometry setup.""" + from copy import copy import matplotlib.pyplot as plt @@ -10,9 +11,7 @@ import opencsp.app.sofast.lib.calculation_data_classes as cdc from opencsp.app.sofast.lib.DefinitionEnsemble import DefinitionEnsemble from opencsp.app.sofast.lib.DefinitionFacet import DefinitionFacet -from opencsp.app.sofast.lib.ParamsOpticGeometry import ( - ParamsOpticGeometry, -) +from opencsp.app.sofast.lib.ParamsOpticGeometry import ParamsOpticGeometry from opencsp.app.sofast.lib.DebugOpticsGeometry import 
DebugOpticsGeometry import opencsp.app.sofast.lib.image_processing as ip from opencsp.app.sofast.lib.SpatialOrientation import SpatialOrientation @@ -565,12 +564,12 @@ def process_multifacet_geometry( v_facet_locs_ensemble, r_ensemble_cam_refine_1, v_cam_ensemble_cam_refine_1 ) for idx in range(num_facets): - data_image_processing_facet[ - idx - ].v_facet_corners_image_exp = v_facet_corners_image_exp[idx] - data_image_processing_facet[ - idx - ].v_facet_centroid_image_exp = v_uv_facet_cent_exp[idx] + data_image_processing_facet[idx].v_facet_corners_image_exp = ( + v_facet_corners_image_exp[idx] + ) + data_image_processing_facet[idx].v_facet_centroid_image_exp = ( + v_uv_facet_cent_exp[idx] + ) # Refine facet corners args = [ diff --git a/opencsp/app/sofast/lib/save_DisplayShape_file.py b/opencsp/app/sofast/lib/save_DisplayShape_file.py index 4dfc89ac4..bca6eb245 100644 --- a/opencsp/app/sofast/lib/save_DisplayShape_file.py +++ b/opencsp/app/sofast/lib/save_DisplayShape_file.py @@ -1,5 +1,6 @@ """Script that saves a Sofast physical setup file from previously processed data """ + from numpy import ndarray from scipy.spatial.transform import Rotation diff --git a/opencsp/app/sofast/lib/visualize_setup.py b/opencsp/app/sofast/lib/visualize_setup.py index 2f8937ce6..d6240ba02 100644 --- a/opencsp/app/sofast/lib/visualize_setup.py +++ b/opencsp/app/sofast/lib/visualize_setup.py @@ -1,6 +1,7 @@ """Library with function used to visualise a given Sofast setup in 3D given a display and camera file. Useful for debugging calibration errors. 
""" + import matplotlib.pyplot as plt import numpy as np from scipy.spatial.transform import Rotation diff --git a/opencsp/app/sofast/test/ImageAcquisition_no_camera.py b/opencsp/app/sofast/test/ImageAcquisition_no_camera.py index e5f376a99..1bd885629 100644 --- a/opencsp/app/sofast/test/ImageAcquisition_no_camera.py +++ b/opencsp/app/sofast/test/ImageAcquisition_no_camera.py @@ -1,4 +1,5 @@ """Representation of a notional camera for image acquisition""" + import numpy as np from opencsp.common.lib.camera.ImageAcquisitionAbstract import ImageAcquisitionAbstract diff --git a/opencsp/app/sofast/test/test_CalibrateDisplayShape.py b/opencsp/app/sofast/test/test_CalibrateDisplayShape.py index 62d1aa898..d49273993 100644 --- a/opencsp/app/sofast/test/test_CalibrateDisplayShape.py +++ b/opencsp/app/sofast/test/test_CalibrateDisplayShape.py @@ -1,5 +1,6 @@ """Tests Sofast screen distortion calibration """ + from os.path import join import unittest @@ -12,7 +13,9 @@ CalibrateDisplayShape, DataInput, ) -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.deflectometry.ImageProjection import ImageProjection from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -27,9 +30,12 @@ def setUpClass(cls): on the dir_input path. 
""" # Define default data directories - dir_input_sofast = join(opencsp_code_dir(), 'app/sofast/test/data/data_measurement') - dir_input_def = join(opencsp_code_dir(), - 'common/lib/deflectometry/test/data/data_measurement') + dir_input_sofast = join( + opencsp_code_dir(), 'app/sofast/test/data/data_measurement' + ) + dir_input_def = join( + opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_measurement' + ) dir_output = join(opencsp_code_dir(), 'app/sofast/test/data/data_expected') verbose = 1 # 0=no output, 1=only print outputs, 2=print outputs and show plots, 3=plots only with no printing diff --git a/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py b/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py index a64a969fb..24f23bd3e 100644 --- a/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py +++ b/opencsp/app/sofast/test/test_CalibrateSofastFixedDots.py @@ -3,6 +3,7 @@ To create new test data, copy the results from the output folder into the "calculations" folder. 
""" + import os from os.path import join, dirname, exists @@ -21,8 +22,7 @@ @pytest.mark.skip("No unit test data (yet)") def test_FixedPatternSetupCalibrate(): - """Tests dot-location calibration - """ + """Tests dot-location calibration""" # Define dot location images and origins base_dir = join( opencsp_code_dir(), @@ -37,13 +37,17 @@ def test_FixedPatternSetupCalibrate(): join(base_dir, 'measurements/images/DSC03970.JPG'), join(base_dir, 'measurements/images/DSC03972.JPG'), ] - origins = np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 + origins = ( + np.array(([4950, 4610, 4221, 3617], [3359, 3454, 3467, 3553]), dtype=float) / 4 + ) origins = Vxy(origins.astype(int)) # Define other files file_camera_marker = join(base_dir, 'measurements/camera_image_calibration.h5') file_xyz_points = join(base_dir, 'measurements/point_locations.csv') - file_fpd_dot_locs_exp = join(base_dir, 'calculations/fixed_pattern_dot_locations.h5') + file_fpd_dot_locs_exp = join( + base_dir, 'calculations/fixed_pattern_dot_locations.h5' + ) dir_save = join(dirname(__file__), 'data/output/dot_location_calibration') if not exists(dir_save): @@ -68,9 +72,9 @@ def test_FixedPatternSetupCalibrate(): files, origins, camera_marker, pts_xyz_corners, ids_corners, -32, 31, -31, 32 ) cal_dot_locs.plot = True - cal_dot_locs.blob_search_threshold = 3. - cal_dot_locs.blob_detector.minArea = 3. - cal_dot_locs.blob_detector.maxArea = 30. 
+ cal_dot_locs.blob_search_threshold = 3.0 + cal_dot_locs.blob_detector.minArea = 3.0 + cal_dot_locs.blob_detector.maxArea = 30.0 cal_dot_locs.run() # Save data diff --git a/opencsp/app/sofast/test/test_Display.py b/opencsp/app/sofast/test/test_Display.py index f74cd73dd..983467cb9 100644 --- a/opencsp/app/sofast/test/test_Display.py +++ b/opencsp/app/sofast/test/test_Display.py @@ -1,5 +1,6 @@ """Unit test suite to test DisplayShape class """ + import unittest import numpy as np @@ -75,7 +76,9 @@ def test_rectangular2D(self): v_cam_screen_screen = Vxyz((0, 0, 1)) r_screen_cam = Rotation.from_rotvec(np.array([0.0, 0.0, 0.0])) name = 'Test DisplayShape' - disp = DisplayShape(v_cam_screen_screen, r_screen_cam, self.grid_data_rect2D, name) + disp = DisplayShape( + v_cam_screen_screen, r_screen_cam, self.grid_data_rect2D, name + ) # Perform calculation calc = disp.interp_func(self.test_Vxy_pts) diff --git a/opencsp/app/sofast/test/test_Fringes.py b/opencsp/app/sofast/test/test_Fringes.py index 44e8e4ec7..a091f45be 100644 --- a/opencsp/app/sofast/test/test_Fringes.py +++ b/opencsp/app/sofast/test/test_Fringes.py @@ -1,5 +1,6 @@ """Unit test suite to test Fringes class """ + from opencsp.app.sofast.lib.Fringes import Fringes diff --git a/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py b/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py index b816fbd2d..caaea162c 100644 --- a/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py +++ b/opencsp/app/sofast/test/test_ImageCalibrationGlobal.py @@ -1,11 +1,14 @@ """Unit test suite to test ImageCalibrationGlobal class """ + import datetime as dt import numpy as np from opencsp.app.sofast.lib.ImageCalibrationGlobal import ImageCalibrationGlobal -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.common.lib.geometry.Vxyz import Vxyz diff --git 
a/opencsp/app/sofast/test/test_SpatialOrientation.py b/opencsp/app/sofast/test/test_SpatialOrientation.py index b2eff2c4c..b0d6e61ea 100644 --- a/opencsp/app/sofast/test/test_SpatialOrientation.py +++ b/opencsp/app/sofast/test/test_SpatialOrientation.py @@ -1,5 +1,6 @@ """Unit test suite to test the SpatialOrientation class """ + import os import unittest diff --git a/opencsp/app/sofast/test/test_SystemSofastFringe.py b/opencsp/app/sofast/test/test_SystemSofastFringe.py index ad7f5e4c5..7cfb3cdf6 100644 --- a/opencsp/app/sofast/test/test_SystemSofastFringe.py +++ b/opencsp/app/sofast/test/test_SystemSofastFringe.py @@ -1,5 +1,6 @@ """Unit test suite to test the System class """ + import os import pytest @@ -14,9 +15,7 @@ @pytest.mark.no_xvfb def test_SystemSofastFringe(): # Get test data location - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Create fringe object periods_x = [0.9, 3.9] diff --git a/opencsp/app/sofast/test/test_image_processing.py b/opencsp/app/sofast/test/test_image_processing.py index 9599f9984..5b78e1175 100644 --- a/opencsp/app/sofast/test/test_image_processing.py +++ b/opencsp/app/sofast/test/test_image_processing.py @@ -1,5 +1,6 @@ """Unit test suite to test image_processing library """ + import os from os.path import join import unittest @@ -8,7 +9,9 @@ from scipy.spatial.transform import Rotation from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ParamsSofastFringe import ParamsSofastFringe from opencsp.common.lib.camera.Camera import Camera import opencsp.app.sofast.lib.image_processing as ip diff --git 
a/opencsp/app/sofast/test/test_integration_multi_facet.py b/opencsp/app/sofast/test/test_integration_multi_facet.py index 44e5f4ed7..447c8cbef 100644 --- a/opencsp/app/sofast/test/test_integration_multi_facet.py +++ b/opencsp/app/sofast/test/test_integration_multi_facet.py @@ -1,5 +1,6 @@ """Integration test. Testing processing of a 'multi_facet' type optic. """ + import os import unittest @@ -8,7 +9,9 @@ from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.common.lib.camera.Camera import Camera from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display from opencsp.app.sofast.lib.DefinitionEnsemble import DefinitionEnsemble @@ -114,7 +117,9 @@ def setUpClass(cls, base_dir: str | None = None): ] data = load_hdf5_datasets(datasets, file_dataset) facet_data.append( - DefinitionFacet(Vxyz(data['v_facet_corners']), Vxyz(data['v_centroid_facet'])) + DefinitionFacet( + Vxyz(data['v_facet_corners']), Vxyz(data['v_centroid_facet']) + ) ) # Load surface data diff --git a/opencsp/app/sofast/test/test_integration_single_facet.py b/opencsp/app/sofast/test/test_integration_single_facet.py index 4fc9a26f1..264688034 100644 --- a/opencsp/app/sofast/test/test_integration_single_facet.py +++ b/opencsp/app/sofast/test/test_integration_single_facet.py @@ -1,5 +1,6 @@ """Integration test. Testing processing of a 'single_facet' type optic. 
""" + import glob import os import unittest @@ -7,7 +8,9 @@ import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.common.lib.camera.Camera import Camera from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display diff --git a/opencsp/app/sofast/test/test_integration_undefined.py b/opencsp/app/sofast/test/test_integration_undefined.py index 84fceae1e..c0a3dfac0 100644 --- a/opencsp/app/sofast/test/test_integration_undefined.py +++ b/opencsp/app/sofast/test/test_integration_undefined.py @@ -1,11 +1,14 @@ """Integration test. Testing processing of an 'undefined' type optic. """ + import os import numpy as np from opencsp.app.sofast.lib.ImageCalibrationScaling import ImageCalibrationScaling -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.ProcessSofastFringe import ProcessSofastFringe as Sofast from opencsp.common.lib.camera.Camera import Camera from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display @@ -15,9 +18,7 @@ def test_undefined(): # Get test data location - base_dir = os.path.join( - opencsp_code_dir(), 'test/data/measurements_sofast_fringe' - ) + base_dir = os.path.join(opencsp_code_dir(), 'test/data/measurements_sofast_fringe') # Directory Setup file_dataset = os.path.join(base_dir, 'calculations_undefined_mirror/data.h5') diff --git a/opencsp/app/sofast/test/test_project_fixed_pattern_target.py b/opencsp/app/sofast/test/test_project_fixed_pattern_target.py index 4f1280872..322ae7c55 100644 --- 
a/opencsp/app/sofast/test/test_project_fixed_pattern_target.py +++ b/opencsp/app/sofast/test/test_project_fixed_pattern_target.py @@ -1,5 +1,6 @@ """Tests projecting a test dot pattern onto a screen """ + import os import pytest diff --git a/opencsp/app/sofast/test/test_save_DisplayShape_file.py b/opencsp/app/sofast/test/test_save_DisplayShape_file.py index 165324973..64b5c0bd0 100644 --- a/opencsp/app/sofast/test/test_save_DisplayShape_file.py +++ b/opencsp/app/sofast/test/test_save_DisplayShape_file.py @@ -3,9 +3,7 @@ import numpy as np -from opencsp.app.sofast.lib.save_DisplayShape_file import ( - save_DisplayShape_file, -) +from opencsp.app.sofast.lib.save_DisplayShape_file import save_DisplayShape_file from opencsp.common.lib.tool.hdf5_tools import load_hdf5_datasets from opencsp.common.lib.geometry.Vxy import Vxy from opencsp.common.lib.geometry.Vxyz import Vxyz @@ -17,21 +15,28 @@ class test_save_physical_setup_file(unittest.TestCase): def test_save_physical_setup_file(self): """Loads data and saves test Display file""" # Define input file directory - dir_input_sofast = join(opencsp_code_dir(), 'app/sofast/test/data/data_expected') - dir_input_def = join(opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_expected') + dir_input_sofast = join( + opencsp_code_dir(), 'app/sofast/test/data/data_expected' + ) + dir_input_def = join( + opencsp_code_dir(), 'common/lib/deflectometry/test/data/data_expected' + ) dir_output = join(opencsp_code_dir(), 'app/sofast/test/data/output') file_save = join(dir_output, 'test_physical_setup_file.h5') ft.create_directories_if_necessary(dir_output) # Define data files - file_screen_distortion_data = join(dir_input_sofast, 'screen_distortion_data_100_100.h5') + file_screen_distortion_data = join( + dir_input_sofast, 'screen_distortion_data_100_100.h5' + ) file_cam = join(dir_input_def, 'camera_rvec_tvec.csv') # Load data name = 'Test Physical Setup File' data_dist = load_hdf5_datasets( - ['pts_xy_screen_fraction', 
'pts_xyz_screen_coords'], file_screen_distortion_data + ['pts_xy_screen_fraction', 'pts_xyz_screen_coords'], + file_screen_distortion_data, ) screen_distortion_data = { 'pts_xy_screen_fraction': Vxy(data_dist['pts_xy_screen_fraction']), diff --git a/opencsp/app/sofast/test/test_spatial_processing.py b/opencsp/app/sofast/test/test_spatial_processing.py index 91809b256..7143b1a7b 100644 --- a/opencsp/app/sofast/test/test_spatial_processing.py +++ b/opencsp/app/sofast/test/test_spatial_processing.py @@ -1,5 +1,6 @@ """Unit test suite to test the spatial_processing library """ + import os import unittest @@ -7,7 +8,9 @@ from scipy.spatial.transform import Rotation from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) import opencsp.app.sofast.lib.spatial_processing as sp from opencsp.common.lib.camera.Camera import Camera from opencsp.common.lib.geometry.Vxy import Vxy diff --git a/opencsp/app/target/target_color/lib/ImageColor.py b/opencsp/app/target/target_color/lib/ImageColor.py index e51d26543..896f89a25 100644 --- a/opencsp/app/target/target_color/lib/ImageColor.py +++ b/opencsp/app/target/target_color/lib/ImageColor.py @@ -2,6 +2,7 @@ Tool for matching pixels between two images and viewing results. 
""" + import cv2 as cv import imageio.v3 as imageio import matplotlib.pyplot as plt diff --git a/opencsp/app/target/target_color/test/test_ImageColor.py b/opencsp/app/target/target_color/test/test_ImageColor.py index 13e7998f5..ee3e6d64e 100644 --- a/opencsp/app/target/target_color/test/test_ImageColor.py +++ b/opencsp/app/target/target_color/test/test_ImageColor.py @@ -1,5 +1,6 @@ """Unit test for ImageColor class """ + import numpy as np from opencsp.app.target.target_color.lib.ImageColor import ImageColor diff --git a/opencsp/common/lib/csp/Facet.py b/opencsp/common/lib/csp/Facet.py index 907c85af3..235c6f064 100644 --- a/opencsp/common/lib/csp/Facet.py +++ b/opencsp/common/lib/csp/Facet.py @@ -1,4 +1,5 @@ """Facet class inherited by all facet classes""" + from typing import Callable import numpy as np diff --git a/opencsp/common/lib/csp/FacetEnsemble.py b/opencsp/common/lib/csp/FacetEnsemble.py index 9b91b4468..0c38371dc 100644 --- a/opencsp/common/lib/csp/FacetEnsemble.py +++ b/opencsp/common/lib/csp/FacetEnsemble.py @@ -1,4 +1,5 @@ """Rigid ensemble of facets""" + from typing import Callable import numpy as np from scipy.spatial.transform import Rotation diff --git a/opencsp/common/lib/csp/MirrorAbstract.py b/opencsp/common/lib/csp/MirrorAbstract.py index b08ca2741..aceb91e20 100644 --- a/opencsp/common/lib/csp/MirrorAbstract.py +++ b/opencsp/common/lib/csp/MirrorAbstract.py @@ -1,5 +1,6 @@ """Abstract mirror representing a single reflective surface """ + from abc import ABC, abstractmethod from matplotlib.tri import Triangulation diff --git a/opencsp/common/lib/csp/MirrorParametric.py b/opencsp/common/lib/csp/MirrorParametric.py index cafc500c1..b0c08d72a 100644 --- a/opencsp/common/lib/csp/MirrorParametric.py +++ b/opencsp/common/lib/csp/MirrorParametric.py @@ -1,6 +1,7 @@ """Parametric mirror representing a single reflective surface defined by an algebraic function. 
""" + import inspect from typing import Callable diff --git a/opencsp/common/lib/csp/MirrorParametricRectangular.py b/opencsp/common/lib/csp/MirrorParametricRectangular.py index 5451f1a1b..10888fe5a 100644 --- a/opencsp/common/lib/csp/MirrorParametricRectangular.py +++ b/opencsp/common/lib/csp/MirrorParametricRectangular.py @@ -1,6 +1,7 @@ """Parametric rectangular mirror wtih origin in center of rectangular region representing a single reflective surface defined by an algebraic function. """ + from typing import Callable from opencsp.common.lib.geometry.RegionXY import RegionXY diff --git a/opencsp/common/lib/csp/MirrorPoint.py b/opencsp/common/lib/csp/MirrorPoint.py index 64904d841..bc8a6dd40 100644 --- a/opencsp/common/lib/csp/MirrorPoint.py +++ b/opencsp/common/lib/csp/MirrorPoint.py @@ -1,6 +1,7 @@ """Mirror class representing mirrors with scattered surface point locations. """ + from typing import Literal from warnings import warn diff --git a/opencsp/common/lib/csp/OpticOrientation.py b/opencsp/common/lib/csp/OpticOrientation.py index 99dc5c04e..137b40f6a 100644 --- a/opencsp/common/lib/csp/OpticOrientation.py +++ b/opencsp/common/lib/csp/OpticOrientation.py @@ -1,5 +1,6 @@ """Class to handle relative orientations of heirarchical optics. 
""" + from opencsp.common.lib.geometry.TransformXYZ import TransformXYZ diff --git a/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py b/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py index 6c3f2136e..a03f3684e 100644 --- a/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py +++ b/opencsp/common/lib/csp/VisualizeOrthorectifiedSlopeAbstract.py @@ -1,6 +1,7 @@ """Abstract class used for visualizing orthorectified slope looking down from +z axis """ + from abc import abstractmethod from typing import Literal diff --git a/opencsp/common/lib/csp/standard_output.py b/opencsp/common/lib/csp/standard_output.py index 68283350b..65a3fcb25 100644 --- a/opencsp/common/lib/csp/standard_output.py +++ b/opencsp/common/lib/csp/standard_output.py @@ -1,6 +1,7 @@ """Library of functions used to display/save the suite of standard output plots after measuring a CSP Mirror/FacetEnsemble. """ + from dataclasses import dataclass import matplotlib.pyplot as plt diff --git a/opencsp/common/lib/csp/sun_track.py b/opencsp/common/lib/csp/sun_track.py index cde738184..927608fbf 100644 --- a/opencsp/common/lib/csp/sun_track.py +++ b/opencsp/common/lib/csp/sun_track.py @@ -103,9 +103,9 @@ def tracking_surface_normal_xyz( heliostat_xyz: list | np.ndarray | tuple, # (x,y,z) in m. Heliostat origin. aimpoint_xyz: list | np.ndarray | tuple, # (x,y,z) in m. Reflection aim point. - location_lon_lat: list - | np.ndarray - | tuple, # (lon,lat) in rad. Solar field origin. + location_lon_lat: ( + list | np.ndarray | tuple + ), # (lon,lat) in rad. Solar field origin. when_ymdhmsz: list | np.ndarray | tuple, ): # (year, month, day, hour, minute, second, timezone) tuple. 
# Example: (2022, 7, 4, 11, 20, 0, -6) diff --git a/opencsp/common/lib/csp/test/test_FacetEnsemble.py b/opencsp/common/lib/csp/test/test_FacetEnsemble.py index 700980216..6ad1fff37 100644 --- a/opencsp/common/lib/csp/test/test_FacetEnsemble.py +++ b/opencsp/common/lib/csp/test/test_FacetEnsemble.py @@ -1,4 +1,5 @@ """Unit tests to test MirrorEnsemble class""" + import numpy as np from opencsp.common.lib.csp.FacetEnsemble import FacetEnsemble diff --git a/opencsp/common/lib/csp/test/test_MirrorParametric.py b/opencsp/common/lib/csp/test/test_MirrorParametric.py index 6a9a13e9b..45f3a125f 100644 --- a/opencsp/common/lib/csp/test/test_MirrorParametric.py +++ b/opencsp/common/lib/csp/test/test_MirrorParametric.py @@ -1,4 +1,5 @@ """Unit test to test MirrorParametric class""" + import numpy as np from opencsp.common.lib.csp.MirrorParametric import MirrorParametric diff --git a/opencsp/common/lib/csp/test/test_MirrorPoint.py b/opencsp/common/lib/csp/test/test_MirrorPoint.py index 7d6d2554f..557b30bcd 100644 --- a/opencsp/common/lib/csp/test/test_MirrorPoint.py +++ b/opencsp/common/lib/csp/test/test_MirrorPoint.py @@ -1,4 +1,5 @@ """Unit test to test the MirrorPoint class""" + import numpy as np from opencsp.common.lib.csp.MirrorPoint import MirrorPoint diff --git a/opencsp/common/lib/cv/SpotAnalysis.py b/opencsp/common/lib/cv/SpotAnalysis.py index 23fa5a095..3f7c50fe1 100644 --- a/opencsp/common/lib/cv/SpotAnalysis.py +++ b/opencsp/common/lib/cv/SpotAnalysis.py @@ -225,9 +225,11 @@ def set_primary_images( def set_input_operables( self, - input_operables: SpotAnalysisOperablesStream - | list[SpotAnalysisOperable] - | Iterator[SpotAnalysisOperable], + input_operables: ( + SpotAnalysisOperablesStream + | list[SpotAnalysisOperable] + | Iterator[SpotAnalysisOperable] + ), ): """Assigns primary and supporting images, and other necessary data, in preparation for process_next(). 
diff --git a/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py b/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py index 5bc2d79ac..7d6b6a3b2 100644 --- a/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py +++ b/opencsp/common/lib/cv/spot_analysis/ImagesIterable.py @@ -71,9 +71,11 @@ def _video_to_list(self): class ImagesIterable(Iterable[CacheableImage]): def __init__( self, - stream: Callable[[int], CacheableImage] - | list[str | CacheableImage] - | vh.VideoHandler, + stream: ( + Callable[[int], CacheableImage] + | list[str | CacheableImage] + | vh.VideoHandler + ), ): """A restartable iterable that returns one image at a time, for as long as images are still available. diff --git a/opencsp/common/lib/cv/spot_analysis/ImagesStream.py b/opencsp/common/lib/cv/spot_analysis/ImagesStream.py index aeed85500..b681f77ea 100644 --- a/opencsp/common/lib/cv/spot_analysis/ImagesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/ImagesStream.py @@ -20,10 +20,12 @@ def __next__(self): class ImagesStream(Iterator[CacheableImage]): def __init__( self, - images: Callable[[int], CacheableImage] - | list[str | CacheableImage] - | vh.VideoHandler - | Iterator[str | CacheableImage], + images: ( + Callable[[int], CacheableImage] + | list[str | CacheableImage] + | vh.VideoHandler + | Iterator[str | CacheableImage] + ), ): """A one-time iterator over a list of images. diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py index d8e097725..ebb8a0c6a 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py @@ -37,7 +37,9 @@ class SpotAnalysisOperable: """ Any fiducials handed to us in the currently processing image. """ found_fiducials: list[af.AbstractFiducial] = field(default_factory=list) """ The identified fiducials in the currently processing image. 
""" - camera_intrinsics_characterization: any = None # TODO figure out how to specify information here, maybe using common/lib/camera/Camera + camera_intrinsics_characterization: any = ( + None # TODO figure out how to specify information here, maybe using common/lib/camera/Camera + ) """ Distortion, color, bit depth, etc of the camera. Maybe also distortion properties of the lens system. """ light_sources: list[ls.LightSource] = field(default_factory=list) """ The sources that produced the light that landed on the observed diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py index c55cb2845..ada316041 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperablesStream.py @@ -16,10 +16,12 @@ class SpotAnalysisOperablesStream(Iterator[SpotAnalysisOperable]): def __init__( self, - images: ImagesIterable - | ImagesStream - | SpotAnalysisImagesStream - | Iterator[SpotAnalysisOperable], + images: ( + ImagesIterable + | ImagesStream + | SpotAnalysisImagesStream + | Iterator[SpotAnalysisOperable] + ), ): self.images = images self.images_iter = None diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py index d51a683e0..5c73c2f23 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractSpotAnalysisImageProcessor.py @@ -52,14 +52,16 @@ def __init__(self, name: str): def run( self, - operables: ImagesIterable - | ImagesStream - | SpotAnalysisImagesStream - | Union[ - 'AbstractSpotAnalysisImagesProcessor', - list[SpotAnalysisOperable], - Iterator[SpotAnalysisOperable], - ], + operables: ( + ImagesIterable + | ImagesStream + | SpotAnalysisImagesStream + | 
Union[ + 'AbstractSpotAnalysisImagesProcessor', + list[SpotAnalysisOperable], + Iterator[SpotAnalysisOperable], + ] + ), ) -> list[CacheableImage]: """Performs image processing on the input images.""" if isinstance(operables, (ImagesIterable, ImagesStream)): diff --git a/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py b/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py index 7827d9b2c..af28832f1 100644 --- a/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py +++ b/opencsp/common/lib/deflectometry/CalibrationCameraPosition.py @@ -96,7 +96,7 @@ def collect_corner_xyz_locations(self) -> None: # Extract calibrated corner locations (4 corners per marker) self.pts_xyz_active_corner_locations = ( self.pts_xyz_active_corner_locations.concatenate( - self.pts_xyz_corners[index:index + 4] + self.pts_xyz_corners[index : index + 4] ) ) diff --git a/opencsp/common/lib/deflectometry/ImageProjection.py b/opencsp/common/lib/deflectometry/ImageProjection.py index 29cf9fb0d..8c214ff87 100644 --- a/opencsp/common/lib/deflectometry/ImageProjection.py +++ b/opencsp/common/lib/deflectometry/ImageProjection.py @@ -1,5 +1,6 @@ """Class handling the projection of images on a monitor/projector """ + import tkinter import cv2 as cv diff --git a/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py b/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py index 38310dffc..d209e00a7 100644 --- a/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py +++ b/opencsp/common/lib/deflectometry/ImageProjectionSetupGUI.py @@ -3,6 +3,7 @@ ```python ImageProjectionGUI.py``` """ + import tkinter from tkinter import messagebox from tkinter.filedialog import askopenfilename diff --git a/opencsp/common/lib/deflectometry/SlopeSolverData.py b/opencsp/common/lib/deflectometry/SlopeSolverData.py index 94a485c8e..550108564 100644 --- a/opencsp/common/lib/deflectometry/SlopeSolverData.py +++ b/opencsp/common/lib/deflectometry/SlopeSolverData.py @@ -1,4 +1,5 @@ 
"""Data class for holding output of a SlopeSolver calculation""" + from dataclasses import dataclass from numpy import ndarray diff --git a/opencsp/common/lib/deflectometry/SlopeSolverDataDebug.py b/opencsp/common/lib/deflectometry/SlopeSolverDataDebug.py index aecc8fc12..3aba255e3 100644 --- a/opencsp/common/lib/deflectometry/SlopeSolverDataDebug.py +++ b/opencsp/common/lib/deflectometry/SlopeSolverDataDebug.py @@ -1,4 +1,5 @@ """Class that holds information for debugging a SlopeSolver calculation.""" + from typing import Any diff --git a/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py b/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py index cc79da607..d7ad19b28 100644 --- a/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py +++ b/opencsp/common/lib/deflectometry/test/test_CalibrationCameraPosition.py @@ -1,5 +1,6 @@ """Tests the calculation of the Sofast camera position from known points """ + import os from os.path import join import unittest diff --git a/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py b/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py index f1f0b34ea..baab9c364 100644 --- a/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py +++ b/opencsp/common/lib/deflectometry/test/test_SlopeSolver.py @@ -1,5 +1,6 @@ """Unit test suite to test SlopeSolver class """ + import os import unittest @@ -7,7 +8,9 @@ from scipy.spatial.transform import Rotation from opencsp.app.sofast.lib.DisplayShape import DisplayShape as Display -from opencsp.app.sofast.lib.MeasurementSofastFringe import MeasurementSofastFringe as Measurement +from opencsp.app.sofast.lib.MeasurementSofastFringe import ( + MeasurementSofastFringe as Measurement, +) from opencsp.app.sofast.lib.SpatialOrientation import SpatialOrientation from opencsp.common.lib.deflectometry.SlopeSolver import SlopeSolver from opencsp.common.lib.geometry.Uxyz import Uxyz diff --git 
a/opencsp/common/lib/deflectometry/test/test_Surface2D.py b/opencsp/common/lib/deflectometry/test/test_Surface2D.py index aa62ed970..1d6bdf6bb 100644 --- a/opencsp/common/lib/deflectometry/test/test_Surface2D.py +++ b/opencsp/common/lib/deflectometry/test/test_Surface2D.py @@ -1,5 +1,6 @@ """Unit test suite to test Surface2D type classes """ + import unittest import numpy as np diff --git a/opencsp/common/lib/geometry/Vxyz.py b/opencsp/common/lib/geometry/Vxyz.py index cf8545bd6..e4bf008e0 100644 --- a/opencsp/common/lib/geometry/Vxyz.py +++ b/opencsp/common/lib/geometry/Vxyz.py @@ -1,5 +1,6 @@ """Three dimensional vector representation """ + from typing import Callable import numpy as np diff --git a/opencsp/common/lib/geometry/geometry_3d.py b/opencsp/common/lib/geometry/geometry_3d.py index 257add4bf..901a1c511 100644 --- a/opencsp/common/lib/geometry/geometry_3d.py +++ b/opencsp/common/lib/geometry/geometry_3d.py @@ -257,22 +257,22 @@ def construct_line_3d_given_two_points( line_3d['xyz_1'] = xyz_1 # First example point. line_3d['xyz_2'] = xyz_2 # Second example point. line_3d['length'] = length # Euclidean distance between example points. - line_3d[ - 'length_xy' - ] = length_xy # Euclidean distance between example points, projected onto the xy plane. + line_3d['length_xy'] = ( + length_xy # Euclidean distance between example points, projected onto the xy plane. + ) line_3d['mid_xyz'] = mid_xyz # Point midway between the two example points. - line_3d[ - 'vxyz' - ] = vxyz # Vector pointing from first example point to second example point. - line_3d[ - 'uxyz' - ] = uxyz # Unit vector pointing from first example point to second example point. - line_3d[ - 'theta' - ] = theta # Angle the line points, after projecting onto the xy plane, measured ccw about the z axis. - line_3d[ - 'eta' - ] = eta # Angle the line points above the xy plane (negative values indicate below the xy plane). 
+ line_3d['vxyz'] = ( + vxyz # Vector pointing from first example point to second example point. + ) + line_3d['uxyz'] = ( + uxyz # Unit vector pointing from first example point to second example point. + ) + line_3d['theta'] = ( + theta # Angle the line points, after projecting onto the xy plane, measured ccw about the z axis. + ) + line_3d['eta'] = ( + eta # Angle the line points above the xy plane (negative values indicate below the xy plane). + ) # Return. return line_3d diff --git a/opencsp/common/lib/photogrammetry/ImageMarker.py b/opencsp/common/lib/photogrammetry/ImageMarker.py index b38e667cd..eb9c94d9f 100644 --- a/opencsp/common/lib/photogrammetry/ImageMarker.py +++ b/opencsp/common/lib/photogrammetry/ImageMarker.py @@ -6,6 +6,7 @@ class can hold information about just the origin point of the markers, or point ID is the aruco marker ID. In the four point model, the point ID is the Aruco marker ID * 4 plus the corner index (ranging from 0 to 4). """ + from warnings import warn import cv2 as cv diff --git a/opencsp/common/lib/photogrammetry/bundle_adjustment.py b/opencsp/common/lib/photogrammetry/bundle_adjustment.py index 2951ec575..40ec144e1 100644 --- a/opencsp/common/lib/photogrammetry/bundle_adjustment.py +++ b/opencsp/common/lib/photogrammetry/bundle_adjustment.py @@ -3,6 +3,7 @@ https://scipy-cookbook.readthedocs.io/items/bundle_adjustment.html """ + from typing import Literal import cv2 as cv diff --git a/opencsp/common/lib/photogrammetry/photogrammetry.py b/opencsp/common/lib/photogrammetry/photogrammetry.py index 752e0a724..ec5d72b50 100644 --- a/opencsp/common/lib/photogrammetry/photogrammetry.py +++ b/opencsp/common/lib/photogrammetry/photogrammetry.py @@ -1,5 +1,6 @@ """Library of photogrammetry-related functions and algorithms """ + import cv2 as cv import matplotlib.pyplot as plt import numpy as np diff --git a/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py b/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py index 
e1c457e52..4f98cf029 100644 --- a/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py +++ b/opencsp/common/lib/photogrammetry/test/test_photogrammetry.py @@ -1,4 +1,5 @@ """Tests functions in the phogrammetry library""" + import os from os.path import join diff --git a/opencsp/common/lib/process/parallel_video_tools.py b/opencsp/common/lib/process/parallel_video_tools.py index 471fcb580..bd4eabd42 100644 --- a/opencsp/common/lib/process/parallel_video_tools.py +++ b/opencsp/common/lib/process/parallel_video_tools.py @@ -154,10 +154,9 @@ def parallel_video_to_frames( duplicates_handler = vh.VideoHandler.VideoCreator( dst_frames_dir_serv, None, None, frame_control ) - ( - non_duplicate_frame_files, - duplicate_frame_files, - ) = duplicates_handler.identify_duplicate_frames(0, 0) + (non_duplicate_frame_files, duplicate_frame_files) = ( + duplicates_handler.identify_duplicate_frames(0, 0) + ) for dup_frame in duplicate_frame_files: dup_frame = os.path.join(dst_frames_dir_serv, dup_frame) ft.delete_file(dup_frame) diff --git a/opencsp/common/lib/process/subprocess_tools.py b/opencsp/common/lib/process/subprocess_tools.py index dbaba9301..82657c175 100644 --- a/opencsp/common/lib/process/subprocess_tools.py +++ b/opencsp/common/lib/process/subprocess_tools.py @@ -10,7 +10,7 @@ def get_executable_path(executable_name: str, dont_match: str = None) -> str: - """ Returns the first "path/name.ext" for the given executable. If + """Returns the first "path/name.ext" for the given executable. If dont_match is specified, then paths containing that string are excluded from the returned results. 
@@ -30,7 +30,7 @@ def get_executable_path(executable_name: str, dont_match: str = None) -> str: """ dont_match = dont_match.lower() search_cmd = "which" - if (os.name == "nt"): + if os.name == "nt": search_cmd = "where" if executable_name.endswith(".exe"): executable_name = executable_name[:-4] diff --git a/opencsp/common/lib/render/VideoHandler.py b/opencsp/common/lib/render/VideoHandler.py index fa76b4e06..b1c462ddb 100644 --- a/opencsp/common/lib/render/VideoHandler.py +++ b/opencsp/common/lib/render/VideoHandler.py @@ -336,9 +336,9 @@ def identify_duplicate_frames( non_duplicate_frame_files = [ ft.file_size_pair_name(previous_frame_file_size_pair) ] - duplicate_frame_files: list[ - str - ] = [] # First frame is never a duplicate of preceding. + duplicate_frame_files: list[str] = ( + [] + ) # First frame is never a duplicate of preceding. for this_frame_file_size_pair in input_frame_file_size_pair_list[1:]: if self._this_frame_is_a_duplicate_of_previous( previous_frame_file_size_pair, diff --git a/opencsp/common/lib/render/View3d.py b/opencsp/common/lib/render/View3d.py index 858a6a163..c551a7850 100644 --- a/opencsp/common/lib/render/View3d.py +++ b/opencsp/common/lib/render/View3d.py @@ -2,6 +2,7 @@ """ + import numpy as np import matplotlib.image as mpimg import matplotlib.pyplot as plt @@ -306,8 +307,8 @@ def imshow(self, *args, colorbar=False, **kwargs) -> None: top of other plots (example on top of 3D data) use draw_image instead.""" if self.view_spec['type'] == 'image': # load the image, as necessary - load_as_necessary = ( - lambda img: img if not isinstance(img, str) else Image.open(img) + load_as_necessary = lambda img: ( + img if not isinstance(img, str) else Image.open(img) ) if 'X' in kwargs: img = kwargs['X'] diff --git a/opencsp/common/lib/render_control/RenderControlEnsemble.py b/opencsp/common/lib/render_control/RenderControlEnsemble.py index 6402a816c..cea0b79fc 100644 --- a/opencsp/common/lib/render_control/RenderControlEnsemble.py +++ 
b/opencsp/common/lib/render_control/RenderControlEnsemble.py @@ -2,6 +2,7 @@ """ + from typing import Iterable diff --git a/opencsp/common/lib/render_control/RenderControlFigureRecord.py b/opencsp/common/lib/render_control/RenderControlFigureRecord.py index 2cfd0df32..ae20fd5ff 100644 --- a/opencsp/common/lib/render_control/RenderControlFigureRecord.py +++ b/opencsp/common/lib/render_control/RenderControlFigureRecord.py @@ -50,14 +50,12 @@ def __init__( self.figure = figure self.axis_control = axis_control """ Axis control instance used in figure_management.setup_figure. Can be None|RenderControlAxis. """ - self.metadata: list[ - str - ] = ( + self.metadata: list[str] = ( [] ) # A list of standard string fields -- name, figure number, file path, etc. - self.comments: list[ - str - ] = [] # A list of caller-defined strings, to be filled in later. + self.comments: list[str] = ( + [] + ) # A list of caller-defined strings, to be filled in later. self.axis: plt.Axes = None # Matplotlib plot axes object. Set later. self.view: View3d = None # View3d object. Set later. self.equal = None # Whether to make axes equal. Set later. diff --git a/opencsp/common/lib/render_control/RenderControlVideoFrames.py b/opencsp/common/lib/render_control/RenderControlVideoFrames.py index 3bc7adc33..85f34a324 100644 --- a/opencsp/common/lib/render_control/RenderControlVideoFrames.py +++ b/opencsp/common/lib/render_control/RenderControlVideoFrames.py @@ -2,6 +2,7 @@ """ + import os from typing import Optional diff --git a/opencsp/common/lib/target/TargetColor.py b/opencsp/common/lib/target/TargetColor.py index 8d52cbbfc..3dbba6d3e 100755 --- a/opencsp/common/lib/target/TargetColor.py +++ b/opencsp/common/lib/target/TargetColor.py @@ -353,9 +353,9 @@ def set_image_to_polar_color_bar( # Set pixel color # ?? SCAFFOLDING RCB -- FIXUP ALL THIS CONFUSION REGARDING WHETHER COLORS ARE OVER [0,1] OR [0,255]. - self.image[ - row, col, 0 - ] = this_red # /255.0 # ?? 
SCAFFOLDING RCB -- CONVERT COLOR BAR TO INTERVAL [0,1]? + self.image[row, col, 0] = ( + this_red # /255.0 # ?? SCAFFOLDING RCB -- CONVERT COLOR BAR TO INTERVAL [0,1]? + ) self.image[row, col, 1] = this_green # /255.0 self.image[row, col, 2] = this_blue # /255.0 diff --git a/opencsp/common/lib/target/target_color_2d_rgb.py b/opencsp/common/lib/target/target_color_2d_rgb.py index 221edaea3..f8bb6c515 100755 --- a/opencsp/common/lib/target/target_color_2d_rgb.py +++ b/opencsp/common/lib/target/target_color_2d_rgb.py @@ -2,6 +2,7 @@ Creates a 2D color target """ + import imageio.v3 as imageio import matplotlib.pyplot as plt import numpy as np diff --git a/opencsp/common/lib/test/test_MirrorOutput.py b/opencsp/common/lib/test/test_MirrorOutput.py index 1c2dc4ea4..8036fc9c9 100644 --- a/opencsp/common/lib/test/test_MirrorOutput.py +++ b/opencsp/common/lib/test/test_MirrorOutput.py @@ -392,9 +392,7 @@ def test_heliostat_05W01_and_14W01(self) -> None: # Construct heliostat objects and solar field object. def fn_5W01(x, y): - return (x**2) / (4 * focal_length_5W01) + (y**2) / ( - 4 * focal_length_5W01 - ) + return (x**2) / (4 * focal_length_5W01) + (y**2) / (4 * focal_length_5W01) h_5W01 = helio.h_from_facet_centroids( name_5W01, @@ -412,9 +410,7 @@ def fn_5W01(x, y): h_5W01.set_canting_from_equation(fn_5W01) def fn_14W01(x, y): - return (x**2) / (4 * focal_length_14W01) + (y**2) / ( - 4 * focal_length_14W01 - ) + return (x**2) / (4 * focal_length_14W01) + (y**2) / (4 * focal_length_14W01) h_14W01 = helio.h_from_facet_centroids( "NSTTF Heliostat 14W01", diff --git a/opencsp/common/lib/tool/file_tools.py b/opencsp/common/lib/tool/file_tools.py index 6c2111aec..eaec576ce 100755 --- a/opencsp/common/lib/tool/file_tools.py +++ b/opencsp/common/lib/tool/file_tools.py @@ -285,8 +285,12 @@ def file_size(input_dir_body_ext, error_if_exists_as_dir=True): Returns the size of the given file in bytes. 
""" if not file_exists(input_dir_body_ext, error_if_exists_as_dir): - lt.error_and_raise(FileNotFoundError, 'ERROR: In file_size(), input_dir_body_ext ' + - 'was not found.\n\tinput_dir_body_ext =' + input_dir_body_ext) + lt.error_and_raise( + FileNotFoundError, + 'ERROR: In file_size(), input_dir_body_ext ' + + 'was not found.\n\tinput_dir_body_ext =' + + input_dir_body_ext, + ) return os.path.getsize(input_dir_body_ext) @@ -1086,12 +1090,15 @@ def convert_shortcuts_to_symlinks(dirname: str): # TEXT FILES -def write_text_file(description: str, - output_dir: str, - output_file_body: str, - output_string_list: list[any], - error_if_dir_not_exist=True) -> str: - """ Writes a strings to a ".txt" file, with each string on a new line. + +def write_text_file( + description: str, + output_dir: str, + output_file_body: str, + output_string_list: list[any], + error_if_dir_not_exist=True, +) -> str: + """Writes a strings to a ".txt" file, with each string on a new line. Parameters ---------- diff --git a/opencsp/common/lib/tool/hdf5_tools.py b/opencsp/common/lib/tool/hdf5_tools.py index fe84b1b4f..32f9ce0df 100644 --- a/opencsp/common/lib/tool/hdf5_tools.py +++ b/opencsp/common/lib/tool/hdf5_tools.py @@ -48,7 +48,7 @@ def load_hdf5_datasets(datasets: list, file: str): def is_dataset_and_shape(object: h5py.Group | h5py.Dataset) -> tuple[bool, tuple]: - """ Returns whether the given object is an hdf5 dataset and, if it is, then + """Returns whether the given object is an hdf5 dataset and, if it is, then also what it's shape is. Parameters @@ -74,7 +74,7 @@ def is_dataset_and_shape(object: h5py.Group | h5py.Dataset) -> tuple[bool, tuple def get_groups_and_datasets(hdf5_path_name_ext: str | h5py.File): - """ Get the structure of an HDF5 file, including all group and dataset names, and the dataset shapes. + """Get the structure of an HDF5 file, including all group and dataset names, and the dataset shapes. 
Parameters ---------- @@ -116,7 +116,9 @@ def visitor(name: str, object: h5py.Group | h5py.Dataset): return group_names, file_names_and_shapes -def _create_dataset_path(base_dir: str, h5_dataset_path_name: str, dataset_ext: str = ".txt"): +def _create_dataset_path( + base_dir: str, h5_dataset_path_name: str, dataset_ext: str = ".txt" +): dataset_location, dataset_name, _ = ft.path_components(h5_dataset_path_name) dataset_path = ft.norm_path(os.path.join(base_dir, dataset_location)) ft.create_directories_if_necessary(dataset_path) @@ -124,7 +126,7 @@ def _create_dataset_path(base_dir: str, h5_dataset_path_name: str, dataset_ext: def unzip(hdf5_path_name_ext: str, destination_dir: str, dataset_format='npy'): - """ Unpacks the given HDF5 file into the given destination directory. + """Unpacks the given HDF5 file into the given destination directory. Unpacks the given HDF5 file into the given destination directory. A new directory is created in the destination with the same name as the hdf5 file. 
@@ -155,7 +157,9 @@ def unzip(hdf5_path_name_ext: str, destination_dir: str, dataset_format='npy'): # Create the HDF5 output directory if ft.directory_exists(hdf5_dir): lt.error_and_raise( - FileExistsError, f"Error in hdf5_tools.unzip(): output directory {hdf5_dir} already exists!") + FileExistsError, + f"Error in hdf5_tools.unzip(): output directory {hdf5_dir} already exists!", + ) ft.create_directories_if_necessary(hdf5_dir) # Get all of what may be strings or images from the h5 file @@ -171,11 +175,16 @@ def unzip(hdf5_path_name_ext: str, destination_dir: str, dataset_format='npy'): for i, possible_string_name in enumerate(possible_strings_names): dataset_name = possible_string_name.split("/")[-1] h5_val = load_hdf5_datasets([possible_string_name], norm_path)[dataset_name] - if isinstance(h5_val, np.ndarray) and h5_val.ndim <= 1 and isinstance(h5_val.tolist()[0], str): + if ( + isinstance(h5_val, np.ndarray) + and h5_val.ndim <= 1 + and isinstance(h5_val.tolist()[0], str) + ): h5_val = h5_val.tolist()[0] if isinstance(h5_val, str): dataset_path_name_ext = _create_dataset_path( - hdf5_dir, possible_strings[i][0], ".txt") + hdf5_dir, possible_strings[i][0], ".txt" + ) with open(dataset_path_name_ext, "w") as fout: fout.write(h5_val) else: @@ -192,11 +201,12 @@ def unzip(hdf5_path_name_ext: str, destination_dir: str, dataset_format='npy'): # we assume images have 2 or 3 dimensions if (len(shape) == 2) or (len(shape) == 3): - # we assume shapes are at least 10x10 pixels and have an aspect ratio of at least 10:1 aspect_ratio = max(shape[0], shape[1]) / min(shape[0], shape[1]) if (shape[0] >= 10 and shape[1] >= 10) and (aspect_ratio < 10.001): - dataset_path_name_ext = _create_dataset_path(hdf5_dir, possible_images[i][0], ".png") + dataset_path_name_ext = _create_dataset_path( + hdf5_dir, possible_images[i][0], ".png" + ) # assumed grayscale or RGB if (len(shape) == 2) or (shape[2] in [1, 3]): img = it.numpy_to_image(np_image) diff --git 
a/opencsp/common/lib/tool/log_tools.py b/opencsp/common/lib/tool/log_tools.py index a8bf583ce..64736e7c6 100644 --- a/opencsp/common/lib/tool/log_tools.py +++ b/opencsp/common/lib/tool/log_tools.py @@ -2,6 +2,7 @@ Utilities for managing logs for multi-processing. """ + import logging as log import multiprocessing as mp import os @@ -17,10 +18,11 @@ global_multiprocessing_logger: log.Logger = None -def logger(log_dir_body_ext: str = None, - level: int = log.INFO, - delete_existing_log: bool = True, - ) -> log.Logger: +def logger( + log_dir_body_ext: str = None, + level: int = log.INFO, + delete_existing_log: bool = True, +) -> log.Logger: """Initialize logging for single-process programs. Creates a fresh log file, deleting the existing log file if it exists as indicated by delete_existing_log_file. @@ -149,9 +151,9 @@ def multiprocessing_logger(log_dir_body_ext=None, level=log.INFO) -> log.Logger: return global_multiprocessing_logger -def _add_stream_handlers(logger_: log.Logger, - level: int, - formatter: log.Formatter = None) -> None: +def _add_stream_handlers( + logger_: log.Logger, level: int, formatter: log.Formatter = None +) -> None: """Adds streams to the given logger. 
Prints From https://stackoverflow.com/questions/16061641/python-logging-split-between-stdout-and-stderr diff --git a/opencsp/common/lib/tool/test/test_file_tools.py b/opencsp/common/lib/tool/test/test_file_tools.py index 6b190d52c..458a7eb4c 100644 --- a/opencsp/common/lib/tool/test/test_file_tools.py +++ b/opencsp/common/lib/tool/test/test_file_tools.py @@ -35,7 +35,9 @@ def test_files_in_directory_recursive(self): self.assertListEqual(expected, files_name_ext) def test_files_in_directory_recursive_files_only(self): - files_name_ext = ft.files_in_directory(self.data_dir, recursive=True, files_only=True) + files_name_ext = ft.files_in_directory( + self.data_dir, recursive=True, files_only=True + ) files_name_ext = [f.replace("\\", "/") for f in files_name_ext] expected = [".dotfile", "a.a", "b.b", "d/c.c", "d/e/f.f"] self.assertListEqual(expected, files_name_ext)