From fda30bee44f2de9de64275dcb8fe470e08b43872 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 27 Jan 2021 13:32:42 -0800 Subject: [PATCH 001/180] cmake pass environment to swig this fixes an issue with the superbuild experienced on Ubuntu 20.04 --- python/CMake/teca_python.cmake | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/python/CMake/teca_python.cmake b/python/CMake/teca_python.cmake index 2acc89a5e..75fae0f5c 100644 --- a/python/CMake/teca_python.cmake +++ b/python/CMake/teca_python.cmake @@ -4,7 +4,9 @@ function(depend_swig input output) # custom command to update the dependency file add_custom_command( OUTPUT ${output_file} - COMMAND ${swig_cmd} -c++ -python -MM + COMMAND env LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH} + DYLD_LIBRARY_PATH=$ENV{DYLD_LIBRARY_PATH} + ${swig_cmd} -c++ -python -MM -I${MPI4Py_INCLUDE_DIR} -I${CMAKE_CURRENT_BINARY_DIR} -I${CMAKE_CURRENT_BINARY_DIR}/.. @@ -19,7 +21,9 @@ function(depend_swig input output) # bootstrap the dependency list message(STATUS "Generating initial dependency list for ${input}") execute_process( - COMMAND ${swig_cmd} -c++ -python -MM + COMMAND env LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH} + DYLD_LIBRARY_PATH=$ENV{DYLD_LIBRARY_PATH} + ${swig_cmd} -c++ -python -MM -I${MPI4Py_INCLUDE_DIR} -I${CMAKE_CURRENT_BINARY_DIR} -I${CMAKE_CURRENT_BINARY_DIR}/.. 
@@ -73,7 +77,9 @@ function(wrap_swig input output) endforeach() add_custom_command( OUTPUT ${output_file} - COMMAND ${swig_cmd} -c++ -python -threads -w341,325 + COMMAND env LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH} + DYLD_LIBRARY_PATH=$ENV{DYLD_LIBRARY_PATH} + ${swig_cmd} -c++ -python -threads -w341,325 -DSWIG_TYPE_TABLE=teca_py -I${MPI4Py_INCLUDE_DIR} -I${CMAKE_CURRENT_BINARY_DIR} From 79b2f5cbbc798841a0519291e45f307e3a47d2c2 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 28 Jan 2021 12:42:29 -0800 Subject: [PATCH 002/180] testing don't oversubscribe on travis-ci --- CMakeLists.txt | 46 ++++++++--- test/CMakeLists.txt | 34 +++++--- test/apps/CMakeLists.txt | 134 +++++++++++++++++++++++++------ test/python/CMakeLists.txt | 86 ++++++++++++-------- test/travis_ci/ctest_linux.cmake | 1 + test/travis_ci/ctest_osx.cmake | 1 + 6 files changed, 218 insertions(+), 84 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c1ecb1fbb..4eb5ff0ff 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -387,26 +387,50 @@ if (BUILD_TESTING) # figure out how many cores we can use for parallel tests set(TECA_TEST_CORES 0 CACHE STRING "Max number of cores for use in parallel tests") + + # by default assume 2 hyperthreads per core, if this is not + # the case override here + set(HYPERTHREADS_PER_CORE 2 CACHE STRING + "The number of hyperthreads per core.") + + # use CMake to get the number of logical cores. includes hyperthreads + # in the count. if (TECA_TEST_CORES LESS 1) ProcessorCount(LOGICAL_CORES) if (LOGICAL_CORES EQUAL 0) - set(LOGICAL_CORES 4) + message(FATAL_ERROR "Failed to detect the number of cores. 
" + "Set TECA_TEST_CORES") endif() else() - math(EXPR LOGICAL_CORES "${TECA_TEST_CORES}*2") + math(EXPR LOGICAL_CORES "${TECA_TEST_CORES}*${HYPERTHREADS_PER_CORE}") endif() - math(EXPR PHYSICAL_CORES "${LOGICAL_CORES}/2") - if (PHYSICAL_CORES LESS 3) - set(TEST_CORES 2) - set(HALF_TEST_CORES 2) - set(TWICE_TEST_CORES 4) - else() - set(TEST_CORES ${PHYSICAL_CORES}) - math(EXPR HALF_TEST_CORES "${TEST_CORES}/2") - set(TWICE_TEST_CORES ${LOGICAL_CORES}) + + # adjust count for hyperthreads. + math(EXPR PHYSICAL_CORES "${LOGICAL_CORES}/${HYPERTHREADS_PER_CORE}") + if (PHYSICAL_CORES LESS 1) + message(FATAL_ERROR "Invalid CPU configuration. " + "LOGICAL_CORES=${LOGICAL_CORES} HYPERTHREADS_PER_CORE=" + "${HYPERTHREADS_PER_CORE}") endif() + + # set the number of cores to use for pure MPI or purely threaded tests + set(TEST_CORES ${PHYSICAL_CORES}) message(STATUS "regression testing -- enabled (${TEST_CORES} cores).") + # set the number of cores to use for MPI + threads tests. if there are too + # few physical cores then disable hybrid parallel tests + math(EXPR HALF_TEST_CORES "${TEST_CORES}/2") + if (HALF_TEST_CORES LESS 2) + message(STATUS "Hybrid parallel tests -- disabled.") + set(TEST_MPI_THREADS OFF) + else() + message(STATUS "Hybrid parallel tests -- enabled.") + set(TEST_MPI_THREADS ON) + endif() + + # set the number of cores for oversubscription/streaming tests + math(EXPR TWICE_TEST_CORES "${TEST_CORES}*2") + add_subdirectory(test) else() message(STATUS "regression testing -- disbaled") diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 5d752b9af..71e5d9930 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -183,7 +183,7 @@ teca_add_test(test_cf_writer_cam5_mpi_threads COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} test_cf_writer -i "${TECA_DATA_ROOT}/cam5_1_amip_run2\\.cam2\\.h2\\.1991-10-[0-9][0-9]-10800\\.nc" -o "test_cf_writer_cam5_mt_%t%.nc" -s 0,-1 -x lon -y lat -t time -c 1 -n 2 U850 V850 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} + 
FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_cf_writer_cfsr_mpi_threads @@ -191,14 +191,14 @@ teca_add_test(test_cf_writer_cfsr_mpi_threads -i "${TECA_DATA_ROOT}/NCEP_CFSR_0\\.5_1979\\.nc" -o "test_cf_writer_NCEP_CFSR_mt_%t%.nc" -s 0,-1 -x longitude -y latitude -b 65,110,10,55,0,0 -c 1 -n 2 elevation - FEATURES ${TECA_HAS_NETCDF} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_cf_writer_era5_mpi_threads COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} test_cf_writer -i "${TECA_DATA_ROOT}/e5\.oper\.an\.vinteg\.162_072_viwvn.*\.nc" -o "test_cf_writer_era5_mt_%t%.nc" -s 0,-1 -x longitude -y latitude -t time -c 1 -n 2 VIWVN - FEATURES ${TECA_HAS_NETCDF} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_connected_components @@ -331,7 +331,7 @@ teca_add_test(test_descriptive_statistics_mpi_threads "${TECA_DATA_ROOT}/cam5_1_amip_run2\\.cam2\\.h2\\.1991-10-[0-9][0-9]-10800\\.nc" "${TECA_DATA_ROOT}/test_descriptive_statistics.bin" 0 -1 2 TMQ T200 T500 - FEATURES ${TECA_HAS_UDUNITS} ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} + FEATURES ${TECA_HAS_UDUNITS} ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_streaming_reduce_threads @@ -347,9 +347,8 @@ teca_add_test(test_streaming_reduce_mpi_threads LIBS teca_core teca_data teca_io teca_alg ${teca_test_link} COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} test_descriptive_statistics 0 "${TECA_DATA_ROOT}/prw_hus_day_MRI-CGCM3.*\\.nc" - "${TECA_DATA_ROOT}/test_streaming_reduce.bin" 0 -1 4 - prw - FEATURES ${TECA_HAS_UDUNITS} ${TECA_HAS_NETCDF} + "${TECA_DATA_ROOT}/test_streaming_reduce.bin" 0 -1 4 prw + FEATURES ${TECA_HAS_UDUNITS} ${TECA_HAS_NETCDF} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_binary_stream @@ -396,6 +395,7 @@ teca_add_test(test_tc_candidates_mpi_threads "${TECA_DATA_ROOT}/test_tc_candidates_20.bin" 0 3 2 U850 V850 UBOT VBOT PSL T500 
T200 Z1000 Z200 -20 20 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_tc_trajectory @@ -446,7 +446,7 @@ teca_add_test(test_table_reader_distribute_mpi_threads "${TECA_DATA_ROOT}/test_tc_candidates_20.bin" "${TECA_DATA_ROOT}/test_table_reader_distribute_20.bin" "step" 0 -1 2 - FEATURES ${TECA_HAS_MPI} + FEATURES ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_tc_wind_radii_serial @@ -473,7 +473,7 @@ teca_add_test(test_tc_wind_radii_mpi_threads "${TECA_DATA_ROOT}/cam5_1_amip_run2_1990s/.*\\.nc$" "${TECA_DATA_ROOT}/test_tc_wind_radii.bin" "!(((track_id==4)&&(surface_wind*3.6d>=177.0d))||((track_id==191)&&(surface_wind*3.6d>=249.0d))||((track_id==523)&&(3.6d*surface_wind>=209.0d)))" 32 1 2 0 -1 - FEATURES (${TECA_HAS_MPI} AND ${TECA_HAS_NETCDF}) + FEATURES ${TECA_HAS_MPI} ${TECA_HAS_NETCDF} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_tc_wind_radii_threads @@ -559,7 +559,15 @@ teca_add_test(test_bayesian_ar_detect_threads COMMAND test_bayesian_ar_detect "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\\.nc$" "${TECA_DATA_ROOT}/test_bayesian_ar_detect.bin" IVT - "bayesian_ar_detect_%t%.nc" ${TEST_CORES} 0 -1 + "bayesian_ar_detect_%t%.nc" -1 0 -1 + REQ_TECA_DATA) + +teca_add_test(test_bayesian_ar_detect_mpi + COMMAND ${MPIEXEC} -n ${TEST_CORES} test_bayesian_ar_detect + "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\\.nc$" + "${TECA_DATA_ROOT}/test_bayesian_ar_detect.bin" IVT + "bayesian_ar_detect_%t%.nc" -1 0 -1 + FEATURES ${TECA_HAS_MPI} ${TECA_HAS_NETCDF} REQ_TECA_DATA) teca_add_test(test_bayesian_ar_detect_mpi_threads @@ -567,7 +575,7 @@ teca_add_test(test_bayesian_ar_detect_mpi_threads "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\\.nc$" "${TECA_DATA_ROOT}/test_bayesian_ar_detect.bin" IVT "bayesian_ar_detect_%t%.nc" -1 0 -1 - FEATURES (${TECA_HAS_MPI} AND ${TECA_HAS_NETCDF}) + FEATURES ${TECA_HAS_MPI} ${TECA_HAS_NETCDF} ${TEST_MPI_THREADS} REQ_TECA_DATA) 
teca_add_test(test_normalize_coordinates_pass_through @@ -677,7 +685,7 @@ teca_add_test(test_cf_writer_collective_mpi teca_add_test(test_cf_writer_collective_mpi_threads COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} test_cf_writer_collective 128 512 128 2 "${TECA_DATA_ROOT}/test_cf_writer_collective_%t%.bin" 213 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(test_cf_writer_bad_type @@ -705,7 +713,7 @@ teca_add_test(test_cf_time_axis_reader teca_add_test(test_cf_time_axis_reader_mpi COMMAND ${MPIEXEC} -n ${TEST_CORES} test_cf_time_axis_reader - "${TECA_DATA_ROOT}/HighResMIP/TC_test/PSL/PSL.*\\.nc$" ${HALF_TEST_CORES} + "${TECA_DATA_ROOT}/HighResMIP/TC_test/PSL/PSL.*\\.nc$" ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} REQ_TECA_DATA) diff --git a/test/apps/CMakeLists.txt b/test/apps/CMakeLists.txt index b2c20a564..1496d4d92 100644 --- a/test/apps/CMakeLists.txt +++ b/test/apps/CMakeLists.txt @@ -5,7 +5,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR} $ ) -teca_add_test(test_deeplab_ar_detect_app_thread +teca_add_test(test_deeplab_ar_detect_app_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} @@ -14,12 +14,20 @@ teca_add_test(test_deeplab_ar_detect_app_thread teca_add_test(test_deeplab_ar_detect_app_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 - ${MPIEXEC} ${HALF_TEST_CORES} + ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_PYTORCH} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_deeplab_ar_detect_app_mcf_thread +teca_add_test(test_deeplab_ar_detect_app_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 + ${MPIEXEC} ${HALF_TEST_CORES} + 
FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_PYTORCH} + ${TECA_HAS_MPI} ${MPI4Py_FOUND} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + +teca_add_test(test_deeplab_ar_detect_app_mcf_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} @@ -28,12 +36,20 @@ teca_add_test(test_deeplab_ar_detect_app_mcf_thread teca_add_test(test_deeplab_ar_detect_app_mcf_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 - ${MPIEXEC} ${HALF_TEST_CORES} + ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_PYTORCH} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_thread +teca_add_test(test_deeplab_ar_detect_app_mcf_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app_mcf.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 + ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_PYTORCH} + ${TECA_HAS_MPI} ${MPI4Py_FOUND} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + +teca_add_test(test_bayesian_ar_detect_app_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} @@ -42,11 +58,18 @@ teca_add_test(test_bayesian_ar_detect_app_thread teca_add_test(test_bayesian_ar_detect_app_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 - ${MPIEXEC} ${HALF_TEST_CORES} + ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_mcf_thread +teca_add_test(test_bayesian_ar_detect_app_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 + ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES 
${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + +teca_add_test(test_bayesian_ar_detect_app_mcf_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} @@ -55,11 +78,18 @@ teca_add_test(test_bayesian_ar_detect_app_mcf_thread teca_add_test(test_bayesian_ar_detect_app_mcf_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 - ${MPIEXEC} ${HALF_TEST_CORES} + ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) -teca_add_test(test_integrated_vapor_transport_app_thread +teca_add_test(test_bayesian_ar_detect_app_mcf_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_mcf.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 + ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + +teca_add_test(test_integrated_vapor_transport_app_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} @@ -68,10 +98,17 @@ teca_add_test(test_integrated_vapor_transport_app_thread teca_add_test(test_integrated_vapor_transport_app_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 - ${MPIEXEC} ${HALF_TEST_CORES} + ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) +teca_add_test(test_integrated_vapor_transport_app_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 + ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + teca_add_test(test_tc_detect_app COMMAND 
${CMAKE_CURRENT_SOURCE_DIR}/test_tc_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} @@ -133,7 +170,7 @@ teca_add_test(test_event_filter_app ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_seasonal_average_thread +teca_add_test(test_temporal_reduction_app_seasonal_average_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -141,15 +178,24 @@ teca_add_test(test_temporal_reduction_app_seasonal_average_thread FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_seasonal_average_mpi_thread +teca_add_test(test_temporal_reduction_app_seasonal_average_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw - seasonal average 7 ${MPIEXEC} ${HALF_TEST_CORES} + seasonal average 7 ${MPIEXEC} ${HALF_TEST} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_average_thread +teca_add_test(test_temporal_reduction_app_seasonal_average_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} + "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw + seasonal average 7 ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + +teca_add_test(test_temporal_reduction_app_monthly_average_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -157,15 
+203,24 @@ teca_add_test(test_temporal_reduction_app_monthly_average_thread FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_average_mpi_thread +teca_add_test(test_temporal_reduction_app_monthly_average_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw - monthly average 7 ${MPIEXEC} ${HALF_TEST_CORES} + monthly average 7 ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_minimum_thread +teca_add_test(test_temporal_reduction_app_monthly_average_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} + "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw + monthly average 7 ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + +teca_add_test(test_temporal_reduction_app_monthly_minimum_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -173,15 +228,24 @@ teca_add_test(test_temporal_reduction_app_monthly_minimum_thread FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_minimum_mpi_thread +teca_add_test(test_temporal_reduction_app_monthly_minimum_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw - monthly minimum 7 ${MPIEXEC} ${HALF_TEST_CORES} + monthly minimum 7 ${MPIEXEC} ${TEST_CORES} FEATURES 
${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_maximum_thread +teca_add_test(test_temporal_reduction_app_monthly_minimum_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} + "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw + monthly minimum 7 ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + +teca_add_test(test_temporal_reduction_app_monthly_maximum_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -189,14 +253,23 @@ teca_add_test(test_temporal_reduction_app_monthly_maximum_thread FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_maximum_mpi_thread +teca_add_test(test_temporal_reduction_app_monthly_maximum_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw - monthly maximum 7 ${MPIEXEC} ${HALF_TEST_CORES} + monthly maximum 7 ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) +teca_add_test(test_temporal_reduction_app_monthly_maximum_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} + "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw + monthly maximum 7 ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + 
teca_add_test(test_temporal_reduction_app_daily_average_missing_values COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} @@ -218,7 +291,7 @@ teca_add_test(test_temporal_reduction_app_daily_minimum_missing_values FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_mcf_daily_average_thread +teca_add_test(test_temporal_reduction_app_mcf_daily_average_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "${CMAKE_BINARY_DIR}/${BIN_PREFIX}/../test/ECMWF-IFS-HR-SST-present.mcf" hus @@ -226,14 +299,23 @@ teca_add_test(test_temporal_reduction_app_mcf_daily_average_thread FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_mcf_daily_average_mpi_thread +teca_add_test(test_temporal_reduction_app_mcf_daily_average_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "${CMAKE_BINARY_DIR}/${BIN_PREFIX}/../test/ECMWF-IFS-HR-SST-present.mcf" hus - daily average 7 ${MPIEXEC} ${HALF_TEST_CORES} + daily average 7 ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) +teca_add_test(test_temporal_reduction_app_mcf_daily_average_mpi_threads + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app_mcf.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} + "${CMAKE_BINARY_DIR}/${BIN_PREFIX}/../test/ECMWF-IFS-HR-SST-present.mcf" hus + daily average 7 ${MPIEXEC} ${HALF_TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} + REQ_TECA_DATA) + teca_add_test(test_cartesian_mesh_diff_app_pass COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_cartesian_mesh_diff_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} @@ -308,6 +390,6 @@ 
teca_add_test(test_cf_restripe_app teca_add_test(test_cf_restripe_app_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_cf_restripe_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 - ${MPIEXEC} ${HALF_TEST_CORES} + ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) diff --git a/test/python/CMakeLists.txt b/test/python/CMakeLists.txt index 06a8720e2..9f5ffe50b 100644 --- a/test/python/CMakeLists.txt +++ b/test/python/CMakeLists.txt @@ -66,7 +66,7 @@ teca_add_test(py_test_cf_writer_mpi_threads ${CMAKE_CURRENT_SOURCE_DIR}/test_cf_writer.py "${TECA_DATA_ROOT}/cam5_1_amip_run2\\.cam2\\.h2\\.1991-10-0[12]-10800\\.nc" 0 -1 2 1 "py_test_cf_writer_%t%.nc" U850 V850 - FEATURES ${TECA_HAS_MPI} ${TECA_HAS_NETCDF} + FEATURES ${TECA_HAS_MPI} ${TECA_HAS_NETCDF} ${TEST_MPI_THREADS} REQ_TECA_DATA) # TODO -- camke_parse_arguments eats "" @@ -149,7 +149,7 @@ teca_add_test(py_test_programmable_reduce_mpi COMMAND ${MPIEXEC} -n ${TEST_CORES} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_programmable_reduce.py "${TECA_DATA_ROOT}/cam5_1_amip_run2\\.cam2\\.h2\\.1991-10-0[12]-10800\\.nc" - "${TECA_DATA_ROOT}/py_test_programmable_reduce.bin" 0 -1 1 TMQ T200 T500 + "${TECA_DATA_ROOT}/py_test_programmable_reduce.bin" 0 -1 -1 TMQ T200 T500 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) @@ -157,9 +157,9 @@ teca_add_test(py_test_programmable_reduce_mpi_thread COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_programmable_reduce.py "${TECA_DATA_ROOT}/cam5_1_amip_run2\\.cam2\\.h2\\.1991-10-0[12]-10800\\.nc" - "${TECA_DATA_ROOT}/py_test_programmable_reduce.bin" 0 -1 ${HALF_TEST_CORES} - TMQ T200 T500 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} + "${TECA_DATA_ROOT}/py_test_programmable_reduce.bin" 0 -1 -1 TMQ T200 T500 + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} 
${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(py_test_python_reduce_serial @@ -188,9 +188,9 @@ teca_add_test(py_test_python_reduce_mpi_thread COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_python_reduce.py "${TECA_DATA_ROOT}/cam5_1_amip_run2\\.cam2\\.h2\\.1991-10-0[12]-10800\\.nc" - "${TECA_DATA_ROOT}/py_test_programmable_reduce.bin" 0 -1 ${HALF_TEST_CORES} - TMQ T200 T500 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} + "${TECA_DATA_ROOT}/py_test_programmable_reduce.bin" 0 -1 -1 TMQ T200 T500 + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(py_test_temporal_seasonal_average_thread @@ -205,7 +205,8 @@ teca_add_test(py_test_temporal_seasonal_average_mpi_thread ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction.py "${TECA_DATA_ROOT}/prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" "." "${TECA_DATA_ROOT}/test_temporal_reduction_prw" 7 2 seasonal average 0 prw - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(py_test_temporal_monthly_average_thread @@ -220,7 +221,8 @@ teca_add_test(py_test_temporal_monthly_average_mpi_thread ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction.py "${TECA_DATA_ROOT}/prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" "." 
"${TECA_DATA_ROOT}/test_temporal_reduction_prw" 7 2 monthly average 0 prw - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(py_test_temporal_monthly_minimum_thread @@ -235,7 +237,8 @@ teca_add_test(py_test_temporal_monthly_minimum_mpi_thread ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction.py "${TECA_DATA_ROOT}/prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" "." "${TECA_DATA_ROOT}/test_temporal_reduction_prw" 7 2 monthly minimum 0 prw - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(py_test_temporal_monthly_maximum_thread @@ -250,7 +253,8 @@ teca_add_test(py_test_temporal_monthly_maximum_mpi_thread ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction.py "${TECA_DATA_ROOT}/prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" "." 
"${TECA_DATA_ROOT}/test_temporal_reduction_prw" 7 2 monthly maximum 0 prw - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(py_test_temporal_daily_average @@ -332,7 +336,7 @@ teca_add_test(py_test_tc_candidates_mpi_threads "${TECA_DATA_ROOT}/test_tc_candidates_1990_07_0[12]\\.nc" "${TECA_DATA_ROOT}/test_tc_candidates_20.bin" 0 3 2 U850 V850 UBOT VBOT PSL T500 T200 Z1000 Z200 -20 20 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_test(py_test_event_filter @@ -378,20 +382,21 @@ teca_add_test(py_test_tc_wind_radii_stats FEATURES ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(py_test_bayesian_ar_detect_serial +teca_add_test(py_test_bayesian_ar_detect COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect.py "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\\.nc$" "${TECA_DATA_ROOT}/test_bayesian_ar_detect.bin" IVT - "bayesian_ar_detect_py_%t%.vtk" 1 0 -1 - FEATURES ${TECA_HAS_NETCDF} ${TECA_SERIAL_TESTS} + "bayesian_ar_detect_py_%t%.vtk" -1 0 -1 + FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(py_test_bayesian_ar_detect_threads - COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect.py +teca_add_test(py_test_bayesian_ar_detect_mpi + COMMAND ${MPIEXEC} -n ${TEST_CORES} ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect.py "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\\.nc$" "${TECA_DATA_ROOT}/test_bayesian_ar_detect.bin" IVT "bayesian_ar_detect_py_%t%.vtk" -1 0 -1 - FEATURES ${TECA_HAS_NETCDF} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} REQ_TECA_DATA) teca_add_test(py_test_bayesian_ar_detect_mpi_threads @@ -400,18 +405,10 @@ teca_add_test(py_test_bayesian_ar_detect_mpi_threads 
"${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\\.nc$" "${TECA_DATA_ROOT}/test_bayesian_ar_detect.bin" IVT "bayesian_ar_detect_py_%t%.vtk" -1 0 -1 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} - REQ_TECA_DATA) - -teca_add_test(py_test_deeplab_ar_detect_serial - COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect.py - "${TECA_DATA_ROOT}/cascade_deeplab_IVT.pt" - "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\.nc$" - "${TECA_DATA_ROOT}/test_deeplab_ar_detect" IVT 1 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} ${TECA_SERIAL_TESTS} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(py_test_deeplab_ar_detect_threads +teca_add_test(py_test_deeplab_ar_detect COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect.py "${TECA_DATA_ROOT}/cascade_deeplab_IVT.pt" "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\.nc$" @@ -419,13 +416,24 @@ teca_add_test(py_test_deeplab_ar_detect_threads FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} REQ_TECA_DATA) +teca_add_test(py_test_deeplab_ar_detect_mpi + COMMAND ${MPIEXEC} -n ${TEST_CORES} ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect.py + "${TECA_DATA_ROOT}/cascade_deeplab_IVT.pt" + "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\.nc$" + "${TECA_DATA_ROOT}/test_deeplab_ar_detect" IVT -1 + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} + REQ_TECA_DATA) + teca_add_test(py_test_deeplab_ar_detect_mpi_threads COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect.py "${TECA_DATA_ROOT}/cascade_deeplab_IVT.pt" "${TECA_DATA_ROOT}/ARTMIP_MERRA_2D_2017-05.*\.nc$" "${TECA_DATA_ROOT}/test_deeplab_ar_detect" IVT -1 - FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} ${TECA_HAS_MPI} ${MPI4Py_FOUND} + FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} ${TECA_HAS_MPI} + ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) 
teca_add_test(py_test_binary_stream @@ -435,11 +443,16 @@ teca_add_test(py_test_binary_stream FEATURES ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(py_test_nested_pipeline - COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} ${PYTHON_EXECUTABLE} - ${CMAKE_CURRENT_SOURCE_DIR}/test_nested_pipeline.py 16 16 32 ${HALF_TEST_CORES} +teca_add_test(py_test_nested_pipeline_mpi + COMMAND ${MPIEXEC} -n ${TEST_CORES} ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/test_nested_pipeline.py 16 16 32 -1 FEATURES ${TECA_HAS_MPI} ${MPI4Py_FOUND}) +teca_add_test(py_test_nested_pipeline_mpi_threads + COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/test_nested_pipeline.py 16 16 32 -1 + FEATURES ${TECA_HAS_MPI} ${MPI4Py_FOUND} ${TEST_MPI_THREADS}) + teca_add_test(py_test_cf_writer_collective_serial COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/test_cf_writer_collective.py" 128 512 128 @@ -463,10 +476,15 @@ teca_add_test(py_test_thread_parameters_serial "${CMAKE_CURRENT_SOURCE_DIR}/test_thread_parameters.py") teca_add_test(py_test_thread_parameters_mpi - COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} ${PYTHON_EXECUTABLE} + COMMAND ${MPIEXEC} -n ${TEST_CORES} ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/test_thread_parameters.py" FEATURES ${MPI4Py_FOUND}) +teca_add_test(py_test_thread_parameters_mpi_threads + COMMAND ${MPIEXEC} -n ${HALF_TEST_CORES} ${PYTHON_EXECUTABLE} + "${CMAKE_CURRENT_SOURCE_DIR}/test_thread_parameters.py" + FEATURES ${MPI4Py_FOUND} ${TEST_MPI_THREADS}) + teca_add_test(py_test_calendaring COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_calendaring.py) diff --git a/test/travis_ci/ctest_linux.cmake b/test/travis_ci/ctest_linux.cmake index f1897272d..63ea5bf67 100644 --- a/test/travis_ci/ctest_linux.cmake +++ b/test/travis_ci/ctest_linux.cmake @@ -21,6 +21,7 @@ TECA_ENABLE_PROFILER=ON TECA_PYTHON_VERSION=$ENV{TECA_PYTHON_VERSION} TECA_DATA_ROOT=$ENV{DASHROOT}/TECA_data TECA_TEST_CORES=2 
+HYPERTHREADS_PER_CORE=1 REQUIRE_OPENSSL=TRUE REQUIRE_BOOST=TRUE REQUIRE_NETCDF=TRUE diff --git a/test/travis_ci/ctest_osx.cmake b/test/travis_ci/ctest_osx.cmake index c56923421..df49f7a07 100644 --- a/test/travis_ci/ctest_osx.cmake +++ b/test/travis_ci/ctest_osx.cmake @@ -21,6 +21,7 @@ TECA_ENABLE_PROFILER=ON TECA_PYTHON_VERSION=$ENV{TECA_PYTHON_VERSION} TECA_DATA_ROOT=$ENV{DASHROOT}/TECA_data TECA_TEST_CORES=2 +HYPERTHREADS_PER_CORE=1 REQUIRE_OPENSSL=TRUE OPENSSL_ROOT_DIR=/usr/local/opt/openssl@1.1 REQUIRE_BOOST=TRUE From a23c2c60468220c757f05b9828476e08e6d958d6 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Tue, 2 Feb 2021 18:03:11 -0800 Subject: [PATCH 003/180] fix thread parameters more ranks than cores in the case that there are more MPI ranks than physical cores, warn about performance and return an error. 1 thread is reported and the core id that the code is running on is returned in the affinity map. it is not recommended to bind threads to cores in this case. we've only encountered this issue on Travis-CI in docker containers. --- core/teca_thread_pool.h | 12 ++++++++++-- core/teca_thread_util.cxx | 15 +++++++++------ python/teca_py_core.i | 4 ++-- 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/core/teca_thread_pool.h b/core/teca_thread_pool.h index 55721111b..df87de6a9 100644 --- a/core/teca_thread_pool.h +++ b/core/teca_thread_pool.h @@ -113,8 +113,16 @@ void teca_thread_pool::create_threads(MPI_Comm comm, int n_threads = n_requested; std::deque core_ids; - teca_thread_util::thread_parameters(comm, -1, - n_requested, bind, verbose, n_threads, core_ids); + + if (teca_thread_util::thread_parameters(comm, -1, + n_requested, bind, verbose, n_threads, core_ids)) + { + TECA_WARNING("Failed to detetermine thread parameters." 
+ " Falling back to 1 thread, affinity disabled.") + + n_threads = 1; + bind = false; + } // allocate the threads for (int i = 0; i < n_threads; ++i) diff --git a/core/teca_thread_util.cxx b/core/teca_thread_util.cxx index 3a7fe5fd7..ec811a732 100644 --- a/core/teca_thread_util.cxx +++ b/core/teca_thread_util.cxx @@ -368,14 +368,17 @@ int thread_parameters(MPI_Comm comm, int base_core_id, int n_requested, } // if the user runs more MPI ranks than cores some of the ranks - // will have no cores to use. fallback to 1 thread on core 0 - if (n_threads < 1) + // will have no cores to use. + if (n_procs > cores_per_node) { + TECA_WARNING(<< n_procs << " MPI ranks running on this node but only " + << cores_per_node << " CPU cores are available. Performance will" + " be degraded.") + n_threads = 1; - affinity.push_back(0); - TECA_WARNING("CPU cores are unavailable, performance will be degraded. " - "This can occur when running more MPI ranks than there are CPU " - "cores. Launching 1 thread on core 0.") + affinity.push_back(base_core_id); + + return -1; } // stop now if we are not binding threads to cores diff --git a/python/teca_py_core.i b/python/teca_py_core.i index fa67e3025..30ae345a4 100644 --- a/python/teca_py_core.i +++ b/python/teca_py_core.i @@ -970,8 +970,8 @@ PyObject *thread_parameters(MPI_Comm comm, { // caller requested automatic load balancing but this, // failed. 
- TECA_PY_ERROR(PyExc_RuntimeError, - "Automatic load balancing failed") + PyErr_Format(PyExc_RuntimeError, + "Failed to detect thread parameters."); return nullptr; } From a1e6db3021673abece4cbd32ff2f7471dce9ed1e Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Tue, 2 Feb 2021 21:21:14 -0800 Subject: [PATCH 004/180] fix ctest submission on mac os travis ci --- test/travis_ci/install_osx.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/travis_ci/install_osx.sh b/test/travis_ci/install_osx.sh index 21ebd6914..ca9f8cc7c 100755 --- a/test/travis_ci/install_osx.sh +++ b/test/travis_ci/install_osx.sh @@ -8,7 +8,7 @@ export PATH=/usr/local/bin:$PATH # these days. hence this list isn't comprehensive brew update brew unlink python@2 -brew install mpich swig svn udunits openssl python@3.8 +brew install mpich swig svn udunits openssl python@3.8 curl brew unlink python brew link --force python@3.8 From dc3b1c900a6527175bafb577f23f003177d18cc6 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 3 Feb 2021 09:19:47 -0800 Subject: [PATCH 005/180] travis-ci report cpuinfo --- core/teca_thread_util.cxx | 4 ++-- test/travis_ci/ctest_linux.sh | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/core/teca_thread_util.cxx b/core/teca_thread_util.cxx index ec811a732..e0c2c46e7 100644 --- a/core/teca_thread_util.cxx +++ b/core/teca_thread_util.cxx @@ -296,7 +296,7 @@ int thread_parameters(MPI_Comm comm, int base_core_id, int n_requested, (void)affinity; if (n_requested < 1) { - TECA_WARNING("Cannot autmatically detect threading parameters " + TECA_WARNING("Can not automatically detect threading parameters " "on this platform. The default is 1 thread per process.") n_threads = 1; } @@ -404,7 +404,7 @@ int thread_parameters(MPI_Comm comm, int base_core_id, int n_requested, // there are enough cores that each thread can have it's own core // mark the cores which have the root thread as used so that we skip them. 
- // if we always did this in the fully apcked case we'd always be assigning + // if we always did this in the fully packed case we'd always be assigning // hyperthreads off core. it is better to keep them local. if (((n_threads+1)*n_procs) < cores_per_node) { diff --git a/test/travis_ci/ctest_linux.sh b/test/travis_ci/ctest_linux.sh index 4fede64d8..864074349 100755 --- a/test/travis_ci/ctest_linux.sh +++ b/test/travis_ci/ctest_linux.sh @@ -19,6 +19,8 @@ then export NETCDF_BUILD_TYPE="netcdf_mpi" fi +cat /proc/cpuinfo + export PATH=.:${PATH} export PYTHONPATH=${DASHROOT}/build/lib export LD_LIBRARY_PATH=${DASHROOT}/build/lib From 1d90648486101515cd25572699b3b682b59ab294 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 3 Feb 2021 12:40:41 -0800 Subject: [PATCH 006/180] travis ci test brew link curl --- test/travis_ci/install_osx.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/travis_ci/install_osx.sh b/test/travis_ci/install_osx.sh index ca9f8cc7c..afd64e660 100755 --- a/test/travis_ci/install_osx.sh +++ b/test/travis_ci/install_osx.sh @@ -11,6 +11,7 @@ brew unlink python@2 brew install mpich swig svn udunits openssl python@3.8 curl brew unlink python brew link --force python@3.8 +brew link curl --force # matplotlib currently doesn't have a formula # teca fails to locate mpi4py installed from brew From 189fea9df841a0695bb019654d7694821d1e7f13 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Mon, 8 Feb 2021 22:07:25 -0800 Subject: [PATCH 007/180] try brewed curl with cdash --- test/travis_ci/ctest_osx.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/travis_ci/ctest_osx.sh b/test/travis_ci/ctest_osx.sh index c7c882a78..9d562c035 100755 --- a/test/travis_ci/ctest_osx.sh +++ b/test/travis_ci/ctest_osx.sh @@ -14,6 +14,9 @@ set +x source `pwd`/../tci/bin/activate set -x +export PATH=$(brew --prefix)/opt/curl/bin:$PATH +export DYLD_LIBRARY_PATH=$(brew --prefix)/opt/curl/lib:$DYLD_LIBRARY_PATH + mkdir build ctest -S 
${DASHROOT}/test/travis_ci/ctest_osx.cmake --output-on-failure --timeout 180 & ctest_pid=$! From a2cb7d3b51b3a91b83eb4537b3cc618dbcdf607b Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 17 Feb 2021 19:04:34 -0800 Subject: [PATCH 008/180] TECA_ALGORITHM_PROPERTY_V validate algorithm properties the new macro calls validate_NAME(T val) during set_NAME so that user supplied values may be validated prior to pipeline execution. --- core/teca_algorithm_fwd.h | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/core/teca_algorithm_fwd.h b/core/teca_algorithm_fwd.h index ee0a58b21..2ca1c8f09 100644 --- a/core/teca_algorithm_fwd.h +++ b/core/teca_algorithm_fwd.h @@ -47,8 +47,8 @@ const char *get_class_name() const override \ T &operator=(const T &src) = delete; \ T &operator=(T &&src) = delete; -// convenience macro to declare standard set_X/get_X methods -// where X is the name of a class member. will manage the +// convenience macro to declare standard set_NAME/get_NAME methods +// where NAME is the name of a class member. will manage the // algorithm's modified state for the user. #define TECA_ALGORITHM_PROPERTY(T, NAME) \ \ @@ -66,8 +66,32 @@ const T &get_##NAME() const \ return this->NAME; \ } -// convenience macro to declare standard set_X/get_X methods -// where X is the name of a class member. will manage the +// similar to TECA_ALGORITHM_PROPERTY but prior to setting NAME +// will call the member function int valididate_NAME(T v). If +// the value v is valid the fucntion should return 0. If the value +// is not zero the function should invoke TECA_ERROR with a +// descriptive message and return non-zero. 
+#define TECA_ALGORITHM_PROPERTY_V(T, NAME) \ + \ +void set_##NAME(const T &v) \ +{ \ + if (this->validate_ ## NAME (v)) \ + return; \ + \ + if (this->NAME != v) \ + { \ + this->NAME = v; \ + this->set_modified(); \ + } \ +} \ + \ +const T &get_##NAME() const \ +{ \ + return this->NAME; \ +} + +// convenience macro to declare standard set_NAME/get_NAME methods +// where NAME is the name of a class member. will manage the // algorithm's modified state for the user. #define TECA_ALGORITHM_VECTOR_PROPERTY(T, NAME) \ \ From 4b99f6acf6deb0fa05c9e53c145cf78889a2dc87 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 17 Feb 2021 19:06:35 -0800 Subject: [PATCH 009/180] data_array_collection improve documentation --- data/teca_array_collection.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/data/teca_array_collection.h b/data/teca_array_collection.h index 992a2b917..e578df44a 100644 --- a/data/teca_array_collection.h +++ b/data/teca_array_collection.h @@ -38,8 +38,11 @@ class teca_array_collection int append(p_teca_variant_array array); int append(const std::string &name, p_teca_variant_array array); - // set, return 0 on success. + // replace the ith array, return 0 on success. + // the name of the array is not changed. int set(unsigned int i, p_teca_variant_array array); + + // add or replace the named array, returns 0 on success. 
int set(const std::string &name, p_teca_variant_array array); // remove From 300cb9166700074c8d937da303a8b8145c6189db Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 17 Feb 2021 19:07:25 -0800 Subject: [PATCH 010/180] fix mesh dataset metadata name change dataset metadata key name from array_attributes to attributes to be consistent with get_output_metadata and get_upstream_request --- data/teca_mesh.h | 4 ++-- python/teca_py_data.i | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/data/teca_mesh.h b/data/teca_mesh.h index 51e01ec7f..62bde45bb 100644 --- a/data/teca_mesh.h +++ b/data/teca_mesh.h @@ -24,8 +24,8 @@ class teca_mesh : public teca_dataset TECA_DATASET_METADATA(time_units, std::string, 1) TECA_DATASET_METADATA(time_step, unsigned long, 1) - // set/get array attribute metadata - TECA_DATASET_METADATA(array_attributes, teca_metadata, 1) + // set/get attribute metadata + TECA_DATASET_METADATA(attributes, teca_metadata, 1) // get the array collection for the given centering // the centering enumeration is defined in teca_array_attributes diff --git a/python/teca_py_data.i b/python/teca_py_data.i index 10b79d3f9..e99b3466f 100644 --- a/python/teca_py_data.i +++ b/python/teca_py_data.i @@ -149,8 +149,8 @@ %ignore teca_mesh::get_time_step(unsigned long *) const; %ignore teca_mesh::set_calendar(std::string const *); %ignore teca_mesh::set_time_units(std::string const *); -%ignore teca_mesh::set_array_attributes(teca_metadata const *); -%ignore teca_mesh::get_array_attributes(teca_metadata *) const; +%ignore teca_mesh::set_attributes(teca_metadata const *); +%ignore teca_mesh::get_attributes(teca_metadata *) const; %ignore teca_mesh::get_arrays(int) const; %ignore teca_mesh::get_point_arrays() const; %ignore teca_mesh::get_cell_arrays() const; @@ -174,12 +174,12 @@ TECA_PY_CONST_CAST(teca_mesh) TECA_PY_DATASET_METADATA(std::string, calendar) TECA_PY_DATASET_METADATA(std::string, time_units) - teca_metadata get_array_attributes() + 
teca_metadata get_attributes() { teca_py_gil_state gil; teca_metadata atts; - self->get_array_attributes(atts); + self->get_attributes(atts); return atts; } From 3c81c64e1daaf47d44fb47daf6145aeeb5d22720 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 17 Feb 2021 19:10:08 -0800 Subject: [PATCH 011/180] variant_array_code::get constexpr --- core/teca_variant_array.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/teca_variant_array.h b/core/teca_variant_array.h index e68869138..937353b93 100644 --- a/core/teca_variant_array.h +++ b/core/teca_variant_array.h @@ -1256,7 +1256,7 @@ struct teca_variant_array_type {}; template <> \ struct teca_variant_array_code \ { \ - static unsigned int get() noexcept \ + static constexpr unsigned int get() \ { return v; } \ }; \ template <> \ @@ -1269,6 +1269,9 @@ template <> \ struct teca_variant_array_type \ { \ using type = T; \ + \ + static constexpr const char *name() \ + { return #T; } \ }; #define TECA_VARIANT_ARRAY_FACTORY_NEW(_v) \ From 8f4fc7ef506c910c461c369f9747b4b416af2399 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 17 Feb 2021 19:19:24 -0800 Subject: [PATCH 012/180] cartesian_mesh_source accept any attributes modify API to take teca_metadata instead of teca_array_attributes --- alg/teca_cartesian_mesh_source.cxx | 8 ++++---- alg/teca_cartesian_mesh_source.h | 10 +++++----- test/test_2d_component_area.cpp | 1 + 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/alg/teca_cartesian_mesh_source.cxx b/alg/teca_cartesian_mesh_source.cxx index 7946d188f..44b711587 100644 --- a/alg/teca_cartesian_mesh_source.cxx +++ b/alg/teca_cartesian_mesh_source.cxx @@ -147,7 +147,7 @@ void teca_cartesian_mesh_source::clear_cached_metadata() // -------------------------------------------------------------------------- void teca_cartesian_mesh_source::append_field_generator( - const std::string &name, const teca_array_attributes &atts, + const std::string &name, const teca_metadata 
&atts, field_generator_callback &callback) { this->append_field_generator({name, atts, callback}); @@ -251,10 +251,10 @@ teca_metadata teca_cartesian_mesh_source::get_output_metadata( vars.push_back(it->name); // correct size - teca_array_attributes var_atts = it->attributes; - var_atts.size = nxyz; + teca_metadata var_atts = it->attributes; + var_atts.set("size", nxyz); - atts.set(it->name, teca_metadata(var_atts)); + atts.set(it->name, var_atts); } this->internals->metadata.set("variables", vars); diff --git a/alg/teca_cartesian_mesh_source.h b/alg/teca_cartesian_mesh_source.h index be1e5039a..a28e413c2 100644 --- a/alg/teca_cartesian_mesh_source.h +++ b/alg/teca_cartesian_mesh_source.h @@ -2,7 +2,7 @@ #define teca_cartesian_mesh_source_h #include "teca_algorithm.h" -#include "teca_array_attributes.h" +#include "teca_metadata.h" #include #include @@ -19,7 +19,7 @@ using field_generator_callback = std::function::get() to get specific type + // teca_variant_array_code::get() to get specific type // codes for C++ POD types NT. TECA_ALGORITHM_PROPERTY(unsigned int, coordinate_type_code) TECA_ALGORITHM_PROPERTY(unsigned int, field_type_code) @@ -87,7 +87,7 @@ class teca_cartesian_mesh_source : public teca_algorithm TECA_ALGORITHM_PROPERTY(std::string, y_axis_units) TECA_ALGORITHM_PROPERTY(std::string, z_axis_units) - // number of time steps to generate + // set the calendar and time units TECA_ALGORITHM_PROPERTY(std::string, calendar) TECA_ALGORITHM_PROPERTY(std::string, time_units) @@ -99,7 +99,7 @@ class teca_cartesian_mesh_source : public teca_algorithm // x,y,z are coordinate axes in variant arrays, t is the double precision // time value. 
void append_field_generator(const std::string &name, - const teca_array_attributes &atts, field_generator_callback &callback); + const teca_metadata &atts, field_generator_callback &callback); protected: teca_cartesian_mesh_source(); diff --git a/test/test_2d_component_area.cpp b/test/test_2d_component_area.cpp index 9f71da08b..65047483a 100644 --- a/test/test_2d_component_area.cpp +++ b/test/test_2d_component_area.cpp @@ -11,6 +11,7 @@ #include "teca_dataset.h" #include "teca_cartesian_mesh.h" #include "teca_variant_array.h" +#include "teca_array_attributes.h" #define _USE_MATH_DEFINES #include From 7a8f9d677260c9f6ce1e8a45dfc571ad3531bcd7 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 17 Feb 2021 19:23:54 -0800 Subject: [PATCH 013/180] add teca_unpack_data an algorithm to unpack NetCDF packed data, adhears to the NetCDF attribute conventions for packed data. inlcudes Python wrapper and a regression test. --- .travis.yml | 2 +- alg/CMakeLists.txt | 1 + alg/teca_unpack_data.cxx | 359 ++++++++++++++++++++++++++++++++++++++ alg/teca_unpack_data.h | 85 +++++++++ python/teca_py_alg.i | 9 + test/CMakeLists.txt | 10 +- test/test_unpack_data.cpp | 196 +++++++++++++++++++++ 7 files changed, 659 insertions(+), 3 deletions(-) create mode 100644 alg/teca_unpack_data.cxx create mode 100644 alg/teca_unpack_data.h create mode 100644 test/test_unpack_data.cpp diff --git a/.travis.yml b/.travis.yml index 909717b1d..37496417c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ env: - BUILD_TYPE=Debug - TECA_DIR=/travis_teca_dir - TECA_PYTHON_VERSION=3 - - TECA_DATA_REVISION=101 + - TECA_DATA_REVISION=102 jobs: - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=TRUE - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=FALSE diff --git a/alg/CMakeLists.txt b/alg/CMakeLists.txt index 48038978d..bd3077adc 100644 --- a/alg/CMakeLists.txt +++ b/alg/CMakeLists.txt @@ -42,6 +42,7 @@ set(teca_alg_cxx_srcs 
teca_tc_wind_radii.cxx teca_tc_trajectory.cxx teca_temporal_average.cxx + teca_unpack_data.cxx teca_valid_value_mask.cxx teca_variant_array_operand.cxx teca_vertical_coordinate_transform.cxx diff --git a/alg/teca_unpack_data.cxx b/alg/teca_unpack_data.cxx new file mode 100644 index 000000000..b6ae1253b --- /dev/null +++ b/alg/teca_unpack_data.cxx @@ -0,0 +1,359 @@ +#include "teca_unpack_data.h" + +#include "teca_cartesian_mesh.h" +#include "teca_array_collection.h" +#include "teca_variant_array.h" +#include "teca_metadata.h" +#include "teca_array_attributes.h" + +#include +#include +#include +#include +#include + +#if defined(TECA_HAS_BOOST) +#include +#endif + +//#define TECA_DEBUG + +namespace +{ +template +void transform(output_t * __restrict__ p_out, input_t * __restrict__ p_in, + size_t n, output_t scale, output_t offset) +{ + for (size_t i = 0; i < n; ++i) + p_out[i] = p_in[i] * scale + offset; +} + +template +void transform(output_t * __restrict__ p_out, input_t * __restrict__ p_in, + mask_t * __restrict__ p_mask, size_t n, output_t scale, output_t offset, + output_t fill) +{ + for (size_t i = 0; i < n; ++i) + p_out[i] = (p_mask[i] ? 
p_in[i] * scale + offset : fill); +} +} + + +// -------------------------------------------------------------------------- +teca_unpack_data::teca_unpack_data() : + output_data_type(teca_variant_array_code::get()), + verbose(0) +{ + this->set_number_of_input_connections(1); + this->set_number_of_output_ports(1); +} + +// -------------------------------------------------------------------------- +teca_unpack_data::~teca_unpack_data() +{} + +#if defined(TECA_HAS_BOOST) +// -------------------------------------------------------------------------- +void teca_unpack_data::get_properties_description( + const std::string &prefix, options_description &global_opts) +{ + options_description opts("Options for " + + (prefix.empty()?"teca_unpack_data":prefix)); + + opts.add_options() + TECA_POPTS_GET(int, prefix, output_data_type, + "Sets the type of the transformed data to either single or double" + " precision floating point. Use 11 for single precision and 12 for" + " double precision. The default is single precision") + TECA_POPTS_GET(int, prefix, verbose, "Enables verbose output") + ; + + global_opts.add(opts); +} + +// -------------------------------------------------------------------------- +void teca_unpack_data::set_properties( + const std::string &prefix, variables_map &opts) +{ + TECA_POPTS_SET(opts, int, prefix, output_data_type) + TECA_POPTS_SET(opts, int, prefix, verbose) +} +#endif + +// -------------------------------------------------------------------------- +int teca_unpack_data::validate_output_data_type(int val) +{ + // validate the output type + if ((val != teca_variant_array_code::get()) && + (val != teca_variant_array_code::get())) + { + TECA_ERROR("Invlaid output data type " << val << ". 
Use " + << teca_variant_array_code::get() + << " to select double precision output and " + << teca_variant_array_code::get() + << " to select single precision output") + return -1; + } + return 0; +} + +// -------------------------------------------------------------------------- +teca_metadata teca_unpack_data::get_output_metadata( + unsigned int port, + const std::vector &input_md) +{ +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() + << "teca_unpack_data::get_output_metadata" << endl; +#endif + (void)port; + + // for each array on the input look for the presence of scale_factor and + // add_offset if both attributes are present then modify the output data + // type. + teca_metadata out_md(input_md[0]); + + std::vector variables; + if (out_md.get("variables", variables)) + { + TECA_ERROR("Failed to get the list of variables") + return teca_metadata(); + } + + teca_metadata attributes; + if (out_md.get("attributes", attributes)) + { + TECA_ERROR("Failed to get the array attributes") + return teca_metadata(); + } + + size_t n_vars = variables.size(); + for (size_t i = 0; i < n_vars; ++i) + { + const std::string &array_name = variables[i]; + + teca_metadata array_atts; + if (attributes.get(array_name, array_atts)) + { + // this could be reported as an error or a warning but unless this + // becomes problematic quietly ignore it + continue; + } + + // if both scale_factor and add_offset attributes are present then + // the data will be transformed. Update the output type. 
+ if (array_atts.has("scale_factor") && array_atts.has("add_offset")) + { + array_atts.set("type_code", this->output_data_type); + + array_atts.remove("scale_factor"); + array_atts.remove("add_offset"); + + if (array_atts.has("_FillValue") || array_atts.has("missing_value")) + { + array_atts.remove("_FillValue"); + array_atts.remove("missing_value"); + + if (this->output_data_type == teca_variant_array_code::get()) + array_atts.set("_FillValue", 1e20); + else if (this->output_data_type == teca_variant_array_code::get()) + array_atts.set("_FillValue", 1e20f); + } + + attributes.set(array_name, array_atts); + } + } + + out_md.set("attributes", attributes); + return out_md; +} + +// -------------------------------------------------------------------------- +std::vector teca_unpack_data::get_upstream_request( + unsigned int port, + const std::vector &input_md, + const teca_metadata &request) +{ + (void)port; + + std::vector up_reqs; + + // copy the incoming request to preserve the downstream + // requirements and add the arrays we need + teca_metadata req(request); + + // get the list of variable available. we need to see if + // the valid value mask is available and if so request it + const teca_metadata &md = input_md[0]; + + std::set variables; + if (md.get("variables", variables)) + { + TECA_ERROR("Metadata issue. 
variables is missing") + return up_reqs; + } + + teca_metadata attributes; + if (md.get("attributes", attributes)) + { + TECA_ERROR("Failed to get the array attributes") + return up_reqs; + } + + // add the dependent variables into the requested arrays + std::set arrays_up; + if (req.has("arrays")) + req.get("arrays", arrays_up); + + std::vector arrays_in(arrays_up.begin(), arrays_up.end()); + int n_arrays = arrays_in.size(); + for (int i = 0; i < n_arrays; ++i) + { + const std::string &array_name = arrays_in[i]; + + teca_metadata array_atts; + if (attributes.get(array_name, array_atts)) + { + // this could be reported as an error or a warning but unless this + // becomes problematic quietly ignore it + continue; + } + + // if both scale_factor and add_offset attributes are present then + // the data will be transformed. Update the output type. + if (array_atts.has("scale_factor") && array_atts.has("add_offset") && + (array_atts.has("_FillValue") || array_atts.has("missing_value"))) + { + // request the valid value mask if they are available. 
+ std::string mask_var = array_name + "_valid"; + if (variables.count(mask_var)) + arrays_up.insert(mask_var); + } + } + + // update the request + req.set("arrays", arrays_up); + + // send it up + up_reqs.push_back(req); + return up_reqs; +} + +// -------------------------------------------------------------------------- +const_p_teca_dataset teca_unpack_data::execute( + unsigned int port, + const std::vector &input_data, + const teca_metadata &request) +{ +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() << "teca_unpack_data::execute" << endl; +#endif + (void)port; + (void)request; + + // get the input mesh + const_p_teca_mesh in_mesh + = std::dynamic_pointer_cast(input_data[0]); + + if (!in_mesh) + { + TECA_ERROR("Input dataset is not a teca_mesh") + return nullptr; + } + + p_teca_mesh out_mesh = + std::static_pointer_cast(in_mesh->new_instance()); + + out_mesh->shallow_copy(std::const_pointer_cast(in_mesh)); + + teca_metadata attributes; + if (out_mesh->get_attributes(attributes)) + { + TECA_ERROR("Failed to get attributes") + return nullptr; + } + + // for each array + p_teca_array_collection point_arrays = out_mesh->get_point_arrays(); + int n_arrays = point_arrays->size(); + for (int i = 0; i < n_arrays; ++i) + { + const std::string &array_name = point_arrays->get_name(i); + + // skip valid value masks + size_t len = array_name.size(); + if ((len > 6) && (strcmp("_valid", array_name.c_str() + len - 6) == 0)) + continue; + + // check if this array is to be transformed + teca_metadata array_atts; + double scale = 0.0; + double offset = 0.0; + if (attributes.get(array_name, array_atts) || + array_atts.get("scale_factor", scale) || + array_atts.get("add_offset", offset)) + continue; + + // check for valid value mask + std::string mask_name = array_name + "_valid"; + p_teca_variant_array mask = point_arrays->get(mask_name); + + // get the input + p_teca_variant_array in_array = point_arrays->get(i); + + // allocate the output + p_teca_variant_array out_array = 
+ teca_variant_array_factory::New(this->output_data_type); + if (!out_array) + { + TECA_ERROR("Failed to allocate the output array") + return nullptr; + } + + unsigned long n_elem = in_array->size(); + out_array->resize(n_elem); + + // transform arrays + NESTED_TEMPLATE_DISPATCH(teca_variant_array_impl, + in_array.get(), + _IN, + NT_IN *p_in = dynamic_cast(in_array.get())->get(); + NESTED_TEMPLATE_DISPATCH_FP(teca_variant_array_impl, + out_array.get(), + _OUT, + NT_OUT *p_out = dynamic_cast(out_array.get())->get(); + + if (mask) + { + NESTED_TEMPLATE_DISPATCH_I(teca_variant_array_impl, + mask.get(), + _MASK, + NT_MASK *p_mask = dynamic_cast(mask.get())->get(); + ::transform(p_out, p_in, p_mask, + n_elem, NT_OUT(scale), NT_OUT(offset), NT_OUT(1e20)); + ) + + } + else + { + ::transform(p_out, p_in, n_elem, NT_OUT(scale), NT_OUT(offset)); + } + ) + ) + + // poass to the output + point_arrays->set(i, out_array); + + // update the metadata + array_atts.set("type_code", this->output_data_type); + attributes.set(array_name, array_atts); + + if (this->verbose) + { + TECA_STATUS("Unpacked \"" << array_name << "\" scale_factor = " + << scale << " add_offset = " << offset) + } + } + + return out_mesh; +} diff --git a/alg/teca_unpack_data.h b/alg/teca_unpack_data.h new file mode 100644 index 000000000..586d8e160 --- /dev/null +++ b/alg/teca_unpack_data.h @@ -0,0 +1,85 @@ +#ifndef teca_unpack_data_h +#define teca_unpack_data_h + +#include "teca_shared_object.h" +#include "teca_algorithm.h" +#include "teca_metadata.h" +#include "teca_variant_array.h" + +#include +#include + +TECA_SHARED_OBJECT_FORWARD_DECL(teca_unpack_data) + +/// an algorithm that unpacks NetCDF packed values +/** +Applies a data transform according to the NetCDF attribute conventions for +packed data values. +https://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html + +Variables in the input dataset are scanned for the presence +of the `scale_factor` and `add_offset` attributes. 
When both are present +an element wise transformation is applied such that + +out[i] = scale_factor * in[i] + add_offset + +The input array is expected to be an integer type while the type of the output +array may be either float or double. Valid value masks may be necessary for +correct results, see `teca_valid_value_mask`. +*/ +class teca_unpack_data : public teca_algorithm +{ +public: + TECA_ALGORITHM_STATIC_NEW(teca_unpack_data) + TECA_ALGORITHM_DELETE_COPY_ASSIGN(teca_unpack_data) + TECA_ALGORITHM_CLASS_NAME(teca_unpack_data) + ~teca_unpack_data(); + + // report/initialize to/from Boost program options + // objects. + TECA_GET_ALGORITHM_PROPERTIES_DESCRIPTION() + TECA_SET_ALGORITHM_PROPERTIES() + + // set the output data type. + // use teca_variant_array_code::get() to get the numeric + // code corresponding to the data type T. The default output + // data type is single precision floating point. + TECA_ALGORITHM_PROPERTY_V(int, output_data_type) + + // set the output data type to double precision floating point + void set_output_data_type_to_float() + { this->set_output_data_type(teca_variant_array_code::get()); } + + // set the output data type to single precision floating point + void set_output_data_type_to_double() + { this->set_output_data_type(teca_variant_array_code::get()); } + + // set the algorihtm verbosity. off by default. 
+ TECA_ALGORITHM_PROPERTY(int, verbose) + +protected: + teca_unpack_data(); + +private: + teca_metadata get_output_metadata( + unsigned int port, + const std::vector &input_md) override; + + std::vector get_upstream_request( + unsigned int port, + const std::vector &input_md, + const teca_metadata &request) override; + + const_p_teca_dataset execute( + unsigned int port, + const std::vector &input_data, + const teca_metadata &request) override; + + int validate_output_data_type(int val); + +private: + int output_data_type; + int verbose; +}; + +#endif diff --git a/python/teca_py_alg.i b/python/teca_py_alg.i index 9bc3a75d5..f31557f4f 100644 --- a/python/teca_py_alg.i +++ b/python/teca_py_alg.i @@ -37,6 +37,7 @@ #include "teca_tc_trajectory.h" #include "teca_tc_wind_radii.h" #include "teca_temporal_average.h" +#include "teca_unpack_data.h" #include "teca_valid_value_mask.h" #include "teca_vertical_reduction.h" #include "teca_vorticity.h" @@ -418,3 +419,11 @@ struct teca_tc_saffir_simpson %shared_ptr(teca_valid_value_mask) %ignore teca_valid_value_mask::operator=; %include "teca_valid_value_mask.h" + +/*************************************************************************** + unpack_data + ***************************************************************************/ +%ignore teca_unpack_data::shared_from_this; +%shared_ptr(teca_unpack_data) +%ignore teca_unpack_data::operator=; +%include "teca_unpack_data.h" diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 71e5d9930..317e403ad 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -700,8 +700,7 @@ teca_add_test(test_integrated_vapor_transport SOURCES test_integrated_vapor_transport.cpp LIBS teca_core teca_data teca_alg teca_io ${teca_test_link} COMMAND test_integrated_vapor_transport - FEATURES ${TECA_HAS_NETCDF} - REQ_TECA_DATA) + FEATURES ${TECA_HAS_NETCDF}) teca_add_test(test_cf_time_axis_reader SOURCES test_cf_time_axis_reader.cpp @@ -723,3 +722,10 @@ teca_add_test(test_valid_value_mask 
COMMAND test_valid_value_mask 0.25 1.0e20 "${TECA_DATA_ROOT}/test_valid_value_mask.nc" FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) + +teca_add_test(test_unpack_data + SOURCES test_unpack_data.cpp + LIBS teca_core teca_data teca_alg teca_io ${teca_test_link} + COMMAND test_unpack_data "${TECA_DATA_ROOT}/test_unpack_data.nc" + FEATURES ${TECA_HAS_NETCDF} + REQ_TECA_DATA) diff --git a/test/test_unpack_data.cpp b/test/test_unpack_data.cpp new file mode 100644 index 000000000..28d9811bf --- /dev/null +++ b/test/test_unpack_data.cpp @@ -0,0 +1,196 @@ +#include "teca_variant_array.h" +#include "teca_cartesian_mesh_source.h" +#include "teca_valid_value_mask.h" +#include "teca_unpack_data.h" +#include "teca_cf_reader.h" +#include "teca_cf_writer.h" +#include "teca_dataset_diff.h" +#include "teca_array_attributes.h" +#include "teca_index_executive.h" +#include "teca_system_util.h" +#include "teca_file_util.h" + +#include "math.h" + + +// compute data to pack +// f = cos(z)*sin(x+t)*sin(y+t) +// min(f) = -1 +// max(f) = 1 +struct packed_data +{ + char m_fill; + float m_scale; + float m_offset; + + packed_data() + { + // reserving 255 for the _FillValue + // scale = (max(f) - min(f)) / (2^n - 2) + // offs = min(f) + m_scale = (1.0f - -1.0f)/254.0f; + m_offset = -1.0f; + m_fill = 255; + } + + teca_metadata get_attributes() + { + teca_array_attributes aa(teca_variant_array_code::get(), + teca_array_attributes::point_centering, + 0, "unitless", "packed data", "cos(z)*sin(x+t)*sin(x+t)", + 1, m_fill); + + teca_metadata atts((teca_metadata)aa); + + atts.set("scale_factor", m_scale); + atts.set("add_offset", m_offset); + + return atts; + } + + p_teca_variant_array operator()( + const const_p_teca_variant_array &x, + const const_p_teca_variant_array &y, + const const_p_teca_variant_array &z, + double t) + { + size_t nx = x->size(); + size_t ny = y->size(); + size_t nz = z->size(); + + size_t nxy = nx*ny; + size_t nxyz = nxy*nz; + + // allocate f + p_teca_float_array f = 
teca_float_array::New(nxyz); + float *p_f = f->get(); + + // compute + // f = cos(z)*sin(x+t)*sin(y+t) + TEMPLATE_DISPATCH(const teca_variant_array_impl, + x.get(), + + const NT *p_x = dynamic_cast(x.get())->get(); + const NT *p_y = dynamic_cast(y.get())->get(); + const NT *p_z = dynamic_cast(z.get())->get(); + + for (size_t k = 0; k < nz; ++k) + { + for (size_t j = 0; j < ny; ++j) + { + for (size_t i = 0; i < nx; ++i) + { + p_f[k*nxy + j*nx + i] = cos(p_z[k])*sin(p_x[i]+t)*sin(p_y[j]+t); + } + } + } + ) + + + // allcate q + p_teca_unsigned_char_array q = teca_unsigned_char_array::New(nxyz); + unsigned char *p_q = q->get(); + + // pack + for (size_t i = 0; i < nxyz; ++i) + { + p_q[i] = (unsigned char)roundf((p_f[i] - m_offset)/m_scale); + } + + // mask bottom and top row + for (size_t i = 0; i < nx; ++i) + { + p_q[i] = m_fill; + p_q[nxy - nx + i] = m_fill; + } + + // mask left and right column + for (size_t j = 0; j < ny; ++j) + { + p_q[j*nx] = m_fill; + p_q[(j+1)*nx - 1] = m_fill; + } + + return q; + } +}; + + +int main(int argc, char **argv) +{ + int write_input = 1; + int write_output = 0; + + if (argc != 2) + { + std::cerr << "usage:" << std::endl + << "test_unpack_data [baseline]" << std::endl; + return -1; + } + + std::string baseline = argv[1]; + + packed_data pd; + + p_teca_cartesian_mesh_source src = teca_cartesian_mesh_source::New(); + src->set_coordinate_type_code(teca_variant_array_code::get()); + src->set_field_type_code(teca_variant_array_code::get()); + src->set_whole_extents({0, 63, 0, 63, 0, 0, 0, 15}); + src->set_bounds({-M_PI, M_PI, -M_PI, M_PI, 0.0, 0.0, 0.0, M_PI/4.}); + src->append_field_generator({"func", pd.get_attributes(), pd}); + src->set_calendar("standard"); + src->set_time_units("days since 1980-01-01 00:00:00"); + + if (write_input) + { + p_teca_cf_writer in_wri = teca_cf_writer::New(); + in_wri->set_input_connection(src->get_output_port()); + in_wri->set_point_arrays({"func"}); + 
in_wri->set_file_name("./test_unpack_data_input_%t%.nc"); + in_wri->set_thread_pool_size(1); + in_wri->set_steps_per_file(64); + in_wri->update(); + } + + p_teca_valid_value_mask vvm = teca_valid_value_mask::New(); + vvm->set_input_connection(src->get_output_port()); + + p_teca_unpack_data unp = teca_unpack_data::New(); + unp->set_input_connection(vvm->get_output_port()); + + bool do_test = true; + teca_system_util::get_environment_variable("TECA_DO_TEST", do_test); + if (do_test && teca_file_util::file_exists(baseline.c_str())) + { + std::cerr << "running the test ... " << std::endl; + + p_teca_index_executive rex = teca_index_executive::New(); + rex->set_arrays({"func"}); + rex->set_verbose(1); + + p_teca_cf_reader rdr = teca_cf_reader::New(); + rdr->set_files_regex(baseline); + + p_teca_dataset_diff diff = teca_dataset_diff::New(); + diff->set_input_connection(0, rdr->get_output_port()); + diff->set_input_connection(1, unp->get_output_port()); + diff->set_executive(rex); + + diff->update(); + } + else + { + std::cerr << "writing the baseline ... 
" << std::endl; + + p_teca_cf_writer in_wri = teca_cf_writer::New(); + in_wri->set_input_connection(unp->get_output_port()); + in_wri->set_point_arrays({"func", "func_valid"}); + in_wri->set_file_name(baseline); + in_wri->set_thread_pool_size(1); + in_wri->set_steps_per_file(64); + in_wri->update(); + + } + + return 0; +} From ab7875c696c42d2182dd29ef8bae93d0928e49f2 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 18 Feb 2021 05:18:57 -0800 Subject: [PATCH 014/180] fix integrated_vapor_transport app default number of threads --- apps/teca_integrated_vapor_transport.cpp | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/apps/teca_integrated_vapor_transport.cpp b/apps/teca_integrated_vapor_transport.cpp index d078f6b31..420d3546f 100644 --- a/apps/teca_integrated_vapor_transport.cpp +++ b/apps/teca_integrated_vapor_transport.cpp @@ -102,9 +102,9 @@ int main(int argc, char **argv) " format. Note: There must be a space between the date and time specification\n") ("end_date", value(), "\nThe last time to process in 'Y-M-D h:m:s' format\n") - ("n_threads", value(), "\nSets the thread pool size on each MPI rank. When the default" - " value of -1 is used TECA will coordinate the thread pools across ranks such each" - " thread is bound to a unique physical core.\n") + ("n_threads", value()->default_value(-1), "\nSets the thread pool size on each MPI" + " rank. 
When the default value of -1 is used TECA will coordinate the thread pools" + " across ranks such each thread is bound to a unique physical core.\n") ("verbose", "\nenable extra terminal output\n") @@ -309,10 +309,7 @@ int main(int argc, char **argv) exec->set_verbose(1); } - if (!opt_vals["n_threads"].defaulted()) - cf_writer->set_thread_pool_size(opt_vals["n_threads"].as()); - else - cf_writer->set_thread_pool_size(-1); + cf_writer->set_thread_pool_size(opt_vals["n_threads"].as()); // some minimal check for missing options if ((have_file && have_regex) || !(have_file || have_regex)) From cd1e862fca0eeb803d13dfa97fb02e93a33d8f32 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 18 Feb 2021 05:21:08 -0800 Subject: [PATCH 015/180] fix bayesian_ar_detect app default output file --- apps/teca_bayesian_ar_detect.cpp | 50 +++++++++++++++----------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/apps/teca_bayesian_ar_detect.cpp b/apps/teca_bayesian_ar_detect.cpp index dedfb99bc..785346e07 100644 --- a/apps/teca_bayesian_ar_detect.cpp +++ b/apps/teca_bayesian_ar_detect.cpp @@ -224,6 +224,18 @@ int main(int argc, char **argv) bool have_file = opt_vals.count("input_file"); bool have_regex = opt_vals.count("input_regex"); + if ((have_file && have_regex) || !(have_file || have_regex)) + { + if (mpi_man.get_comm_rank() == 0) + { + TECA_ERROR("Extacly one of --input_file or --input_regex can be specified. 
" + "Use --input_file to activate the multi_cf_reader (HighResMIP datasets) " + "and --input_regex to activate the cf_reader (CAM like datasets)") + } + return -1; + } + + if (have_file) { mcf_reader->set_input_file(opt_vals["input_file"].as()); @@ -294,6 +306,16 @@ int main(int argc, char **argv) bool do_ivt = opt_vals.count("compute_ivt"); bool do_ivt_magnitude = opt_vals.count("compute_ivt_magnitude"); + if (do_ivt && do_ivt_magnitude) + { + if (mpi_man.get_comm_rank() == 0) + { + TECA_ERROR("Only one of --compute_ivt and compute_ivt_magnitude can " + "be specified. --compute_ivt implies --compute_ivt_magnitude") + } + return -1; + } + if (do_ivt) { std::string z_var = "plev"; @@ -328,13 +350,10 @@ int main(int argc, char **argv) point_arrays.push_back(ivt_int->get_ivt_v_variable()); } + cf_writer->set_file_name(opt_vals["output_file"].as()); cf_writer->set_information_arrays({"ar_count", "parameter_table_row"}); cf_writer->set_point_arrays(point_arrays); - - if (!opt_vals["output_file"].defaulted()) - cf_writer->set_file_name(opt_vals["output_file"].as()); - if (!opt_vals["steps_per_file"].defaulted()) cf_writer->set_steps_per_file(opt_vals["steps_per_file"].as()); @@ -356,29 +375,6 @@ int main(int argc, char **argv) else ar_detect->set_thread_pool_size(-1); - - // some minimal check for missing options - if ((have_file && have_regex) || !(have_file || have_regex)) - { - if (mpi_man.get_comm_rank() == 0) - { - TECA_ERROR("Extacly one of --input_file or --input_regex can be specified. " - "Use --input_file to activate the multi_cf_reader (HighResMIP datasets) " - "and --input_regex to activate the cf_reader (CAM like datasets)") - } - return -1; - } - - if (do_ivt && do_ivt_magnitude) - { - if (mpi_man.get_comm_rank() == 0) - { - TECA_ERROR("Only one of --compute_ivt and compute_ivt_magnitude can " - "be specified. 
--compute_ivt implies --compute_ivt_magnitude") - } - return -1; - } - if (cf_writer->get_file_name().empty()) { if (mpi_man.get_comm_rank() == 0) From 7c9b52230269508e84fba15fe159d49000c42eee Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 18 Feb 2021 05:22:35 -0800 Subject: [PATCH 016/180] integrated_vapor_transport app handle NetCDF packed data --- apps/teca_integrated_vapor_transport.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/apps/teca_integrated_vapor_transport.cpp b/apps/teca_integrated_vapor_transport.cpp index 420d3546f..335bfd3c8 100644 --- a/apps/teca_integrated_vapor_transport.cpp +++ b/apps/teca_integrated_vapor_transport.cpp @@ -10,6 +10,7 @@ #include "teca_multi_cf_reader.h" #include "teca_integrated_vapor_transport.h" #include "teca_valid_value_mask.h" +#include "teca_unpack_data.h" #include "teca_mpi_manager.h" #include "teca_coordinate_util.h" #include "teca_table.h" @@ -140,6 +141,12 @@ int main(int argc, char **argv) p_teca_multi_cf_reader mcf_reader = teca_multi_cf_reader::New(); mcf_reader->get_properties_description("mcf_reader", advanced_opt_defs); + p_teca_valid_value_mask vv_mask = teca_valid_value_mask::New(); + vv_mask->get_properties_description("vv_mask", advanced_opt_defs); + + p_teca_unpack_data unpack = teca_unpack_data::New(); + unpack->get_properties_description("unpack", advanced_opt_defs); + p_teca_integrated_vapor_transport ivt_int = teca_integrated_vapor_transport::New(); ivt_int->get_properties_description("ivt_integral", advanced_opt_defs); ivt_int->set_specific_humidity_variable("Q"); @@ -154,9 +161,6 @@ int main(int argc, char **argv) l2_norm->set_component_1_variable("IVT_V"); l2_norm->set_l2_norm_variable("IVT"); - p_teca_valid_value_mask vv_mask = teca_valid_value_mask::New(); - vv_mask->get_properties_description("vv_mask", advanced_opt_defs); - // Add an executive for the writer p_teca_index_executive exec = teca_index_executive::New(); @@ -184,6 +188,7 @@ int main(int 
argc, char **argv) cf_reader->set_properties("cf_reader", opt_vals); mcf_reader->set_properties("mcf_reader", opt_vals); vv_mask->set_properties("vv_mask", opt_vals); + unpack->set_properties("unpack", opt_vals); ivt_int->set_properties("ivt_integral", opt_vals); l2_norm->set_properties("ivt_magnitude", opt_vals); cf_writer->set_properties("cf_writer", opt_vals); @@ -258,7 +263,8 @@ int main(int argc, char **argv) // add the valid value mask stage vv_mask->set_input_connection(head->get_output_port()); - head = vv_mask; + unpack->set_input_connection(vv_mask->get_output_port()); + head = unpack; // add the ivt caluation stages if needed bool do_ivt = opt_vals["write_ivt"].as(); From bf8eafaf253d28a7cc725f491b5082b79f457afa Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 18 Feb 2021 05:23:05 -0800 Subject: [PATCH 017/180] bayesian_ar_detect app handle NetCDF packed data --- apps/teca_bayesian_ar_detect.cpp | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/apps/teca_bayesian_ar_detect.cpp b/apps/teca_bayesian_ar_detect.cpp index 785346e07..6a6ff45f3 100644 --- a/apps/teca_bayesian_ar_detect.cpp +++ b/apps/teca_bayesian_ar_detect.cpp @@ -11,6 +11,7 @@ #include "teca_multi_cf_reader.h" #include "teca_integrated_vapor_transport.h" #include "teca_valid_value_mask.h" +#include "teca_unpack_data.h" #include "teca_mpi_manager.h" #include "teca_coordinate_util.h" #include "teca_table.h" @@ -159,6 +160,9 @@ int main(int argc, char **argv) p_teca_valid_value_mask vv_mask = teca_valid_value_mask::New(); vv_mask->get_properties_description("vv_mask", advanced_opt_defs); + p_teca_unpack_data unpack = teca_unpack_data::New(); + unpack->get_properties_description("unpack", advanced_opt_defs); + p_teca_normalize_coordinates norm_coords = teca_normalize_coordinates::New(); norm_coords->get_properties_description("norm_coords", advanced_opt_defs); @@ -210,6 +214,7 @@ int main(int argc, char **argv) l2_norm->set_properties("ivt_magnitude", 
opt_vals); ivt_int->set_properties("ivt_integral", opt_vals); vv_mask->set_properties("vv_mask", opt_vals); + unpack->set_properties("unpack", opt_vals); norm_coords->set_properties("norm_coords", opt_vals); params->set_properties("parameter_table", opt_vals); ar_detect->set_properties("ar_detect", opt_vals); @@ -248,6 +253,11 @@ int main(int argc, char **argv) } p_teca_algorithm reader = head; + // add basic transfomration stages to the pipeline + vv_mask->set_input_connection(reader->get_output_port()); + unpack->set_input_connection(vv_mask->get_output_port()); + head = unpack; + if (!opt_vals["periodic_in_x"].defaulted()) { cf_reader->set_periodic_in_x(opt_vals["periodic_in_x"].as()); @@ -325,8 +335,7 @@ int main(int argc, char **argv) cf_reader->set_z_axis_variable(z_var); mcf_reader->set_z_axis_variable(z_var); - vv_mask->set_input_connection(head->get_output_port()); - ivt_int->set_input_connection(vv_mask->get_output_port()); + ivt_int->set_input_connection(head->get_output_port()); l2_norm->set_input_connection(ivt_int->get_output_port()); head = l2_norm; From 634198ccaab80a6be7deabd855a395f7f8db2f8c Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 16:10:19 -0800 Subject: [PATCH 018/180] fix doc netcdf build flag --- doc/rtd/installation.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/rtd/installation.rst b/doc/rtd/installation.rst index 53a8f2be9..bcd5784d0 100644 --- a/doc/rtd/installation.rst +++ b/doc/rtd/installation.rst @@ -226,7 +226,7 @@ For Python package dependencies pip is used as described in :ref:`python-environ It is recommended to have a parallel HDF5 based NetCDF install, on some systems (Ubuntu, Mac) this requires installing NetCDF from source as outlined in -:ref:`netcdf-parallel-4`. +:ref:`netcdf-parallel4`. Apple Mac OS ^^^^^^^^^^^^ @@ -302,7 +302,7 @@ Once the venv is installed and activated, see :ref:`compile`. but is no longer maintained and should not be used. -.. 
_netcdf-parallel-4: +.. _netcdf-parallel4: NetCDF w/ Parallel 4 ^^^^^^^^^^^^^^^^^^^^^ @@ -323,7 +323,7 @@ On Ubuntu 20.04 $ cd netcdf-c-4.7.4 $ ./configure CC=mpicc CFLAGS="-O3 -I/usr/include/hdf5/mpich" \ LDFLAGS="-L/usr/lib/x86_64-linux-gnu/hdf5/mpich/ -lhdf5" \ - --prefix=`pwd`/../netcdf-c-4.7.4-install --enable-parallel-4 \ + --prefix=`pwd`/../netcdf-c-4.7.4-install --enable-parallel4 \ --disable-dap $ make -j install @@ -423,7 +423,7 @@ build to the local install by passing options on the pip command line. pip install teca --global-option=build_ext \ --global-option="--with-netcdf=/Users/bloring/netcdf-c-4.7.4-install/" -See section :ref:`netcdf-parallel-4` for information on compiling NetCDF with +See section :ref:`netcdf-parallel4` for information on compiling NetCDF with MPI enabled. with conda From 8a92acfb799af5700b3fa97ccc7efb3ef0b6e1f3 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 18:08:07 -0800 Subject: [PATCH 019/180] normalize_coordinate z-axis descending * this patch makes the z-axis output descending by default * adds algorithm properties to control the order of the axes at run time --- .travis.yml | 2 +- alg/teca_normalize_coordinates.cxx | 270 +++++++++++++++++++++++----- alg/teca_normalize_coordinates.h | 34 ++++ data/teca_coordinate_util.h | 63 +++++++ test/CMakeLists.txt | 32 ++-- test/test_normalize_coordinates.cpp | 25 ++- 6 files changed, 365 insertions(+), 61 deletions(-) diff --git a/.travis.yml b/.travis.yml index 37496417c..74e8bbcf1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ env: - BUILD_TYPE=Debug - TECA_DIR=/travis_teca_dir - TECA_PYTHON_VERSION=3 - - TECA_DATA_REVISION=102 + - TECA_DATA_REVISION=103 jobs: - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=TRUE - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=FALSE diff --git a/alg/teca_normalize_coordinates.cxx b/alg/teca_normalize_coordinates.cxx index a9544c7ae..9c8c4a966 
100644 --- a/alg/teca_normalize_coordinates.cxx +++ b/alg/teca_normalize_coordinates.cxx @@ -16,9 +16,6 @@ #include #endif -using std::cerr; -using std::endl; - //#define TECA_DEBUG struct teca_normalize_coordinates::internals_t @@ -27,7 +24,19 @@ struct teca_normalize_coordinates::internals_t ~internals_t() {} static - p_teca_variant_array normalize_axis(const const_p_teca_variant_array &x); + int normalize_axes( + p_teca_variant_array &out_x, + p_teca_variant_array &out_y, + p_teca_variant_array &out_z, + const const_p_teca_variant_array &in_x, + const const_p_teca_variant_array &in_y, + const const_p_teca_variant_array &in_z, + int x_axis_order, int y_axis_order, + int z_axis_order, double *bounds); + + template typename compare_t> + static p_teca_variant_array normalize_axis( + const const_p_teca_variant_array &x, double *bounds); static void normalize_extent(p_teca_variant_array out_x, @@ -41,10 +50,10 @@ struct teca_normalize_coordinates::internals_t p_teca_array_collection data); }; - // -------------------------------------------------------------------------- +template typename compare_t> p_teca_variant_array teca_normalize_coordinates::internals_t::normalize_axis( - const const_p_teca_variant_array &x) + const const_p_teca_variant_array &x, double *bounds) { unsigned long nx = x->size(); unsigned long x1 = nx - 1; @@ -52,11 +61,13 @@ p_teca_variant_array teca_normalize_coordinates::internals_t::normalize_axis( NESTED_TEMPLATE_DISPATCH(const teca_variant_array_impl, x.get(), _C, - // detect coordinate axis in descending order, reorder it in ascending - // order. for instance an // input of x = (90 30 -30 -90) is transformed - // to x = (-90 -30 30 90) const NT_C *px = dynamic_cast(x.get())->get(); - if (px[x1] < px[0]) + + // if comp(x0, x1) reverse the axis. 
+ // when comp is less than the output will be ascending + // when comp is greater than the output will be descending + compare_t compare; + if (compare(px[x1], px[0])) { p_teca_variant_array xo = x->new_instance(nx); NT_C *pxo = static_cast*>(xo.get())->get(); @@ -65,12 +76,77 @@ p_teca_variant_array teca_normalize_coordinates::internals_t::normalize_axis( for (unsigned long i = 0; i < nx; ++i) pxo[-i] = px[i]; + bounds[0] = px[x1]; + bounds[1] = px[0]; + return xo; } + + bounds[0] = px[0]; + bounds[1] = px[x1]; ) return nullptr; } +// -------------------------------------------------------------------------- +int teca_normalize_coordinates::internals_t::normalize_axes( + p_teca_variant_array &out_x, + p_teca_variant_array &out_y, + p_teca_variant_array &out_z, + const const_p_teca_variant_array &in_x, + const const_p_teca_variant_array &in_y, + const const_p_teca_variant_array &in_z, + int x_axis_order, int y_axis_order, int z_axis_order, + double *bounds) +{ + // x axis + if (x_axis_order == ORDER_ASCENDING) + { + out_x = internals_t::normalize_axis(in_x, bounds); + } + else if (x_axis_order == ORDER_DESCENDING) + { + out_x = internals_t::normalize_axis(in_x, bounds); + } + else + { + TECA_ERROR("Invalid x_axis_order " << x_axis_order) + return -1; + } + + // y axis + if (y_axis_order == ORDER_ASCENDING) + { + out_y = internals_t::normalize_axis(in_y, bounds + 2); + } + else if (y_axis_order == ORDER_DESCENDING) + { + out_y = internals_t::normalize_axis(in_y, bounds + 2); + } + else + { + TECA_ERROR("Invalid y_axis_order " << y_axis_order) + return -1; + } + + // z axis + if (z_axis_order == ORDER_ASCENDING) + { + out_z = internals_t::normalize_axis(in_z, bounds + 4); + } + else if (z_axis_order == ORDER_DESCENDING) + { + out_z = internals_t::normalize_axis(in_z, bounds + 4); + } + else + { + TECA_ERROR("Invalid z_axis_order " << z_axis_order) + return -1; + } + + return 0; +} + // -------------------------------------------------------------------------- void 
teca_normalize_coordinates::internals_t::normalize_extent( p_teca_variant_array out_x, p_teca_variant_array out_y, @@ -78,12 +154,14 @@ void teca_normalize_coordinates::internals_t::normalize_extent( unsigned long *extent_in, unsigned long *extent_out) { #if defined(TECA_DEBUG) - cerr << "out=[" << out_x << ", " << out_y << ", " << out_z << "]" << endl + std::cerr + << "out=[" << out_x << ", " << out_y << ", " << out_z << "]" << std::endl << "whole_extent=[" << whole_extent[0] << ", " << whole_extent[1] << ", " << whole_extent[2] << ", " << whole_extent[3] << ", " << whole_extent[4] - << ", " << whole_extent[5] << "]" << endl << "extent_in=[" << extent_in[0] - << ", " << extent_in[1] << ", " << extent_in[2] << ", " << extent_in[3] - << ", " << extent_in[4] << ", " << extent_in[5] << "]" << endl; + << ", " << whole_extent[5] << "]" << std::endl + << "extent_in=[" << extent_in[0] << ", " << extent_in[1] << ", " + << extent_in[2] << ", " << extent_in[3] << ", " << extent_in[4] << ", " + << extent_in[5] << "]" << std::endl; #endif memcpy(extent_out, extent_in, 6*sizeof(unsigned long)); @@ -113,13 +191,12 @@ void teca_normalize_coordinates::internals_t::normalize_extent( } #if defined(TECA_DEBUG) - cerr << "extent_out=[" << extent_out[0] << ", " << extent_out[1] << ", " + std::cerr << "extent_out=[" << extent_out[0] << ", " << extent_out[1] << ", " << extent_out[2] << ", " << extent_out[3] << ", " << extent_out[4] - << ", " << extent_out[5] << "]" << endl; + << ", " << extent_out[5] << "]" << std::endl; #endif } - // -------------------------------------------------------------------------- void teca_normalize_coordinates::internals_t::normalize_variables( bool normalize_x, bool normalize_y, bool normalize_z, @@ -232,7 +309,9 @@ void teca_normalize_coordinates::internals_t::normalize_variables( } // -------------------------------------------------------------------------- -teca_normalize_coordinates::teca_normalize_coordinates() : internals(nullptr) 
+teca_normalize_coordinates::teca_normalize_coordinates() : + x_axis_order(ORDER_ASCENDING), y_axis_order(ORDER_ASCENDING), + z_axis_order(ORDER_DESCENDING), verbose(0), internals(nullptr) { this->internals = new teca_normalize_coordinates::internals_t; @@ -249,24 +328,82 @@ teca_normalize_coordinates::~teca_normalize_coordinates() #if defined(TECA_HAS_BOOST) // -------------------------------------------------------------------------- void teca_normalize_coordinates::get_properties_description( - const std::string &/*prefix*/, options_description &/*global_opts*/) + const std::string &prefix, options_description &global_opts) { + options_description opts("Options for " + + (prefix.empty()?"teca_normalize_coordinates":prefix)); + + opts.add_options() + TECA_POPTS_GET(int, prefix, x_axis_order, + "Sets the desired output order of the x-axis. Use" + " ORDER_ASCENDING(0) or ORDER_DESCENDING(1). By default" + " the x-axis will be output in ascending order.") + TECA_POPTS_GET(int, prefix, y_axis_order, + "Sets the desired output order of the y-axis. Use" + " ORDER_ASCENDING(0) or ORDER_DESCENDING(1). By default" + " the y-axis will be output in ascending order.") + TECA_POPTS_GET(int, prefix, z_axis_order, + "Sets the desired output order of the z-axis. Use" + " ORDER_ASCENDING(0) or ORDER_DESCENDING(1). 
By default" + " the z-axis will be output in descending order.") + TECA_POPTS_GET(int, prefix, verbose, + "If set then status messages are sent to the terminal.") + ; + + global_opts.add(opts); } // -------------------------------------------------------------------------- void teca_normalize_coordinates::set_properties( - const std::string &/*prefix*/, variables_map &/*opts*/) + const std::string &prefix, variables_map &opts) { + TECA_POPTS_SET(opts, int, prefix, x_axis_order) + TECA_POPTS_SET(opts, int, prefix, y_axis_order) + TECA_POPTS_SET(opts, int, prefix, z_axis_order) + TECA_POPTS_SET(opts, int, prefix, verbose) } #endif +// -------------------------------------------------------------------------- +int teca_normalize_coordinates::validate_x_axis_order(int val) +{ + if ((val != ORDER_ASCENDING) && (val != ORDER_DESCENDING)) + { + TECA_ERROR("Invlaid x_axis_order " << val) + return -1; + } + return 0; +} + +// -------------------------------------------------------------------------- +int teca_normalize_coordinates::validate_y_axis_order(int val) +{ + if ((val != ORDER_ASCENDING) && (val != ORDER_DESCENDING)) + { + TECA_ERROR("Invlaid y_axis_order " << val) + return -1; + } + return 0; +} + +// -------------------------------------------------------------------------- +int teca_normalize_coordinates::validate_z_axis_order(int val) +{ + if ((val != ORDER_ASCENDING) && (val != ORDER_DESCENDING)) + { + TECA_ERROR("Invlaid z_axis_order " << val) + return -1; + } + return 0; +} + // -------------------------------------------------------------------------- teca_metadata teca_normalize_coordinates::get_output_metadata( unsigned int port, const std::vector &input_md) { #ifdef TECA_DEBUG - cerr << teca_parallel_id() - << "teca_normalize_coordinates::get_output_metadata" << endl; + std::cerr << teca_parallel_id() + << "teca_normalize_coordinates::get_output_metadata" << std::endl; #endif (void)port; @@ -289,18 +426,31 @@ teca_metadata 
teca_normalize_coordinates::get_output_metadata( // check for and transform coordinate axes from descending order // to ascending order + double bounds[6] = {0.0}; p_teca_variant_array out_x, out_y, out_z; - if ((out_x = this->internals->normalize_axis(in_x))) + if (this->internals->normalize_axes(out_x, out_y, out_z, + in_x, in_y, in_z, this->x_axis_order, this->y_axis_order, + this->z_axis_order, bounds)) + { + TECA_ERROR("Failed to normalize axes") + return teca_metadata(); + } + + // pass normalized coordinates + if (out_x) coords.set("x", out_x); - if ((out_y = this->internals->normalize_axis(in_y))) + if (out_y) coords.set("y", out_y); - if ((out_z = this->internals->normalize_axis(in_z))) + if (out_z) coords.set("z", out_z); if (out_x || out_y || out_z) + { out_md.set("coordinates", coords); + out_md.set("bounds", bounds); + } return out_md; } @@ -337,15 +487,13 @@ std::vector teca_normalize_coordinates::get_upstream_request( // now convert the original coordinate axes into the // normalized system. 
this isn't cached for thread safety + double bounds[6] = {0.0}; p_teca_variant_array out_x, out_y, out_z; - out_x = this->internals->normalize_axis(in_x); - out_y = this->internals->normalize_axis(in_y); - out_z = this->internals->normalize_axis(in_z); - - // normalized system is the same as the original, pass the request up - if (!out_x && !out_y && !out_z) + if (this->internals->normalize_axes(out_x, out_y, out_z, + in_x, in_y, in_z, this->x_axis_order, this->y_axis_order, + this->z_axis_order, bounds)) { - up_reqs.push_back(request); + TECA_ERROR("Failed to normalize axes") return up_reqs; } @@ -360,8 +508,8 @@ std::vector teca_normalize_coordinates::get_upstream_request( // get the extent that is being requested unsigned long extent_in[6] = {0}; unsigned long extent_out[6] = {0}; - double bounds[6] = {0.0}; - if (req.get("bounds", bounds, 6)) + double req_bounds[6] = {0.0}; + if (req.get("bounds", req_bounds, 6)) { // bounds key not present, check for extent key // if not present use whole_extent @@ -370,9 +518,24 @@ std::vector teca_normalize_coordinates::get_upstream_request( } else { + // validate the requested bounds + if (!teca_coordinate_util::same_orientation(bounds, req_bounds) || + !teca_coordinate_util::covers(bounds, req_bounds)) + { + TECA_ERROR("Invalid request. The requested bounds [" + << req_bounds[0] << ", " << req_bounds[1] << ", " + << req_bounds[2] << ", " << req_bounds[3] << ", " + << req_bounds[4] << ", " << req_bounds[5] + << "] is not covered by the available bounds [" + << bounds[0] << ", " << bounds[1] << ", " + << bounds[2] << ", " << bounds[3] << ", " + << bounds[4] << ", " << bounds[5] << "]") + return up_reqs; + } + // bounds key was present, convert the bounds to an // an extent that covers them. - if (teca_coordinate_util::bounds_to_extent(bounds, + if (teca_coordinate_util::bounds_to_extent(req_bounds, (out_x ? out_x : in_x), (out_y ? out_y : in_y), (out_z ? 
out_z : in_z), extent_in)) { @@ -385,10 +548,24 @@ std::vector teca_normalize_coordinates::get_upstream_request( req.remove("bounds"); } - // apply the trsnaform if needed + // apply the transform if needed this->internals->normalize_extent(out_x, out_y, out_z, whole_extent, extent_in, extent_out); + // validate the requested extent + if (!teca_coordinate_util::covers_ascending(whole_extent, extent_out)) + { + TECA_ERROR("Invalid request. The requested extent [" + << extent_out[0] << ", " << extent_out[1] << ", " + << extent_out[2] << ", " << extent_out[3] << ", " + << extent_out[4] << ", " << extent_out[5] + << "] is not covered by the available whole_extent [" + << whole_extent[0] << ", " << whole_extent[1] << ", " + << whole_extent[2] << ", " << whole_extent[3] << ", " + << whole_extent[4] << ", " << whole_extent[5] << "]") + return up_reqs; + } + // send the request up req.set("extent", extent_out, 6); up_reqs.push_back(req); @@ -402,7 +579,8 @@ const_p_teca_dataset teca_normalize_coordinates::execute(unsigned int port, const teca_metadata &request) { #ifdef TECA_DEBUG - cerr << teca_parallel_id() << "teca_normalize_coordinates::execute" << endl; + std::cerr << teca_parallel_id() + << "teca_normalize_coordinates::execute" << std::endl; #endif (void)port; (void)request; @@ -427,22 +605,31 @@ const_p_teca_dataset teca_normalize_coordinates::execute(unsigned int port, const_p_teca_variant_array in_z = in_mesh->get_z_coordinates(); // transform the axes to ascending order if needed + double bounds[6] = {0.0}; p_teca_variant_array out_x, out_y, out_z; - if ((out_x = this->internals->normalize_axis(in_x))) + if (this->internals->normalize_axes(out_x, out_y, out_z, + in_x, in_y, in_z, this->x_axis_order, this->y_axis_order, + this->z_axis_order, bounds)) + { + TECA_ERROR("Failed to normalize axes") + return nullptr; + } + + if (out_x) { std::string var; in_mesh->get_x_coordinate_variable(var); out_mesh->set_x_coordinates(var, out_x); } - if ((out_y = 
this->internals->normalize_axis(in_y))) + if (out_y) { std::string var; in_mesh->get_y_coordinate_variable(var); out_mesh->set_y_coordinates(var, out_y); } - if ((out_z = this->internals->normalize_axis(in_z))) + if (out_z) { std::string var; in_mesh->get_z_coordinate_variable(var); @@ -456,7 +643,8 @@ const_p_teca_dataset teca_normalize_coordinates::execute(unsigned int port, in_mesh->get_extent(extent); this->internals->normalize_variables(out_x.get(), - out_y.get(), out_z.get(), extent, out_mesh->get_point_arrays()); + out_y.get(), out_z.get(), extent, + out_mesh->get_point_arrays()); } return out_mesh; diff --git a/alg/teca_normalize_coordinates.h b/alg/teca_normalize_coordinates.h index 87d071603..bf6fcb85c 100644 --- a/alg/teca_normalize_coordinates.h +++ b/alg/teca_normalize_coordinates.h @@ -29,9 +29,38 @@ class teca_normalize_coordinates : public teca_algorithm TECA_GET_ALGORITHM_PROPERTIES_DESCRIPTION() TECA_SET_ALGORITHM_PROPERTIES() + /** @anchor x,y,z_axis_order + * @name x,y,z_axis_order + * Set the desired order of the output for each coordinate + * axis. Use ORDER_ASCENDING(0) to ensure the output is in + * ascending order, and ORDER_DESCENDING(1) to ensure the + * output is in descending order. By default the x and y + * axes are put in ascending order and the z axis is put + * into descending order. + */ + ///@{ + enum {ORDER_ASCENDING = 0, ORDER_DESCENDING = 1}; + + TECA_ALGORITHM_PROPERTY_V(int, x_axis_order) + TECA_ALGORITHM_PROPERTY_V(int, y_axis_order) + TECA_ALGORITHM_PROPERTY_V(int, z_axis_order) + ///@} + + /** @anchor verbose + * @name verbose + * if set to a non-zero value, rank 0 will send status information to the + * terminal. The default setting of zero results in minimal output. 
+ */ + ///@{ + TECA_ALGORITHM_PROPERTY(int, verbose) + ///@} protected: teca_normalize_coordinates(); + int validate_x_axis_order(int val); + int validate_y_axis_order(int val); + int validate_z_axis_order(int val); + private: teca_metadata get_output_metadata(unsigned int port, const std::vector &input_md) override; @@ -45,6 +74,11 @@ class teca_normalize_coordinates : public teca_algorithm const teca_metadata &request) override; private: + int x_axis_order; + int y_axis_order; + int z_axis_order; + int verbose; + struct internals_t; internals_t *internals; }; diff --git a/data/teca_coordinate_util.h b/data/teca_coordinate_util.h index d14b93c20..04a7642e9 100644 --- a/data/teca_coordinate_util.h +++ b/data/teca_coordinate_util.h @@ -546,5 +546,68 @@ int convert_cell_extent(num_t *extent, int centering) int get_cartesian_mesh_extent(const teca_metadata &md, unsigned long *whole_extent, double *bounds); + +// check that one Cartesian region covers the other coordinates must be in +// ascending order. assumes that both regions are specified in ascending order. +template +int covers_ascending(const num_t *whole, const num_t *part) +{ + if ((part[0] >= whole[0]) && (part[0] <= whole[1]) && + (part[1] >= whole[0]) && (part[1] <= whole[1]) && + (part[2] >= whole[2]) && (part[2] <= whole[3]) && + (part[3] >= whole[2]) && (part[3] <= whole[3]) && + (part[4] >= whole[4]) && (part[4] <= whole[5]) && + (part[5] >= whole[4]) && (part[5] <= whole[5])) + return 1; + return 0; +} + +// check that one Cartesian region covers the other, taking into account the +// order of the coordinates. assumes that the regions are specified in the same +// orientation. 
+template +int covers(const num_t *whole, const num_t *part) +{ + bool x_ascend = whole[0] <= whole[1]; + bool y_ascend = whole[2] <= whole[3]; + bool z_ascend = whole[4] <= whole[5]; + if (((x_ascend && + (part[0] >= whole[0]) && (part[0] <= whole[1]) && + (part[1] >= whole[0]) && (part[1] <= whole[1])) || + (!x_ascend && + (part[0] <= whole[0]) && (part[0] >= whole[1]) && + (part[1] <= whole[0]) && (part[1] >= whole[1]))) && + ((y_ascend && + (part[2] >= whole[2]) && (part[2] <= whole[3]) && + (part[3] >= whole[2]) && (part[3] <= whole[3])) || + (!y_ascend && + (part[2] <= whole[2]) && (part[2] >= whole[3]) && + (part[3] <= whole[2]) && (part[3] >= whole[3]))) && + ((z_ascend && + (part[4] >= whole[4]) && (part[4] <= whole[5]) && + (part[5] >= whole[4]) && (part[5] <= whole[5])) || + (!z_ascend && + (part[4] <= whole[4]) && (part[4] >= whole[5]) && + (part[5] <= whole[4]) && (part[5] >= whole[5])))) + return 1; + return 0; +} + +// check that two Cartesian regions have the same orientation +// ie they are either both specidied in ascending or descending +// order. 
+template +int same_orientation(const num_t *whole, const num_t *part) +{ + if ((((whole[0] <= whole[1]) && (part[0] <= part[1])) || + ((whole[0] >= whole[1]) && (part[0] >= part[1]))) && + (((whole[2] <= whole[3]) && (part[2] <= part[3])) || + ((whole[2] >= whole[3]) && (part[2] >= part[3]))) && + (((whole[4] <= whole[5]) && (part[4] <= part[5])) || + ((whole[4] >= whole[5]) && (part[4] >= part[5])))) + return 1; + return 0; +} + }; #endif diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 317e403ad..0b5d3ffe4 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -582,82 +582,82 @@ teca_add_test(test_normalize_coordinates_pass_through EXEC_NAME test_normalize_coordinates SOURCES test_normalize_coordinates.cpp LIBS teca_core teca_data teca_io teca_alg ${teca_test_link} - COMMAND test_normalize_coordinates 90 45 7 0 0 0 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_x - COMMAND test_normalize_coordinates 90 45 7 1 0 0 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 1 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_y - COMMAND test_normalize_coordinates 90 45 7 0 1 0 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 0 1 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_z - COMMAND test_normalize_coordinates 90 45 7 0 0 1 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 0 0 1 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xy - COMMAND test_normalize_coordinates 90 45 7 1 1 0 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 1 1 0 0 360 -90 90 100 0 
"${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xz - COMMAND test_normalize_coordinates 90 45 7 1 0 1 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 1 0 1 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_yz - COMMAND test_normalize_coordinates 90 45 7 0 1 1 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 0 1 1 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xyz - COMMAND test_normalize_coordinates 90 45 7 1 1 1 0 360 -90 90 0 10 + COMMAND test_normalize_coordinates 90 45 7 1 1 1 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_pass_through_subset - COMMAND test_normalize_coordinates 90 45 7 0 0 0 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_x_subset - COMMAND test_normalize_coordinates 90 45 7 1 0 0 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 1 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_y_subset - COMMAND test_normalize_coordinates 90 45 7 0 1 0 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 0 1 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_z_subset - COMMAND test_normalize_coordinates 90 45 7 0 0 1 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 0 0 1 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xy_subset - COMMAND test_normalize_coordinates 
90 45 7 1 1 0 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 1 1 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xz_subset - COMMAND test_normalize_coordinates 90 45 7 1 0 1 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 1 0 1 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_yz_subset - COMMAND test_normalize_coordinates 90 45 7 0 1 1 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 0 1 1 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xyz_subset - COMMAND test_normalize_coordinates 90 45 7 1 1 1 40 190 -30 45 3 7 + COMMAND test_normalize_coordinates 90 45 7 1 1 1 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) diff --git a/test/test_normalize_coordinates.cpp b/test/test_normalize_coordinates.cpp index 608310d27..b581db0df 100644 --- a/test/test_normalize_coordinates.cpp +++ b/test/test_normalize_coordinates.cpp @@ -103,7 +103,6 @@ int main(int argc, char **argv) atof(argv[9]), atof(argv[10]), atof(argv[11]), atof(argv[12])}); std::string out_file = argv[13]; - p_teca_cartesian_mesh_source source = teca_cartesian_mesh_source::New(); source->set_whole_extents({0, nx-1, 0, ny-1, 0, nz-1, 0, 0}); @@ -111,8 +110,8 @@ int main(int argc, char **argv) double x1 = flip_x ? 0.0 : 360.0; double y0 = flip_y ? 90.0 : -90.0; double y1 = flip_y ? -90.0 : 90.0; - double z0 = flip_z ? 10.0 : 0.0; - double z1 = flip_z ? 0.0 : 10.0; + double z0 = flip_z ? 0.0 : 100.0; // the z-axis is descending by default + double z1 = flip_z ? 
100.0 : 0.0; source->set_bounds({x0, x1, y0, y1, z0, z1, 0., 0.}); distance_field distance = {80., -80., 2.5}; @@ -123,6 +122,26 @@ int main(int argc, char **argv) p_teca_index_executive exec = teca_index_executive::New(); exec->set_bounds(req_bounds); + exec->set_verbose(1); + + std::cerr << "running the test with x " + << (flip_x ? "descending" : "ascending") << ", y " + << (flip_y ? "descending" : "ascending") << ", z " + << (flip_z ? "ascending" : "descending") << std::endl + << "whole_extents = [0, " << nx-1 << ", 0, " + << ny-1 << ", 0, " << nz-1 << "]" << std::endl + << "bounds = [" << x0 << ", " << x1 << ", " << y0 + << ", " << y1 << ", " << z0 << ", " << z1 << "]" + << std::endl; + + teca_metadata md = coords->update_metadata(); + + teca_metadata coord_axes; + md.get("coordinates", coord_axes); + + std::cerr << "coordinates" << std::endl; + coord_axes.to_stream(std::cerr); + std::cerr << std::endl; bool do_test = true; teca_system_util::get_environment_variable("TECA_DO_TEST", do_test); From 7ce92ddcc940109824e93e05caf38b28f7804f16 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 18:14:05 -0800 Subject: [PATCH 020/180] cartesian_mesh_source pass bounds as const --- alg/teca_cartesian_mesh_source.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/alg/teca_cartesian_mesh_source.cxx b/alg/teca_cartesian_mesh_source.cxx index 44b711587..482b732fe 100644 --- a/alg/teca_cartesian_mesh_source.cxx +++ b/alg/teca_cartesian_mesh_source.cxx @@ -26,8 +26,8 @@ struct teca_cartesian_mesh_source::internals_t // the world space [x0 x1 y0 y1 z0 z1 t0 t1] generate // equally spaced coordinate axes x,y,z,t static - void initialize_axes(int type_code, unsigned long *extent, - double *bounds, p_teca_variant_array &x_axis, + void initialize_axes(int type_code, const unsigned long *extent, + const double *bounds, p_teca_variant_array &x_axis, p_teca_variant_array &y_axis, p_teca_variant_array &z_axis, p_teca_variant_array &t_axis); @@ 
-59,7 +59,7 @@ void teca_cartesian_mesh_source::internals_t::initialize_axis( // -------------------------------------------------------------------------- void teca_cartesian_mesh_source::internals_t::initialize_axes(int type_code, - unsigned long *extent, double *bounds, p_teca_variant_array &x_axis, + const unsigned long *extent, const double *bounds, p_teca_variant_array &x_axis, p_teca_variant_array &y_axis, p_teca_variant_array &z_axis, p_teca_variant_array &t_axis) { From 2639da9a880e63fe5839e0b48f46d3ad918f17fa Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 18:15:25 -0800 Subject: [PATCH 021/180] cf_writer default to 128 steps per file --- io/teca_cf_writer.cxx | 4 ++-- io/teca_cf_writer.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/io/teca_cf_writer.cxx b/io/teca_cf_writer.cxx index c7b02883b..84c05a1ac 100644 --- a/io/teca_cf_writer.cxx +++ b/io/teca_cf_writer.cxx @@ -40,7 +40,7 @@ class teca_cf_writer::internals_t // -------------------------------------------------------------------------- teca_cf_writer::teca_cf_writer() : file_name(""), date_format("%F-%HZ"), first_step(0), last_step(-1), - steps_per_file(16), mode_flags(NC_CLOBBER|NC_NETCDF4), use_unlimited_dim(0), + steps_per_file(128), mode_flags(NC_CLOBBER|NC_NETCDF4), use_unlimited_dim(0), compression_level(-1), flush_files(0) { this->set_number_of_input_connections(1); @@ -79,7 +79,7 @@ void teca_cf_writer::get_properties_description( "set the last time step to process. 
A value less than 0 results " "in all steps being processed.(-1)") TECA_POPTS_GET(unsigned int, prefix, steps_per_file, - "set the number of time steps to write per file (1)") + "set the number of time steps to write per file (128)") TECA_POPTS_GET(int, prefix, mode_flags, "mode flags to pass to NetCDF when creating the file (NC_CLOBBER)") TECA_POPTS_GET(int, prefix, use_unlimited_dim, diff --git a/io/teca_cf_writer.h b/io/teca_cf_writer.h index 9cced183f..2f19580e3 100644 --- a/io/teca_cf_writer.h +++ b/io/teca_cf_writer.h @@ -83,12 +83,12 @@ class teca_cf_writer : public teca_threaded_algorithm // set how many time steps are written to each file. Note that upstream is // parallelized over files rather than time steps. this has the affect of // reducing the available oportunity for MPI parallelization by this - // factor. For example if there are 16 timee steps and steps_per_file is 8, + // factor. For example if there are 16 time steps and steps_per_file is 8, // 2 MPI ranks each running 8 or more threads would be optimal. One // should make such calculations when planning large runs if optimal // performance is desired. time steps are gathered before the file is // written, thus available memory per MPI rank is the limiting factor in - // how many steps can be stored in a single file (1). + // how many steps can be stored in a single file (128). TECA_ALGORITHM_PROPERTY(unsigned int, steps_per_file) // sets the flags passed to NetCDF during file creation. 
(NC_CLOBBER) From 66b084c2b4a4cee47c1e211b08c10543db61139a Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 18:16:00 -0800 Subject: [PATCH 022/180] fix a warning in test_unpack_data --- test/test_unpack_data.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/test_unpack_data.cpp b/test/test_unpack_data.cpp index 28d9811bf..b6e53369e 100644 --- a/test/test_unpack_data.cpp +++ b/test/test_unpack_data.cpp @@ -118,8 +118,7 @@ struct packed_data int main(int argc, char **argv) { - int write_input = 1; - int write_output = 0; + int write_input = 0; if (argc != 2) { From 2795b44f64919294323dc89fa252f65869909ad8 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 18:16:24 -0800 Subject: [PATCH 023/180] metadata_probe app normalize coordinates adds teca_normalize_coordinates into the pipeline --- apps/teca_metadata_probe.cpp | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/apps/teca_metadata_probe.cpp b/apps/teca_metadata_probe.cpp index 6d9f3d192..5e2775475 100644 --- a/apps/teca_metadata_probe.cpp +++ b/apps/teca_metadata_probe.cpp @@ -3,6 +3,7 @@ #include "teca_netcdf_util.h" #include "teca_cf_reader.h" #include "teca_multi_cf_reader.h" +#include "teca_normalize_coordinates.h" #include "teca_array_collection.h" #include "teca_variant_array.h" #include "teca_coordinate_util.h" @@ -93,6 +94,9 @@ int main(int argc, char **argv) p_teca_multi_cf_reader mcf_reader = teca_multi_cf_reader::New(); mcf_reader->get_properties_description("mcf_reader", advanced_opt_defs); + p_teca_normalize_coordinates norm_coords = teca_normalize_coordinates::New(); + norm_coords->get_properties_description("norm_coords", advanced_opt_defs); + // package basic and advanced options for display options_description all_opt_defs(help_width, help_width - 4); all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); @@ -110,6 +114,7 @@ int main(int argc, char **argv) // options will override 
them cf_reader->set_properties("cf_reader", opt_vals); mcf_reader->set_properties("mcf_reader", opt_vals); + norm_coords->set_properties("norm_coords", opt_vals); // now pas in the basic options, these are procesed // last so that they will take precedence @@ -155,6 +160,7 @@ int main(int argc, char **argv) z_var = cf_reader->get_z_axis_variable(); reader = cf_reader; } + norm_coords->set_input_connection(reader->get_output_port()); std::string time_i; if (opt_vals.count("start_date")) @@ -177,7 +183,7 @@ int main(int argc, char **argv) } // run the reporting phase of the pipeline - teca_metadata md = reader->update_metadata(); + teca_metadata md = norm_coords->update_metadata(); // from here on out just rank 0 if (rank == 0) @@ -344,6 +350,33 @@ int main(int argc, char **argv) } std::cerr << std::endl; + // report extents + long extent[6] = {0l}; + if (!md.get("whole_extent", extent, 6)) + { + std::cerr << "Mesh extents: " << extent[0] << ", " << extent[1] + << ", " << extent[2] << ", " << extent[3]; + if (!z_var.empty()) + { + std::cerr << ", " << extent[4] << ", " << extent[5]; + } + std::cerr << std::endl; + } + + // report bounds + double bounds[6] = {0.0}; + if (!md.get("bounds", bounds, 6)) + { + std::cerr << "Mesh bounds: " << bounds[0] << ", " << bounds[1] + << ", " << bounds[2] << ", " << bounds[3]; + if (!z_var.empty()) + { + std::cerr << ", " << bounds[4] << ", " << bounds[5]; + } + std::cerr << std::endl; + } + + // report the arrays size_t n_arrays = atrs.size(); From 9ee5adeee24f00f2cab6d3a80bc56de86b6efd08 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 18:22:16 -0800 Subject: [PATCH 024/180] integrated_vapor_transport app normalize coordinates added teca_normalize_coordinates into the pipeline includes a regression test using ERA interm data that has increasing z-axis and NertCDF packed data fields. 
--- .travis.yml | 2 +- apps/teca_integrated_vapor_transport.cpp | 15 ++++-- test/apps/CMakeLists.txt | 6 +++ ...egrated_vapor_transport_app_packed_data.sh | 52 +++++++++++++++++++ 4 files changed, 69 insertions(+), 6 deletions(-) create mode 100755 test/apps/test_integrated_vapor_transport_app_packed_data.sh diff --git a/.travis.yml b/.travis.yml index 74e8bbcf1..04f0801e4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ env: - BUILD_TYPE=Debug - TECA_DIR=/travis_teca_dir - TECA_PYTHON_VERSION=3 - - TECA_DATA_REVISION=103 + - TECA_DATA_REVISION=104 jobs: - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=TRUE - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=FALSE diff --git a/apps/teca_integrated_vapor_transport.cpp b/apps/teca_integrated_vapor_transport.cpp index 335bfd3c8..b64aa91da 100644 --- a/apps/teca_integrated_vapor_transport.cpp +++ b/apps/teca_integrated_vapor_transport.cpp @@ -141,6 +141,9 @@ int main(int argc, char **argv) p_teca_multi_cf_reader mcf_reader = teca_multi_cf_reader::New(); mcf_reader->get_properties_description("mcf_reader", advanced_opt_defs); + p_teca_normalize_coordinates norm_coords = teca_normalize_coordinates::New(); + norm_coords->get_properties_description("norm_coords", advanced_opt_defs); + p_teca_valid_value_mask vv_mask = teca_valid_value_mask::New(); vv_mask->get_properties_description("vv_mask", advanced_opt_defs); @@ -187,6 +190,7 @@ int main(int argc, char **argv) // options will override them cf_reader->set_properties("cf_reader", opt_vals); mcf_reader->set_properties("mcf_reader", opt_vals); + norm_coords->set_properties("norm_coords", opt_vals); vv_mask->set_properties("vv_mask", opt_vals); unpack->set_properties("unpack", opt_vals); ivt_int->set_properties("ivt_integral", opt_vals); @@ -196,7 +200,7 @@ int main(int argc, char **argv) // now pass in the basic options, these are processed // last so that they will take precedence // 
configure the pipeline from the command line options. - p_teca_algorithm head; + p_teca_algorithm reader; // configure the reader bool have_file = opt_vals.count("input_file"); @@ -205,14 +209,14 @@ int main(int argc, char **argv) if (opt_vals.count("input_file")) { mcf_reader->set_input_file(opt_vals["input_file"].as()); - head = mcf_reader; + reader = mcf_reader; } else if (opt_vals.count("input_regex")) { cf_reader->set_files_regex(opt_vals["input_regex"].as()); - head = cf_reader; + reader = cf_reader; } - p_teca_algorithm reader = head; + p_teca_algorithm head = reader; if (!opt_vals["x_axis_variable"].defaulted()) { @@ -262,7 +266,8 @@ int main(int argc, char **argv) } // add the valid value mask stage - vv_mask->set_input_connection(head->get_output_port()); + norm_coords->set_input_connection(head->get_output_port()); + vv_mask->set_input_connection(norm_coords->get_output_port()); unpack->set_input_connection(vv_mask->get_output_port()); head = unpack; diff --git a/test/apps/CMakeLists.txt b/test/apps/CMakeLists.txt index 1496d4d92..3fe7ea2ee 100644 --- a/test/apps/CMakeLists.txt +++ b/test/apps/CMakeLists.txt @@ -109,6 +109,12 @@ teca_add_test(test_integrated_vapor_transport_app_mpi_threads FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) +teca_add_test(test_integrated_vapor_transport_app_packed_data + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app_packed_data.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 + FEATURES ${TECA_HAS_NETCDF} + REQ_TECA_DATA) + teca_add_test(test_tc_detect_app COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} diff --git a/test/apps/test_integrated_vapor_transport_app_packed_data.sh b/test/apps/test_integrated_vapor_transport_app_packed_data.sh new file mode 100755 index 000000000..ba955b447 --- /dev/null +++ b/test/apps/test_integrated_vapor_transport_app_packed_data.sh @@ -0,0 +1,52 @@ +#!/bin/bash 
+ +if [[ $# < 3 ]] +then + echo "usage: test_integrated_vapor_transport_app.sh [app prefix] " \ + "[data root] [num threads] [mpiexec] [num ranks]" + exit -1 +fi + +app_prefix=${1} +data_root=${2} +n_threads=${3} + +if [[ $# -eq 5 ]] +then + mpi_exec=${4} + test_cores=${5} + launcher="${mpi_exec} -n ${test_cores}" +fi + +set -x +set -e + +# run the app +${launcher} ${app_prefix}/teca_integrated_vapor_transport \ + --input_regex "${data_root}/ERAinterim_1979-01-0.*\.nc$" \ + --x_axis_variable longitude --y_axis_variable latitude --z_axis_variable level \ + --wind_u uwnd --wind_v vwnd --specific_humidity shum --write_ivt 1 \ + --write_ivt_magnitude 1 --steps_per_file 256 --n_threads ${n_threads} --verbose \ + --output_file test_integrated_vapor_transport_app_packed_data_output_%t%.nc + +do_test=1 +if [[ $do_test -eq 0 ]] +then + echo "regenerating baseline..." + for f in `ls test_integrated_vapor_transport_app_packed_data_output_*.nc` + do + ff=`echo $f | sed s/output/ref/g` + cp -vd $f ${data_root}/$ff + done +else + # run the diff + ${app_prefix}/teca_cartesian_mesh_diff \ + --reference_dataset ${data_root}/test_integrated_vapor_transport_app_packed_data_ref'.*\.nc' \ + --test_dataset test_integrated_vapor_transport_app_packed_data_output'.*\.nc' \ + --test_reader::x_axis_variable longitude --test_reader::y_axis_variable latitude \ + --ref_reader::x_axis_variable longitude --ref_reader::y_axis_variable latitude \ + --arrays IVT_U IVT_V IVT --verbose + + # clean up + rm test_integrated_vapor_transport_app_packed_data_output*.nc +fi From 3a9c848b8f6e15115cf342ee3bc9e2f82ca2d356 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 19 Feb 2021 18:28:32 -0800 Subject: [PATCH 025/180] bayesian_ar_detect app normalize coordinates added teca_normalize_coordinates into the pipeline includes a regression test using ERA interm data that has increasing z-axis and NertCDF packed data fields. 
--- .travis.yml | 2 +- apps/teca_bayesian_ar_detect.cpp | 16 +++--- test/apps/CMakeLists.txt | 7 +++ ...test_bayesian_ar_detect_app_packed_data.sh | 51 +++++++++++++++++++ 4 files changed, 67 insertions(+), 9 deletions(-) create mode 100755 test/apps/test_bayesian_ar_detect_app_packed_data.sh diff --git a/.travis.yml b/.travis.yml index 04f0801e4..b4986a69e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ env: - BUILD_TYPE=Debug - TECA_DIR=/travis_teca_dir - TECA_PYTHON_VERSION=3 - - TECA_DATA_REVISION=104 + - TECA_DATA_REVISION=105 jobs: - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=TRUE - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=FALSE diff --git a/apps/teca_bayesian_ar_detect.cpp b/apps/teca_bayesian_ar_detect.cpp index 6a6ff45f3..840e4092b 100644 --- a/apps/teca_bayesian_ar_detect.cpp +++ b/apps/teca_bayesian_ar_detect.cpp @@ -223,7 +223,6 @@ int main(int argc, char **argv) // now pass in the basic options, these are processed // last so that they will take precedence // configure the pipeline from the command line options. 
- p_teca_algorithm head; // configure the reader bool have_file = opt_vals.count("input_file"); @@ -241,22 +240,24 @@ int main(int argc, char **argv) } + p_teca_algorithm reader; if (have_file) { mcf_reader->set_input_file(opt_vals["input_file"].as()); - head = mcf_reader; + reader = mcf_reader; } else if (have_regex) { cf_reader->set_files_regex(opt_vals["input_regex"].as()); - head = cf_reader; + reader = cf_reader; } - p_teca_algorithm reader = head; // add basic transfomration stages to the pipeline - vv_mask->set_input_connection(reader->get_output_port()); + norm_coords->set_input_connection(reader->get_output_port()); + vv_mask->set_input_connection(norm_coords->get_output_port()); unpack->set_input_connection(vv_mask->get_output_port()); - head = unpack; + + p_teca_algorithm head = unpack; if (!opt_vals["periodic_in_x"].defaulted()) { @@ -395,9 +396,8 @@ int main(int argc, char **argv) } // connect the fixed stages of the pipeline - norm_coords->set_input_connection(head->get_output_port()); ar_detect->set_input_connection(0, params->get_output_port()); - ar_detect->set_input_connection(1, norm_coords->get_output_port()); + ar_detect->set_input_connection(1, head->get_output_port()); ar_tag->set_input_connection(0, ar_detect->get_output_port()); cf_writer->set_input_connection(ar_tag->get_output_port()); diff --git a/test/apps/CMakeLists.txt b/test/apps/CMakeLists.txt index 3fe7ea2ee..09f790368 100644 --- a/test/apps/CMakeLists.txt +++ b/test/apps/CMakeLists.txt @@ -89,6 +89,13 @@ teca_add_test(test_bayesian_ar_detect_app_mcf_mpi_threads FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) +teca_add_test(test_bayesian_ar_detect_app_packed_data_mpi + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_packed_data.sh + ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 + ${MPIEXEC} ${TEST_CORES} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} + REQ_TECA_DATA) + 
teca_add_test(test_integrated_vapor_transport_app_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 diff --git a/test/apps/test_bayesian_ar_detect_app_packed_data.sh b/test/apps/test_bayesian_ar_detect_app_packed_data.sh new file mode 100755 index 000000000..3d1268618 --- /dev/null +++ b/test/apps/test_bayesian_ar_detect_app_packed_data.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +if [[ $# < 3 ]] +then + echo "usage: test_bayesian_ar_detect_app_packed_data.sh "\ + "[app prefix] [data root] [num threads] [mpiexec] [num ranks]" + exit -1 +fi + +app_prefix=${1} +data_root=${2} +n_threads=${3} + +if [[ $# -eq 5 ]] +then + mpi_exec=${4} + test_cores=${5} + launcher="${mpi_exec} -n ${test_cores}" +fi + +set -x + +# run the app +${launcher} ${app_prefix}/teca_bayesian_ar_detect \ + --input_regex "${data_root}/ERAinterim_1979-01-0.*\.nc$" \ + --x_axis_variable longitude --y_axis_variable latitude --z_axis_variable level \ + --wind_u uwnd --wind_v vwnd --specific_humidity shum --compute_ivt --write_ivt \ + --write_ivt_magnitude --steps_per_file 256 --n_threads ${n_threads} --verbose \ + --output_file test_bayesian_ar_detect_app_packed_data_output_%t%.nc + +do_test=1 +if [[ $do_test -eq 0 ]] +then + echo "regenerating baseline..." 
+ for f in `ls test_bayesian_ar_detect_app_packed_data_output_*.nc` + do + ff=`echo $f | sed s/output/ref/g` + cp -vd $f ${data_root}/$ff + done +else + # run the diff + ${app_prefix}/teca_cartesian_mesh_diff \ + --reference_dataset "${data_root}/test_bayesian_ar_detect_app_packed_data_ref.*\.nc" \ + --test_dataset "test_bayesian_ar_detect_app_packed_data_output.*\.nc" \ + --test_reader::x_axis_variable longitude --test_reader::y_axis_variable latitude \ + --ref_reader::x_axis_variable longitude --ref_reader::y_axis_variable latitude \ + --arrays ar_probability ar_binary_tag --verbose + + # clean up + rm test_bayesian_ar_detect_app_packed_data_output*.nc +fi From e3af73c0e70e520486e67b24fb59d399ad62d894 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 24 Feb 2021 14:20:11 -0800 Subject: [PATCH 026/180] laplacian clean up trailing white space --- alg/teca_laplacian.cxx | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/alg/teca_laplacian.cxx b/alg/teca_laplacian.cxx index 43c481e44..7a63b5172 100644 --- a/alg/teca_laplacian.cxx +++ b/alg/teca_laplacian.cxx @@ -44,7 +44,7 @@ void laplacian(num_t *w, const pt_t *lon, const pt_t *lat, // delta lon squared as a function of latitude num_t d_lon = (lon[1] - lon[0]) * deg_to_rad() * earth_radius(); // tan(lat) - num_t *tan_lat = static_cast(malloc(n_bytes)); + num_t *tan_lat = static_cast(malloc(n_bytes)); for (unsigned long j = 0; j < n_lat; ++j) { delta_lon_sq[j] = pow(d_lon * cos(lat[j] * deg_to_rad()),2); @@ -65,7 +65,7 @@ void laplacian(num_t *w, const pt_t *lon, const pt_t *lat, { // set the current row in the u/v/w arrays unsigned long jj = j*n_lon; - /* + /* * The following f_* variables describe the field * f in a grid oriented fashion: * @@ -74,12 +74,12 @@ void laplacian(num_t *w, const pt_t *lon, const pt_t *lat, * f_ijm f_ji f_ijp * * f_imjm f_imj f_imjp - * + * * The 'j' direction represents longitude, the - * 'i' direciton represents latitude. 
+ * 'i' direciton represents latitude. * * Note: The laplacian represented here uses the chain - * rule to separate the (1/cos(lat)*d(cos(lat)*df/dlat)/dlat + * rule to separate the (1/cos(lat)*d(cos(lat)*df/dlat)/dlat * term into two terms. * */ @@ -94,15 +94,15 @@ void laplacian(num_t *w, const pt_t *lon, const pt_t *lat, // set the pointer index for the output field w // ... this is index i,j num_t *ww = w + jj; - // create a dummy variable for u**2 + // create a dummy variable for u**2 num_t dlon_sq = delta_lon_sq[j]; for (unsigned long i = 1; i < max_i; ++i) { // calculate the laplacian in spherical coordinates, assuming // constant radius R. - ww[i] = (f_imj[i] - num_t(2)*f_ij[i] + f_ipj[i])/dlat_sq - - tan_lat[j]*(f_ipj[i]-f_imj[i])/dlat + + ww[i] = (f_imj[i] - num_t(2)*f_ij[i] + f_ipj[i])/dlat_sq - + tan_lat[j]*(f_ipj[i]-f_imj[i])/dlat + (f_ijm[i] - num_t(2)*f_ij[i] + f_ijp[i])/dlon_sq; } } @@ -125,13 +125,13 @@ void laplacian(num_t *w, const pt_t *lon, const pt_t *lat, // set the pointer index for the output field w // ... this is index i,j num_t *ww = w + jj; - // create a dummy variable for u**2 + // create a dummy variable for u**2 num_t dlon_sq = delta_lon_sq[j]; // calculate the laplacian in spherical coordinates, assuming // constant radius R. - ww[0] = (f_imj[0] - num_t(2)*f_ij[0] + f_ipj[0])/dlat_sq - - tan_lat[j]*(f_ipj[0]-f_imj[0])/dlat + + ww[0] = (f_imj[0] - num_t(2)*f_ij[0] + f_ipj[0])/dlat_sq - + tan_lat[j]*(f_ipj[0]-f_imj[0])/dlat + (f_ijm[0] - num_t(2)*f_ij[0] + f_ijp[0])/dlon_sq; } @@ -152,13 +152,13 @@ void laplacian(num_t *w, const pt_t *lon, const pt_t *lat, // set the pointer index for the output field w // ... this is index i,j num_t *ww = w + jj + max_i; - // create a dummy variable for u**2 + // create a dummy variable for u**2 num_t dlon_sq = delta_lon_sq[j]; // calculate the laplacian in spherical coordinates, assuming // constant radius R. 
- ww[0] = (f_imj[0] - num_t(2)*f_ij[0] + f_ipj[0])/dlat_sq - - tan_lat[j]*(f_ipj[0]-f_imj[0])/dlat + + ww[0] = (f_imj[0] - num_t(2)*f_ij[0] + f_ipj[0])/dlat_sq - + tan_lat[j]*(f_ipj[0]-f_imj[0])/dlat + (f_ijm[0] - num_t(2)*f_ij[0] + f_ijp[0])/dlon_sq; } } @@ -193,7 +193,7 @@ void laplacian(num_t *w, const pt_t *lon, const pt_t *lat, // -------------------------------------------------------------------------- teca_laplacian::teca_laplacian() : - component_0_variable(), + component_0_variable(), laplacian_variable("laplacian") { this->set_number_of_input_connections(1); From a9dba44c18c9bc61fa398035c32bf19dce98b625 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 24 Feb 2021 14:21:25 -0800 Subject: [PATCH 027/180] algorithm add verbose algorithm property adds verbose algorithm property to teca_algorithm. call into the base class methods for getting property descriptions and setting property value from all dervide classes. --- alg/teca_2d_component_area.cxx | 4 +++ alg/teca_ar_detect.cxx | 4 +++ alg/teca_bayesian_ar_detect.cxx | 6 +++- alg/teca_bayesian_ar_detect.h | 5 ---- alg/teca_bayesian_ar_detect_parameters.cxx | 4 +++ alg/teca_cartesian_mesh_regrid.cxx | 4 +++ alg/teca_cartesian_mesh_subset.cxx | 6 +++- alg/teca_component_area_filter.cxx | 4 +++ alg/teca_component_statistics.cxx | 5 ++-- alg/teca_dataset_diff.cxx | 7 +++-- alg/teca_dataset_diff.h | 4 --- alg/teca_derived_quantity.cxx | 4 +++ alg/teca_evaluate_expression.cxx | 4 +++ alg/teca_face_to_cell_centering.cxx | 18 ++++++------ alg/teca_integrated_vapor_transport.cxx | 4 +++ alg/teca_l2_norm.cxx | 4 +++ alg/teca_laplacian.cxx | 4 +++ alg/teca_latitude_damper.cxx | 12 +++++--- alg/teca_normalize_coordinates.cxx | 7 +++-- alg/teca_normalize_coordinates.h | 9 ------ alg/teca_table_calendar.cxx | 4 +++ alg/teca_table_region_mask.cxx | 4 +++ alg/teca_table_remove_rows.cxx | 4 +++ alg/teca_table_sort.cxx | 4 +++ alg/teca_table_to_stream.cxx | 5 ++++ alg/teca_tc_candidates.cxx | 4 +++ 
alg/teca_tc_classify.cxx | 4 +++ alg/teca_tc_trajectory.cxx | 4 +++ alg/teca_tc_wind_radii.cxx | 4 +++ alg/teca_temporal_average.cxx | 4 +++ alg/teca_unpack_data.cxx | 8 ++++-- alg/teca_unpack_data.h | 4 --- alg/teca_valid_value_mask.cxx | 9 +++--- alg/teca_valid_value_mask.h | 10 ------- alg/teca_vertical_coordinate_transform.cxx | 4 +++ alg/teca_vorticity.cxx | 2 ++ apps/teca_cf_restripe.cpp | 2 +- core/teca_algorithm.cxx | 26 ++++++++++++++++- core/teca_algorithm.h | 33 ++++++++++++++++------ core/teca_threaded_algorithm.cxx | 6 +++- core/teca_threaded_algorithm.h | 2 +- io/teca_cartesian_mesh_reader.cxx | 4 +++ io/teca_cartesian_mesh_writer.cxx | 4 +++ io/teca_cf_reader.cxx | 4 +++ io/teca_cf_writer.cxx | 4 +++ io/teca_multi_cf_reader.cxx | 4 +++ io/teca_table_reader.cxx | 5 ++++ io/teca_table_writer.cxx | 4 +++ io/teca_wrf_reader.cxx | 4 +++ 49 files changed, 227 insertions(+), 72 deletions(-) diff --git a/alg/teca_2d_component_area.cxx b/alg/teca_2d_component_area.cxx index 285d40f52..c2b0572d4 100644 --- a/alg/teca_2d_component_area.cxx +++ b/alg/teca_2d_component_area.cxx @@ -137,6 +137,8 @@ void teca_2d_component_area::get_properties_description( "the label id that corresponds to the background (-1)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -144,6 +146,8 @@ void teca_2d_component_area::get_properties_description( void teca_2d_component_area::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, component_variable) TECA_POPTS_SET(opts, int, prefix, contiguous_component_ids) TECA_POPTS_SET(opts, long, prefix, background_id) diff --git a/alg/teca_ar_detect.cxx b/alg/teca_ar_detect.cxx index 23b848c3b..2dcc56af1 100644 --- a/alg/teca_ar_detect.cxx +++ b/alg/teca_ar_detect.cxx @@ -175,6 +175,8 @@ void teca_ar_detect::get_properties_description( "high land value") ; + 
this->teca_algorithm::get_properties_description(prefix, opts); + opts.add(ard_opts); } @@ -182,6 +184,8 @@ void teca_ar_detect::get_properties_description( void teca_ar_detect::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, water_vapor_variable) TECA_POPTS_SET(opts, double, prefix, low_water_vapor_threshold) TECA_POPTS_SET(opts, double, prefix, high_water_vapor_threshold) diff --git a/alg/teca_bayesian_ar_detect.cxx b/alg/teca_bayesian_ar_detect.cxx index 8a7705a09..dc7316627 100644 --- a/alg/teca_bayesian_ar_detect.cxx +++ b/alg/teca_bayesian_ar_detect.cxx @@ -609,7 +609,7 @@ teca_bayesian_ar_detect::teca_bayesian_ar_detect() : min_component_area_variable("min_component_area"), min_ivt_variable("min_water_vapor"), hwhm_latitude_variable("hwhm_latitude"), thread_pool_size(1), - verbose(0), internals(new internals_t) + internals(new internals_t) { this->set_number_of_input_connections(1); this->set_number_of_output_ports(1); @@ -648,6 +648,8 @@ void teca_bayesian_ar_detect::get_properties_description( "the terminal (0)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -655,6 +657,8 @@ void teca_bayesian_ar_detect::get_properties_description( void teca_bayesian_ar_detect::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, ivt_variable) TECA_POPTS_SET(opts, std::string, prefix, min_component_area_variable) TECA_POPTS_SET(opts, std::string, prefix, min_ivt_variable) diff --git a/alg/teca_bayesian_ar_detect.h b/alg/teca_bayesian_ar_detect.h index c2805cf99..c702910dc 100644 --- a/alg/teca_bayesian_ar_detect.h +++ b/alg/teca_bayesian_ar_detect.h @@ -57,10 +57,6 @@ class teca_bayesian_ar_detect : public teca_algorithm TECA_ALGORITHM_PROPERTY(std::string, min_component_area_variable) 
TECA_ALGORITHM_PROPERTY(std::string, hwhm_latitude_variable) - // flag indicating verbose terminal output is desired. - // default is 0 - TECA_ALGORITHM_PROPERTY(int, verbose) - // set/get the number of threads in the pool. setting // to -1 results in a thread per core factoring in all MPI // ranks running on the node. the default is -1. @@ -99,7 +95,6 @@ class teca_bayesian_ar_detect : public teca_algorithm std::string min_ivt_variable; std::string hwhm_latitude_variable; int thread_pool_size; - int verbose; struct internals_t; internals_t *internals; diff --git a/alg/teca_bayesian_ar_detect_parameters.cxx b/alg/teca_bayesian_ar_detect_parameters.cxx index ca58d5c85..529d51564 100644 --- a/alg/teca_bayesian_ar_detect_parameters.cxx +++ b/alg/teca_bayesian_ar_detect_parameters.cxx @@ -976,6 +976,8 @@ void teca_bayesian_ar_detect_parameters::get_properties_description( "the number of parameter table rows to serve (-1)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -983,6 +985,8 @@ void teca_bayesian_ar_detect_parameters::get_properties_description( void teca_bayesian_ar_detect_parameters::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, long, prefix, number_of_rows) } #endif diff --git a/alg/teca_cartesian_mesh_regrid.cxx b/alg/teca_cartesian_mesh_regrid.cxx index 632738efa..452167587 100644 --- a/alg/teca_cartesian_mesh_regrid.cxx +++ b/alg/teca_cartesian_mesh_regrid.cxx @@ -116,6 +116,8 @@ void teca_cartesian_mesh_regrid::get_properties_description( "linear or nearest interpolation (1)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -123,6 +125,8 @@ void teca_cartesian_mesh_regrid::get_properties_description( void teca_cartesian_mesh_regrid::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + 
TECA_POPTS_SET(opts, int, prefix, target_input) TECA_POPTS_SET(opts, std::vector, prefix, arrays) TECA_POPTS_SET(opts, int, prefix, interpolation_mode) diff --git a/alg/teca_cartesian_mesh_subset.cxx b/alg/teca_cartesian_mesh_subset.cxx index e9b21991d..d2a4797e6 100644 --- a/alg/teca_cartesian_mesh_subset.cxx +++ b/alg/teca_cartesian_mesh_subset.cxx @@ -48,6 +48,8 @@ void teca_cartesian_mesh_subset::get_properties_description( "subset contained by bounds") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -55,7 +57,9 @@ void teca_cartesian_mesh_subset::get_properties_description( void teca_cartesian_mesh_subset::set_properties( const string &prefix, variables_map &opts) { - TECA_POPTS_SET(opts, vector, prefix, bounds) + this->teca_algorithm::set_properties(prefix, opts); + + TECA_POPTS_SET(opts, std::vector, prefix, bounds) TECA_POPTS_SET(opts, bool, prefix, cover_bounds) } #endif diff --git a/alg/teca_component_area_filter.cxx b/alg/teca_component_area_filter.cxx index 2441e8d73..5cb611dd5 100644 --- a/alg/teca_component_area_filter.cxx +++ b/alg/teca_component_area_filter.cxx @@ -107,6 +107,8 @@ void teca_component_area_filter::get_properties_description( "this flag enables use of an optimization (0)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -114,6 +116,8 @@ void teca_component_area_filter::get_properties_description( void teca_component_area_filter::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, component_variable) TECA_POPTS_SET(opts, std::string, prefix, number_of_components_key) TECA_POPTS_SET(opts, std::string, prefix, component_ids_key) diff --git a/alg/teca_component_statistics.cxx b/alg/teca_component_statistics.cxx index a5de6f0ee..773e2f74f 100644 --- a/alg/teca_component_statistics.cxx +++ b/alg/teca_component_statistics.cxx @@ -47,6 
+47,8 @@ void teca_component_statistics::get_properties_description( "list of arrays to compute statistics for") ;*/ + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -54,8 +56,7 @@ void teca_component_statistics::get_properties_description( void teca_component_statistics::set_properties( const std::string &prefix, variables_map &opts) { - (void) prefix; - (void) opts; + this->teca_algorithm::set_properties(prefix, opts); //TECA_POPTS_SET(opts, std::vector, prefix, dependent_variables) } diff --git a/alg/teca_dataset_diff.cxx b/alg/teca_dataset_diff.cxx index 3905871cb..ffa008ae5 100644 --- a/alg/teca_dataset_diff.cxx +++ b/alg/teca_dataset_diff.cxx @@ -33,7 +33,7 @@ // -------------------------------------------------------------------------- teca_dataset_diff::teca_dataset_diff() - : relative_tolerance(1.0e-6), absolute_tolerance(-1.0), verbose(1) + : relative_tolerance(1.0e-6), absolute_tolerance(-1.0) { this->set_number_of_input_connections(2); this->set_number_of_output_ports(1); @@ -54,15 +54,18 @@ void teca_dataset_diff::get_properties_description( opts.add_options() TECA_POPTS_GET(double, prefix, relative_tolerance, "relative test tolerance") TECA_POPTS_GET(double, prefix, absolute_tolerance, "absolute test tolerance") - TECA_POPTS_GET(int, prefix, verbose, "print status messages as the diff runs") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } // -------------------------------------------------------------------------- void teca_dataset_diff::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, double, prefix, relative_tolerance) TECA_POPTS_SET(opts, double, prefix, absolute_tolerance) TECA_POPTS_SET(opts, int, prefix, verbose) diff --git a/alg/teca_dataset_diff.h b/alg/teca_dataset_diff.h index 57bfffbba..7fe64dc50 100644 --- a/alg/teca_dataset_diff.h +++ 
b/alg/teca_dataset_diff.h @@ -51,9 +51,6 @@ class teca_dataset_diff : public teca_algorithm // tolerance is used with numbers close to zero. TECA_ALGORITHM_PROPERTY(double, absolute_tolerance) - // if set infromation about the test progress is displayed during - // the test. - TECA_ALGORITHM_PROPERTY(int, verbose) protected: teca_dataset_diff(); @@ -107,7 +104,6 @@ class teca_dataset_diff : public teca_algorithm private: double relative_tolerance; double absolute_tolerance; - int verbose; }; #endif diff --git a/alg/teca_derived_quantity.cxx b/alg/teca_derived_quantity.cxx index 46345bf8f..5f70bd15b 100644 --- a/alg/teca_derived_quantity.cxx +++ b/alg/teca_derived_quantity.cxx @@ -37,6 +37,8 @@ void teca_derived_quantity::get_properties_description( "name of the derived quantity") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -44,6 +46,8 @@ void teca_derived_quantity::get_properties_description( void teca_derived_quantity::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::vector, prefix, dependent_variables) TECA_POPTS_SET(opts, std::string, prefix, derived_variable) } diff --git a/alg/teca_evaluate_expression.cxx b/alg/teca_evaluate_expression.cxx index 69f485a30..c18b3898c 100644 --- a/alg/teca_evaluate_expression.cxx +++ b/alg/teca_evaluate_expression.cxx @@ -59,6 +59,8 @@ void teca_evaluate_expression::get_properties_description( "when set columns used in the calculation are removed from output") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -66,6 +68,8 @@ void teca_evaluate_expression::get_properties_description( void teca_evaluate_expression::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, expression) TECA_POPTS_SET(opts, std::string, prefix, 
result_variable) TECA_POPTS_SET(opts, int, prefix, remove_dependent_variables) diff --git a/alg/teca_face_to_cell_centering.cxx b/alg/teca_face_to_cell_centering.cxx index 993df96c4..6e6f5c939 100644 --- a/alg/teca_face_to_cell_centering.cxx +++ b/alg/teca_face_to_cell_centering.cxx @@ -128,26 +128,26 @@ teca_face_to_cell_centering::~teca_face_to_cell_centering() void teca_face_to_cell_centering::get_properties_description( const string &prefix, options_description &global_opts) { - (void)prefix; - (void)global_opts; - /*options_description opts("Options for " + options_description opts("Options for " + (prefix.empty()?"teca_face_to_cell_centering":prefix)); - opts.add_options() + /*opts.add_options() TECA_POPTS_GET(int, prefix, mode, - "transform mode (mode_wrf_v3)") - ; + "Set the coordinate transform mode. The valid modes" + " are: mode_wrf_v3)") + ;*/ - global_opts.add(opts);*/ + this->teca_algorithm::get_properties_description(prefix, opts); + + global_opts.add(opts); } // -------------------------------------------------------------------------- void teca_face_to_cell_centering::set_properties( const string &prefix, variables_map &opts) { - (void)prefix; - (void)opts; //TECA_POPTS_SET(opts, int, prefix, mode) + this->teca_algorithm::set_properties(prefix, opts); } #endif diff --git a/alg/teca_integrated_vapor_transport.cxx b/alg/teca_integrated_vapor_transport.cxx index 39f2fd5f2..f484ca71a 100644 --- a/alg/teca_integrated_vapor_transport.cxx +++ b/alg/teca_integrated_vapor_transport.cxx @@ -165,6 +165,8 @@ void teca_integrated_vapor_transport::get_properties_description( "the value of the NetCDF _FillValue attribute (1e20)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -172,6 +174,8 @@ void teca_integrated_vapor_transport::get_properties_description( void teca_integrated_vapor_transport::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + 
TECA_POPTS_SET(opts, std::string, prefix, wind_u_variable) TECA_POPTS_SET(opts, std::string, prefix, wind_v_variable) TECA_POPTS_SET(opts, std::string, prefix, specific_humidity_variable) diff --git a/alg/teca_l2_norm.cxx b/alg/teca_l2_norm.cxx index 9d66dbdd0..5142add16 100644 --- a/alg/teca_l2_norm.cxx +++ b/alg/teca_l2_norm.cxx @@ -92,6 +92,8 @@ void teca_l2_norm::get_properties_description( "array to store the computed norm in") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -99,6 +101,8 @@ void teca_l2_norm::get_properties_description( void teca_l2_norm::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, component_0_variable) TECA_POPTS_SET(opts, std::string, prefix, component_1_variable) TECA_POPTS_SET(opts, std::string, prefix, component_2_variable) diff --git a/alg/teca_laplacian.cxx b/alg/teca_laplacian.cxx index 7a63b5172..9d9966dd3 100644 --- a/alg/teca_laplacian.cxx +++ b/alg/teca_laplacian.cxx @@ -219,6 +219,8 @@ void teca_laplacian::get_properties_description( "array to store the computed laplacian in") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -226,6 +228,8 @@ void teca_laplacian::get_properties_description( void teca_laplacian::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, component_0_variable) TECA_POPTS_SET(opts, std::string, prefix, laplacian_variable) } diff --git a/alg/teca_latitude_damper.cxx b/alg/teca_latitude_damper.cxx index 34478dcfd..0389bfbe6 100644 --- a/alg/teca_latitude_damper.cxx +++ b/alg/teca_latitude_damper.cxx @@ -74,27 +74,31 @@ void teca_latitude_damper::get_properties_description( { options_description opts("Options for " + (prefix.empty()?"teca_latitude_damper":prefix)); - + opts.add_options() 
TECA_POPTS_GET(double, prefix, center, "set the center (mu) for the gaussian filter") TECA_POPTS_GET(double, prefix, half_width_at_half_max, "set the value of the half width at half maximum (HWHM) " "to calculate sigma from: sigma = HWHM/std::sqrt(2.0*std::log(2.0))") - TECA_POPTS_GET(std::vector, prefix, damped_variables, + TECA_POPTS_MULTI_GET(std::vector, prefix, damped_variables, "set the variables that will be damped by the inverted " "gaussian filter") TECA_POPTS_GET(std::string, prefix, variable_post_fix, "set the post-fix that will be attached to the variables " "that will be saved in the output") ; - + + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } // -------------------------------------------------------------------------- void teca_latitude_damper::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, double, prefix, center) TECA_POPTS_SET(opts, double, prefix, half_width_at_half_max) TECA_POPTS_SET(opts, std::vector, prefix, damped_variables) @@ -213,7 +217,7 @@ std::vector teca_latitude_damper::get_upstream_request( arrays.insert(damped_vars.begin(), damped_vars.end()); - // Cleaning off the postfix for arrays passed in the pipeline. + // Cleaning off the postfix for arrays passed in the pipeline. // For ex a down stream could request "foo_damped" then we'd // need to request "foo". also remove "foo_damped" from the // request. 
diff --git a/alg/teca_normalize_coordinates.cxx b/alg/teca_normalize_coordinates.cxx index 9c8c4a966..7f9baffca 100644 --- a/alg/teca_normalize_coordinates.cxx +++ b/alg/teca_normalize_coordinates.cxx @@ -311,7 +311,7 @@ void teca_normalize_coordinates::internals_t::normalize_variables( // -------------------------------------------------------------------------- teca_normalize_coordinates::teca_normalize_coordinates() : x_axis_order(ORDER_ASCENDING), y_axis_order(ORDER_ASCENDING), - z_axis_order(ORDER_DESCENDING), verbose(0), internals(nullptr) + z_axis_order(ORDER_DESCENDING), internals(nullptr) { this->internals = new teca_normalize_coordinates::internals_t; @@ -350,6 +350,8 @@ void teca_normalize_coordinates::get_properties_description( "If set then status messages are sent to the terminal.") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -357,10 +359,11 @@ void teca_normalize_coordinates::get_properties_description( void teca_normalize_coordinates::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, int, prefix, x_axis_order) TECA_POPTS_SET(opts, int, prefix, y_axis_order) TECA_POPTS_SET(opts, int, prefix, z_axis_order) - TECA_POPTS_SET(opts, int, prefix, verbose) } #endif diff --git a/alg/teca_normalize_coordinates.h b/alg/teca_normalize_coordinates.h index bf6fcb85c..26050a8e7 100644 --- a/alg/teca_normalize_coordinates.h +++ b/alg/teca_normalize_coordinates.h @@ -46,14 +46,6 @@ class teca_normalize_coordinates : public teca_algorithm TECA_ALGORITHM_PROPERTY_V(int, z_axis_order) ///@} - /** @anchor verbose - * @name verbose - * if set to a non-zero value, rank 0 will send status information to the - * terminal. The default setting of zero results in minimal output. 
- */ - ///@{ - TECA_ALGORITHM_PROPERTY(int, verbose) - ///@} protected: teca_normalize_coordinates(); @@ -77,7 +69,6 @@ class teca_normalize_coordinates : public teca_algorithm int x_axis_order; int y_axis_order; int z_axis_order; - int verbose; struct internals_t; internals_t *internals; diff --git a/alg/teca_table_calendar.cxx b/alg/teca_table_calendar.cxx index 79768f323..8875efb23 100644 --- a/alg/teca_table_calendar.cxx +++ b/alg/teca_table_calendar.cxx @@ -73,6 +73,8 @@ void teca_table_calendar::get_properties_description( "prepended to all output column names") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -80,6 +82,8 @@ void teca_table_calendar::get_properties_description( void teca_table_calendar::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, units) TECA_POPTS_SET(opts, std::string, prefix, calendar) TECA_POPTS_SET(opts, std::string, prefix, time_column) diff --git a/alg/teca_table_region_mask.cxx b/alg/teca_table_region_mask.cxx index 1b5b9bed9..d111ff856 100644 --- a/alg/teca_table_region_mask.cxx +++ b/alg/teca_table_region_mask.cxx @@ -62,6 +62,8 @@ void teca_table_region_mask::get_properties_description( "list of y coordinates describing the regions") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -69,6 +71,8 @@ void teca_table_region_mask::get_properties_description( void teca_table_region_mask::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, x_coordinate_column) TECA_POPTS_SET(opts, std::string, prefix, y_coordinate_column) TECA_POPTS_SET(opts, std::string, prefix, result_column) diff --git a/alg/teca_table_remove_rows.cxx b/alg/teca_table_remove_rows.cxx index 986023cc4..9ead8eff6 100644 --- 
a/alg/teca_table_remove_rows.cxx +++ b/alg/teca_table_remove_rows.cxx @@ -56,6 +56,8 @@ void teca_table_remove_rows::get_properties_description( "when set columns used in the calculation are removed from output") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -63,6 +65,8 @@ void teca_table_remove_rows::get_properties_description( void teca_table_remove_rows::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, mask_expression) TECA_POPTS_SET(opts, int, prefix, remove_dependent_variables) } diff --git a/alg/teca_table_sort.cxx b/alg/teca_table_sort.cxx index e85daabbb..ef8fae195 100644 --- a/alg/teca_table_sort.cxx +++ b/alg/teca_table_sort.cxx @@ -76,6 +76,8 @@ void teca_table_sort::get_properties_description( "if set a stable sort will be used") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -83,6 +85,8 @@ void teca_table_sort::get_properties_description( void teca_table_sort::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, index_column) TECA_POPTS_SET(opts, int, prefix, index_column_id) TECA_POPTS_SET(opts, int, prefix, stable_sort) diff --git a/alg/teca_table_to_stream.cxx b/alg/teca_table_to_stream.cxx index 58b15a108..fb42f55a9 100644 --- a/alg/teca_table_to_stream.cxx +++ b/alg/teca_table_to_stream.cxx @@ -54,6 +54,9 @@ void teca_table_to_stream::get_properties_description( TECA_POPTS_GET(std::string, prefix, stream, "name of stream to send output to. 
stderr, stdout") ; + + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -61,6 +64,8 @@ void teca_table_to_stream::get_properties_description( void teca_table_to_stream::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, header) TECA_POPTS_SET(opts, std::string, prefix, footer) TECA_POPTS_SET(opts, std::string, prefix, stream) diff --git a/alg/teca_tc_candidates.cxx b/alg/teca_tc_candidates.cxx index 76ac71657..cba2f02c3 100644 --- a/alg/teca_tc_candidates.cxx +++ b/alg/teca_tc_candidates.cxx @@ -99,6 +99,8 @@ void teca_tc_candidates::get_properties_description( "highest longitude in degrees to search for storms (0)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + opts.add(ard_opts); } @@ -106,6 +108,8 @@ void teca_tc_candidates::get_properties_description( void teca_tc_candidates::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, surface_wind_speed_variable) TECA_POPTS_SET(opts, std::string, prefix, vorticity_850mb_variable) TECA_POPTS_SET(opts, std::string, prefix, sea_level_pressure_variable) diff --git a/alg/teca_tc_classify.cxx b/alg/teca_tc_classify.cxx index 42ceca00e..73d4a5401 100644 --- a/alg/teca_tc_classify.cxx +++ b/alg/teca_tc_classify.cxx @@ -81,6 +81,8 @@ void teca_tc_classify::get_properties_description( "each region. 
if not provided names are generated from ids") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -88,6 +90,8 @@ void teca_tc_classify::get_properties_description( void teca_tc_classify::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, track_id_column) TECA_POPTS_SET(opts, std::string, prefix, time_column) TECA_POPTS_SET(opts, std::string, prefix, surface_wind_column) diff --git a/alg/teca_tc_trajectory.cxx b/alg/teca_tc_trajectory.cxx index 61889b7a3..edbce0b11 100644 --- a/alg/teca_tc_trajectory.cxx +++ b/alg/teca_tc_trajectory.cxx @@ -258,6 +258,8 @@ void teca_tc_trajectory::get_properties_description( "number of time steps between valid candidate data. (1 step)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -265,6 +267,8 @@ void teca_tc_trajectory::get_properties_description( void teca_tc_trajectory::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, double, prefix, max_daily_distance) TECA_POPTS_SET(opts, double, prefix, min_wind_speed) TECA_POPTS_SET(opts, double, prefix, min_wind_duration) diff --git a/alg/teca_tc_wind_radii.cxx b/alg/teca_tc_wind_radii.cxx index 20ba6713c..de14e7c00 100644 --- a/alg/teca_tc_wind_radii.cxx +++ b/alg/teca_tc_wind_radii.cxx @@ -596,6 +596,8 @@ void teca_tc_wind_radii::get_properties_description(const std::string &prefix, "the average wind speed over the interval is used.") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -603,6 +605,8 @@ void teca_tc_wind_radii::get_properties_description(const std::string &prefix, void teca_tc_wind_radii::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + 
TECA_POPTS_SET(opts, std::string, prefix, storm_id_column) TECA_POPTS_SET(opts, std::string, prefix, storm_x_coordinate_column) TECA_POPTS_SET(opts, std::string, prefix, storm_y_coordinate_column) diff --git a/alg/teca_temporal_average.cxx b/alg/teca_temporal_average.cxx index 3e19f32c9..5abefaee3 100644 --- a/alg/teca_temporal_average.cxx +++ b/alg/teca_temporal_average.cxx @@ -47,6 +47,8 @@ void teca_temporal_average::get_properties_description( "use a backward(0), forward(1) or centered(2) stencil") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -54,6 +56,8 @@ void teca_temporal_average::get_properties_description( void teca_temporal_average::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, unsigned int, prefix, filter_width) TECA_POPTS_SET(opts, int, prefix, filter_type) } diff --git a/alg/teca_unpack_data.cxx b/alg/teca_unpack_data.cxx index b6ae1253b..7f644add3 100644 --- a/alg/teca_unpack_data.cxx +++ b/alg/teca_unpack_data.cxx @@ -41,8 +41,7 @@ void transform(output_t * __restrict__ p_out, input_t * __restrict__ p_in, // -------------------------------------------------------------------------- teca_unpack_data::teca_unpack_data() : - output_data_type(teca_variant_array_code::get()), - verbose(0) + output_data_type(teca_variant_array_code::get()) { this->set_number_of_input_connections(1); this->set_number_of_output_ports(1); @@ -68,6 +67,9 @@ void teca_unpack_data::get_properties_description( TECA_POPTS_GET(int, prefix, verbose, "Enables verbose output") ; + + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -75,6 +77,8 @@ void teca_unpack_data::get_properties_description( void teca_unpack_data::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, int, prefix, output_data_type) 
TECA_POPTS_SET(opts, int, prefix, verbose) } diff --git a/alg/teca_unpack_data.h b/alg/teca_unpack_data.h index 586d8e160..33d01db6e 100644 --- a/alg/teca_unpack_data.h +++ b/alg/teca_unpack_data.h @@ -54,9 +54,6 @@ class teca_unpack_data : public teca_algorithm void set_output_data_type_to_double() { this->set_output_data_type(teca_variant_array_code::get()); } - // set the algorihtm verbosity. off by default. - TECA_ALGORITHM_PROPERTY(int, verbose) - protected: teca_unpack_data(); @@ -79,7 +76,6 @@ class teca_unpack_data : public teca_algorithm private: int output_data_type; - int verbose; }; #endif diff --git a/alg/teca_valid_value_mask.cxx b/alg/teca_valid_value_mask.cxx index c9240efec..90cbddbfe 100644 --- a/alg/teca_valid_value_mask.cxx +++ b/alg/teca_valid_value_mask.cxx @@ -37,7 +37,7 @@ bool is_mask_array(const std::string &array) // -------------------------------------------------------------------------- teca_valid_value_mask::teca_valid_value_mask() : - mask_arrays(), enable_valid_range(0), verbose(0) + mask_arrays(), enable_valid_range(0) { this->set_number_of_input_connections(1); this->set_number_of_output_ports(1); @@ -62,10 +62,10 @@ void teca_valid_value_mask::get_properties_description( TECA_POPTS_GET(int, prefix, enable_valid_range, "If set non-zero vald_range, valid_min, and valid_max attributes" " would be used if there is no _FillValue attribute.") - TECA_POPTS_GET(int, prefix, verbose, - "If set then status messages are sent to the terminal.") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -73,9 +73,10 @@ void teca_valid_value_mask::get_properties_description( void teca_valid_value_mask::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::vector, prefix, mask_arrays) TECA_POPTS_SET(opts, int, prefix, enable_valid_range) - TECA_POPTS_SET(opts, int, prefix, verbose) } #endif diff --git 
a/alg/teca_valid_value_mask.h b/alg/teca_valid_value_mask.h index ebea5f21a..a0176f402 100644 --- a/alg/teca_valid_value_mask.h +++ b/alg/teca_valid_value_mask.h @@ -67,15 +67,6 @@ class teca_valid_value_mask : public teca_algorithm TECA_ALGORITHM_PROPERTY(int, enable_valid_range) ///@} - /** @anchor verbose - * @name verbose - * if set to a non-zero value, rank 0 will send status information to the - * terminal. The default setting of zero results in minimal output. - */ - ///@{ - TECA_ALGORITHM_PROPERTY(int, verbose) - ///@} - protected: teca_valid_value_mask(); @@ -94,7 +85,6 @@ class teca_valid_value_mask : public teca_algorithm private: std::vector mask_arrays; int enable_valid_range; - int verbose; }; #endif diff --git a/alg/teca_vertical_coordinate_transform.cxx b/alg/teca_vertical_coordinate_transform.cxx index 6e9b25d72..88740c327 100644 --- a/alg/teca_vertical_coordinate_transform.cxx +++ b/alg/teca_vertical_coordinate_transform.cxx @@ -95,6 +95,8 @@ void teca_vertical_coordinate_transform::get_properties_description( "transform mode (mode_wrf_v3)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -102,6 +104,8 @@ void teca_vertical_coordinate_transform::get_properties_description( void teca_vertical_coordinate_transform::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, int, prefix, mode) } #endif diff --git a/alg/teca_vorticity.cxx b/alg/teca_vorticity.cxx index 66fcf21d7..632e9c7ef 100644 --- a/alg/teca_vorticity.cxx +++ b/alg/teca_vorticity.cxx @@ -161,6 +161,8 @@ void teca_vorticity::get_properties_description( "array to store the computed vorticity in") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } diff --git a/apps/teca_cf_restripe.cpp b/apps/teca_cf_restripe.cpp index bbf7584bd..2273505b5 100644 --- a/apps/teca_cf_restripe.cpp +++ b/apps/teca_cf_restripe.cpp @@ 
-60,7 +60,7 @@ int main(int argc, char **argv) ("information_arrays", value>()->multitoken(), "\nA list of non-geometric arrays to write\n") - ("output_file", value()->default_value(std::string("IVT_%t%.nc")), + ("output_file", value(), "\nA path and file name pattern for the output NetCDF files. %t% is replaced with a" " human readable date and time corresponding to the time of the first time step in" " the file. Use --cf_writer::date_format to change the formatting\n") diff --git a/core/teca_algorithm.cxx b/core/teca_algorithm.cxx index 8fe7136e9..d1801f035 100644 --- a/core/teca_algorithm.cxx +++ b/core/teca_algorithm.cxx @@ -12,6 +12,10 @@ #include #include +#if defined(TECA_HAS_BOOST) +#include +#endif + using std::vector; using std::map; using std::string; @@ -382,7 +386,8 @@ void teca_algorithm_internals::from_stream(istream &is) // -------------------------------------------------------------------------- -teca_algorithm::teca_algorithm() : internals(new teca_algorithm_internals) +teca_algorithm::teca_algorithm() : verbose(0), + internals(new teca_algorithm_internals) {} // -------------------------------------------------------------------------- @@ -403,6 +408,25 @@ MPI_Comm teca_algorithm::get_communicator() return this->internals->comm; } +#if defined(TECA_HAS_BOOST) +// -------------------------------------------------------------------------- +void teca_algorithm::get_properties_description( + const string &prefix, options_description &opts) +{ + opts.add_options() + TECA_POPTS_GET(int, prefix, verbose, + "Set to non-zero to send diagnostic messages to the terminal") + ; +} + +// -------------------------------------------------------------------------- +void teca_algorithm::set_properties( + const string &prefix, variables_map &opts) +{ + TECA_POPTS_SET(opts, int, prefix, verbose) +} +#endif + // -------------------------------------------------------------------------- teca_algorithm_output_port teca_algorithm::get_output_port( unsigned int port) 
diff --git a/core/teca_algorithm.h b/core/teca_algorithm.h index 9a8dbb9ac..5d9caf3b0 100644 --- a/core/teca_algorithm.h +++ b/core/teca_algorithm.h @@ -44,17 +44,28 @@ class teca_algorithm : public std::enable_shared_from_this MPI_Comm get_communicator(); #if defined(TECA_HAS_BOOST) - // initialize the given options description - // with algorithm's properties - virtual void get_properties_description(const std::string &, options_description &) - {} - - // initialize the algorithm from the given options - // variable map. - virtual void set_properties(const std::string &, variables_map &) - {} + // initialize the given options description with algorithm's properties + // implementors should call the base implementation when overriding. + // this should be called after the override adds its options. + virtual void get_properties_description(const std::string &prefix, + options_description &opts); + + // initialize the algorithm from the given options variable map. + // implementors should call the base implementation when overriding. + // this should be called before the override sets its properties. + virtual void set_properties(const std::string &prefix, + variables_map &opts); #endif + /** @anchor verbose + * @name verbose + * if set to a non-zero value, rank 0 will send status information to the + * terminal. The default setting of zero results in no output. + */ + ///@{ + TECA_ALGORITHM_PROPERTY(int, verbose) + ///@} + // get an output port from the algorithm. 
to be used // during pipeline building virtual @@ -232,6 +243,10 @@ class teca_algorithm : public std::enable_shared_from_this // return the output port's modified flag value int get_modified(unsigned int port) const; +protected: +// algorithm properties + int verbose; + private: teca_algorithm_internals *internals; diff --git a/core/teca_threaded_algorithm.cxx b/core/teca_threaded_algorithm.cxx index 42848e1ea..27c90c513 100644 --- a/core/teca_threaded_algorithm.cxx +++ b/core/teca_threaded_algorithm.cxx @@ -73,7 +73,7 @@ void teca_threaded_algorithm_internals::thread_pool_resize(MPI_Comm comm, // -------------------------------------------------------------------------- -teca_threaded_algorithm::teca_threaded_algorithm() : verbose(0), +teca_threaded_algorithm::teca_threaded_algorithm() : bind_threads(1), stream_size(-1), poll_interval(1000000), internals(new teca_threaded_algorithm_internals) { @@ -109,6 +109,8 @@ void teca_threaded_algorithm::get_properties_description( "for completed tasks (1.0e6)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -116,6 +118,8 @@ void teca_threaded_algorithm::get_properties_description( void teca_threaded_algorithm::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, int, prefix, bind_threads) TECA_POPTS_SET(opts, int, prefix, verbose) diff --git a/core/teca_threaded_algorithm.h b/core/teca_threaded_algorithm.h index 7f02f670a..4f4a7c812 100644 --- a/core/teca_threaded_algorithm.h +++ b/core/teca_threaded_algorithm.h @@ -92,8 +92,8 @@ class teca_threaded_algorithm : public teca_algorithm // will be executed by the thread pool. 
const_p_teca_dataset request_data(teca_algorithm_output_port &port, const teca_metadata &request) override; + private: - int verbose; int bind_threads; int stream_size; long long poll_interval; diff --git a/io/teca_cartesian_mesh_reader.cxx b/io/teca_cartesian_mesh_reader.cxx index dfeca9e6e..3f2030b1b 100644 --- a/io/teca_cartesian_mesh_reader.cxx +++ b/io/teca_cartesian_mesh_reader.cxx @@ -103,6 +103,8 @@ void teca_cartesian_mesh_reader::get_properties_description( "a file name to read") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -110,6 +112,8 @@ void teca_cartesian_mesh_reader::get_properties_description( void teca_cartesian_mesh_reader::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, file_name) } #endif diff --git a/io/teca_cartesian_mesh_writer.cxx b/io/teca_cartesian_mesh_writer.cxx index 7531edbdf..3da6221f0 100644 --- a/io/teca_cartesian_mesh_writer.cxx +++ b/io/teca_cartesian_mesh_writer.cxx @@ -692,6 +692,8 @@ void teca_cartesian_mesh_writer::get_properties_description( "if auto is used, format is deduced from file_name") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -699,6 +701,8 @@ void teca_cartesian_mesh_writer::get_properties_description( void teca_cartesian_mesh_writer::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, file_name) TECA_POPTS_SET(opts, int, prefix, output_format) TECA_POPTS_SET(opts, int, prefix, binary) diff --git a/io/teca_cf_reader.cxx b/io/teca_cf_reader.cxx index c3997d3f5..4614f66d1 100644 --- a/io/teca_cf_reader.cxx +++ b/io/teca_cf_reader.cxx @@ -134,6 +134,8 @@ void teca_cf_reader::get_properties_description( "set the max number of MPI ranks for reading metadata (1024)") ; + 
this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -141,6 +143,8 @@ void teca_cf_reader::get_properties_description( void teca_cf_reader::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::vector, prefix, file_names) TECA_POPTS_SET(opts, std::string, prefix, files_regex) TECA_POPTS_SET(opts, std::string, prefix, x_axis_variable) diff --git a/io/teca_cf_writer.cxx b/io/teca_cf_writer.cxx index 84c05a1ac..2a4e4cd84 100644 --- a/io/teca_cf_writer.cxx +++ b/io/teca_cf_writer.cxx @@ -96,6 +96,8 @@ void teca_cf_writer::get_properties_description( "the list of non-geometric arrays to write (empty)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -103,6 +105,8 @@ void teca_cf_writer::get_properties_description( void teca_cf_writer::set_properties( const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, file_name) TECA_POPTS_SET(opts, std::string, prefix, date_format) TECA_POPTS_SET(opts, long, prefix, first_step) diff --git a/io/teca_multi_cf_reader.cxx b/io/teca_multi_cf_reader.cxx index cf6345e8f..3a7824ba1 100644 --- a/io/teca_multi_cf_reader.cxx +++ b/io/teca_multi_cf_reader.cxx @@ -310,6 +310,8 @@ void teca_multi_cf_reader::get_properties_description( "set the max number of ranks for reading metadata (1024)") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -317,6 +319,8 @@ void teca_multi_cf_reader::get_properties_description( void teca_multi_cf_reader::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, x_axis_variable) TECA_POPTS_SET(opts, std::string, prefix, y_axis_variable) TECA_POPTS_SET(opts, std::string, prefix, 
z_axis_variable) diff --git a/io/teca_table_reader.cxx b/io/teca_table_reader.cxx index 838faa13a..c4018036c 100644 --- a/io/teca_table_reader.cxx +++ b/io/teca_table_reader.cxx @@ -3,6 +3,7 @@ #include "teca_binary_stream.h" #include "teca_coordinate_util.h" #include "teca_file_util.h" +#include "teca_common.h" #include #include @@ -202,12 +203,16 @@ void teca_table_reader::get_properties_description( "if auto is used, format is deduced from file_name") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } // -------------------------------------------------------------------------- void teca_table_reader::set_properties(const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, string, prefix, file_name) TECA_POPTS_SET(opts, string, prefix, index_column) TECA_POPTS_SET(opts, int, prefix, generate_original_ids) diff --git a/io/teca_table_writer.cxx b/io/teca_table_writer.cxx index 8c610a0f9..75e221b5e 100644 --- a/io/teca_table_writer.cxx +++ b/io/teca_table_writer.cxx @@ -309,12 +309,16 @@ void teca_table_writer::get_properties_description( "if auto is used, format is deduced from file_name") ; + this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } // -------------------------------------------------------------------------- void teca_table_writer::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, file_name) TECA_POPTS_SET(opts, std::string, prefix, row_dim_name) TECA_POPTS_SET(opts, bool, prefix, output_format) diff --git a/io/teca_wrf_reader.cxx b/io/teca_wrf_reader.cxx index ba5825f12..b2291eba3 100644 --- a/io/teca_wrf_reader.cxx +++ b/io/teca_wrf_reader.cxx @@ -153,6 +153,8 @@ void teca_wrf_reader::get_properties_description( "set the number of I/O threads (-1)") ; + 
this->teca_algorithm::get_properties_description(prefix, opts); + global_opts.add(opts); } @@ -160,6 +162,8 @@ void teca_wrf_reader::get_properties_description( void teca_wrf_reader::set_properties(const std::string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::vector, prefix, file_names) TECA_POPTS_SET(opts, std::string, prefix, files_regex) TECA_POPTS_SET(opts, std::string, prefix, metadata_cache_dir) From 9d011205d6798300a13f8f23e2aedf98e5bab052 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 24 Feb 2021 14:16:22 -0800 Subject: [PATCH 028/180] apps report actual default values in advanced options report the actual defaults used by TECA_ALGORITHM_PROPERTIES in the advanced command line options. removes hard coded defaults from the options description. if the algorithm's verbose property is set status message shows which options are being overriden. --- alg/teca_2d_component_area.cxx | 4 +- alg/teca_bayesian_ar_detect.cxx | 18 ++++----- alg/teca_bayesian_ar_detect_parameters.cxx | 2 +- alg/teca_cartesian_mesh_regrid.cxx | 8 ++-- alg/teca_cartesian_mesh_subset.cxx | 2 +- alg/teca_component_area_filter.cxx | 19 ++++----- alg/teca_derived_quantity.cxx | 2 +- alg/teca_descriptive_statistics.cxx | 2 +- alg/teca_integrated_vapor_transport.cxx | 10 ++--- alg/teca_normalize_coordinates.cxx | 11 ++--- alg/teca_table_region_mask.cxx | 6 +-- alg/teca_table_to_stream.cxx | 19 +++++++++ alg/teca_table_to_stream.h | 3 ++ alg/teca_tc_candidates.cxx | 26 ++++++------ alg/teca_tc_trajectory.cxx | 8 ++-- alg/teca_unpack_data.cxx | 2 +- alg/teca_vertical_coordinate_transform.cxx | 2 +- alg/teca_vertical_reduction.cxx | 4 +- alg/teca_vorticity.cxx | 2 + apps/teca_tc_detect.cpp | 47 +++++++++++----------- core/teca_algorithm_fwd.h | 15 +------ core/teca_common.cxx | 3 ++ core/teca_common.h | 6 +++ core/teca_index_reduce.cxx | 4 +- core/teca_mpi_util.cxx | 17 ++++++++ core/teca_mpi_util.h | 2 + 
core/teca_program_options.h | 35 ++++++++++------ core/teca_threaded_algorithm.cxx | 10 ++--- io/teca_cf_reader.cxx | 18 ++++----- io/teca_cf_writer.cxx | 18 ++++----- io/teca_multi_cf_reader.cxx | 29 +++++++------ io/teca_multi_cf_reader.h | 2 + io/teca_table_reader.cxx | 4 +- io/teca_wrf_reader.cxx | 20 ++++----- python/teca_py_alg.i | 7 ---- 35 files changed, 215 insertions(+), 172 deletions(-) diff --git a/alg/teca_2d_component_area.cxx b/alg/teca_2d_component_area.cxx index c2b0572d4..9bdcbac60 100644 --- a/alg/teca_2d_component_area.cxx +++ b/alg/teca_2d_component_area.cxx @@ -132,9 +132,9 @@ void teca_2d_component_area::get_properties_description( "name of the varibale containing region labels") TECA_POPTS_GET(int, prefix, contiguous_component_ids, "when the region label ids start at 0 and are consecutive " - "this flag enables use of an optimization (0)") + "this flag enables use of an optimization") TECA_POPTS_GET(long, prefix, background_id, - "the label id that corresponds to the background (-1)") + "the label id that corresponds to the background") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_bayesian_ar_detect.cxx b/alg/teca_bayesian_ar_detect.cxx index dc7316627..48069c09a 100644 --- a/alg/teca_bayesian_ar_detect.cxx +++ b/alg/teca_bayesian_ar_detect.cxx @@ -631,21 +631,18 @@ void teca_bayesian_ar_detect::get_properties_description( opts.add_options() TECA_POPTS_GET(std::string, prefix, ivt_variable, - "name of the water vapor variable (\"\")") + "name of the water vapor variable") TECA_POPTS_GET(std::string, prefix, min_component_area_variable, "name of the column in the parameter table containing the " - "component area threshold (\"min_component_area\")") + "component area threshold") TECA_POPTS_GET(std::string, prefix, min_ivt_variable, "name of the column in the parameter table containing the " - "water vapor threshold (\"min_water_vapor\")") + "water vapor threshold") TECA_POPTS_GET(std::string, prefix, 
hwhm_latitude_variable, "name of the column in the parameter table containing the " - "half width at half max latitude (\"hwhm_latitude\")") + "half width at half max latitude") TECA_POPTS_GET(int, prefix, thread_pool_size, - "number of threads to parallelize execution over (1)") - TECA_POPTS_GET(int, prefix, verbose, - "flag indicating diagnostic info should be displayed in " - "the terminal (0)") + "number of threads to parallelize execution over") ; this->teca_algorithm::get_properties_description(prefix, opts); @@ -697,7 +694,10 @@ void teca_bayesian_ar_detect::set_thread_pool_size(int n) // -------------------------------------------------------------------------- unsigned int teca_bayesian_ar_detect::get_thread_pool_size() const noexcept { - return this->internals->queue->size(); + unsigned int n_threads = 0; + if (this->internals->queue) + n_threads = this->internals->queue->size(); + return n_threads; } // -------------------------------------------------------------------------- diff --git a/alg/teca_bayesian_ar_detect_parameters.cxx b/alg/teca_bayesian_ar_detect_parameters.cxx index 529d51564..23511a112 100644 --- a/alg/teca_bayesian_ar_detect_parameters.cxx +++ b/alg/teca_bayesian_ar_detect_parameters.cxx @@ -973,7 +973,7 @@ void teca_bayesian_ar_detect_parameters::get_properties_description( opts.add_options() TECA_POPTS_GET(long, prefix, number_of_rows, - "the number of parameter table rows to serve (-1)") + "the number of parameter table rows to serve") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_cartesian_mesh_regrid.cxx b/alg/teca_cartesian_mesh_regrid.cxx index 452167587..c8bd5d6ab 100644 --- a/alg/teca_cartesian_mesh_regrid.cxx +++ b/alg/teca_cartesian_mesh_regrid.cxx @@ -109,11 +109,11 @@ void teca_cartesian_mesh_regrid::get_properties_description( opts.add_options() TECA_POPTS_GET(int, prefix, target_input, - "select input connection that contains metadata (0)") - TECA_POPTS_GET(std::vector, prefix, 
arrays, - "list of arrays to move from source to target mesh ("")") + "select input connection that contains metadata") + TECA_POPTS_MULTI_GET(std::vector, prefix, arrays, + "list of arrays to move from source to target mesh") TECA_POPTS_GET(int, prefix, interpolation_mode, - "linear or nearest interpolation (1)") + "linear or nearest interpolation") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_cartesian_mesh_subset.cxx b/alg/teca_cartesian_mesh_subset.cxx index d2a4797e6..14559bede 100644 --- a/alg/teca_cartesian_mesh_subset.cxx +++ b/alg/teca_cartesian_mesh_subset.cxx @@ -41,7 +41,7 @@ void teca_cartesian_mesh_subset::get_properties_description( + (prefix.empty()?"teca_cartesian_mesh_subset":prefix)); opts.add_options() - TECA_POPTS_GET(vector, prefix, bounds, + TECA_POPTS_MULTI_GET(std::vector, prefix, bounds, "bounding box given by x0,x1,y0,y1,z0,z1") TECA_POPTS_GET(bool, prefix, cover_bounds, "(T)use smallest subset covering or (F)largest " diff --git a/alg/teca_component_area_filter.cxx b/alg/teca_component_area_filter.cxx index 5cb611dd5..25018bfd9 100644 --- a/alg/teca_component_area_filter.cxx +++ b/alg/teca_component_area_filter.cxx @@ -82,29 +82,26 @@ void teca_component_area_filter::get_properties_description( TECA_POPTS_GET(std::string, prefix, component_variable, "name of the varibale containing connected component labeling") TECA_POPTS_GET(std::string, prefix, number_of_components_key, - "name of the key that contains the number of components" - "\"number_of_components\")") + "name of the key that contains the number of components") TECA_POPTS_GET(std::string, prefix, component_ids_key, - "name of the key that contains the list of component ids " - "\"component_ids\")") + "name of the key that contains the list of component ids") TECA_POPTS_GET(std::string, prefix, component_area_key, - "name of the key that contains the list of component areas " - "(\"component_area\")") + "name of the key that contains the 
list of component areas") TECA_POPTS_GET(int, prefix, mask_value, "components with area outside of the range will be replaced " - "by this label value (-1)") + "by this label value") TECA_POPTS_GET(double, prefix, low_area_threshold, "set the lower end of the range of areas to pass through. " - "components smaller than this are masked out. (-inf)") + "components smaller than this are masked out.") TECA_POPTS_GET(double, prefix, high_area_threshold, "set the higher end of the range of areas to pass through. " - "components larger than this are masked out. (+inf)") + "components larger than this are masked out.") TECA_POPTS_GET(std::string, prefix, variable_post_fix, "set a string that will be appended to variable names and " - "metadata keys in the filter's output (\"\")") + "metadata keys in the filter's output") TECA_POPTS_GET(int, prefix, contiguous_component_ids, "when the region label ids start at 0 and are consecutive " - "this flag enables use of an optimization (0)") + "this flag enables use of an optimization") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_derived_quantity.cxx b/alg/teca_derived_quantity.cxx index 5f70bd15b..4d7f68b6e 100644 --- a/alg/teca_derived_quantity.cxx +++ b/alg/teca_derived_quantity.cxx @@ -31,7 +31,7 @@ void teca_derived_quantity::get_properties_description( + (prefix.empty()?"teca_derived_quantity":prefix)); opts.add_options() - TECA_POPTS_GET(std::vector, prefix, dependent_variables, + TECA_POPTS_MULTI_GET(std::vector, prefix, dependent_variables, "list of arrays needed to compute the derived quantity") TECA_POPTS_GET(std::string, prefix, derived_variable, "name of the derived quantity") diff --git a/alg/teca_descriptive_statistics.cxx b/alg/teca_descriptive_statistics.cxx index 2c7794f0a..042684228 100644 --- a/alg/teca_descriptive_statistics.cxx +++ b/alg/teca_descriptive_statistics.cxx @@ -128,7 +128,7 @@ void teca_descriptive_statistics::get_properties_description( + 
(prefix.empty()?"teca_descriptive_statistics":prefix)); opts.add_options() - TECA_POPTS_GET(std::vector, prefix, dependent_variables, + TECA_POPTS_MULTI_GET(std::vector, prefix, dependent_variables, "list of arrays to compute statistics for") ; diff --git a/alg/teca_integrated_vapor_transport.cxx b/alg/teca_integrated_vapor_transport.cxx index f484ca71a..bd0cea5de 100644 --- a/alg/teca_integrated_vapor_transport.cxx +++ b/alg/teca_integrated_vapor_transport.cxx @@ -156,13 +156,13 @@ void teca_integrated_vapor_transport::get_properties_description( opts.add_options() TECA_POPTS_GET(std::string, prefix, wind_u_variable, - "name of the variable containg the lon component of the wind vector (ua)") + "name of the variable containg the lon component of the wind vector") TECA_POPTS_GET(std::string, prefix, wind_v_variable, - "name of the variable containg the lat component of the wind vector (va)") - TECA_POPTS_GET(std::string, prefix, specific_humidty_variable, - "name of the variable containg the specific humidity (hus)") + "name of the variable containg the lat component of the wind vector") + TECA_POPTS_GET(std::string, prefix, specific_humidity_variable, + "name of the variable containg the specific humidity") TECA_POPTS_GET(double, prefix, fill_value, - "the value of the NetCDF _FillValue attribute (1e20)") + "the value of the NetCDF _FillValue attribute") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_normalize_coordinates.cxx b/alg/teca_normalize_coordinates.cxx index 7f9baffca..4ae4bd586 100644 --- a/alg/teca_normalize_coordinates.cxx +++ b/alg/teca_normalize_coordinates.cxx @@ -336,18 +336,13 @@ void teca_normalize_coordinates::get_properties_description( opts.add_options() TECA_POPTS_GET(int, prefix, x_axis_order, "Sets the desired output order of the x-axis. Use" - " ORDER_ASCENDING(0) or ORDER_DESCENDING(1). 
By default" - " the x-axis will be output in ascending order.") + " ORDER_ASCENDING(0) or ORDER_DESCENDING(1).") TECA_POPTS_GET(int, prefix, y_axis_order, "Sets the desired output order of the y-axis. Use" - " ORDER_ASCENDING(0) or ORDER_DESCENDING(1). By default" - " the y-axis will be output in ascending order.") + " ORDER_ASCENDING(0) or ORDER_DESCENDING(1).") TECA_POPTS_GET(int, prefix, z_axis_order, "Sets the desired output order of the z-axis. Use" - " ORDER_ASCENDING(0) or ORDER_DESCENDING(1). By default" - " the z-axis will be output in descending order.") - TECA_POPTS_GET(int, prefix, verbose, - "If set then status messages are sent to the terminal.") + " ORDER_ASCENDING(0) or ORDER_DESCENDING(1).") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_table_region_mask.cxx b/alg/teca_table_region_mask.cxx index d111ff856..260450b96 100644 --- a/alg/teca_table_region_mask.cxx +++ b/alg/teca_table_region_mask.cxx @@ -49,11 +49,11 @@ void teca_table_region_mask::get_properties_description( opts.add_options() TECA_POPTS_GET(std::string, prefix, x_coordinate_column, - "name of the column containing x cooridnates. default \"lon\"") + "name of the column containing x cooridnates.") TECA_POPTS_GET(std::string, prefix, y_coordinate_column, - "name of the column containing y cooridnates. default \"lat\"") + "name of the column containing y cooridnates.") TECA_POPTS_GET(std::string, prefix, result_column, - "name of the column to store the mask in. 
default \"region_mask\"") + "name of the column to store the mask in.") TECA_POPTS_MULTI_GET(std::vector, prefix, region_sizes, "the number of points in each region") TECA_POPTS_MULTI_GET(std::vector, prefix, region_x_coordinates, diff --git a/alg/teca_table_to_stream.cxx b/alg/teca_table_to_stream.cxx index fb42f55a9..f9ee2cc84 100644 --- a/alg/teca_table_to_stream.cxx +++ b/alg/teca_table_to_stream.cxx @@ -96,6 +96,25 @@ void teca_table_to_stream::set_stream(const std::string &s) } } +// -------------------------------------------------------------------------- +std::string teca_table_to_stream::get_stream() +{ + if (this->stream == &std::cerr) + { + return "stderr"; + } + else if (this->stream == &std::cout) + { + return "stdout"; + } + else if (!this->stream) + { + return "null"; + } + + return "unknown"; +} + // -------------------------------------------------------------------------- void teca_table_to_stream::set_stream_to_stderr() { diff --git a/alg/teca_table_to_stream.h b/alg/teca_table_to_stream.h index c2fcd631c..03de1a64f 100644 --- a/alg/teca_table_to_stream.h +++ b/alg/teca_table_to_stream.h @@ -40,6 +40,9 @@ class teca_table_to_stream : public teca_algorithm void set_stream_to_stderr(); void set_stream_to_stdout(); + // get the stream name + std::string get_stream(); + protected: teca_table_to_stream(); diff --git a/alg/teca_tc_candidates.cxx b/alg/teca_tc_candidates.cxx index cba2f02c3..9496c8314 100644 --- a/alg/teca_tc_candidates.cxx +++ b/alg/teca_tc_candidates.cxx @@ -71,32 +71,32 @@ void teca_tc_candidates::get_properties_description( "name of core temperature variable") TECA_POPTS_GET(double, prefix, max_core_radius, "maximum number of degrees latitude separation between " - "vorticity max and pressure min defining a storm (2.0)") + "vorticity max and pressure min defining a storm") TECA_POPTS_GET(double, prefix, min_vorticity_850mb, - "minimum vorticty to be considered a tropical storm (1.6e-4)") + "minimum vorticty to be considered a 
tropical storm") TECA_POPTS_GET(double, prefix, vorticity_850mb_window, "size of the search window in degrees. storms core must have a " - "local vorticity max centered on this window (7.74446)") + "local vorticity max centered on this window") TECA_POPTS_GET(double, prefix, max_pressure_delta, - "maximum pressure change within specified radius (400.0)") + "maximum pressure change within specified radius") TECA_POPTS_GET(double, prefix, max_pressure_radius, - "radius in degrees over which max pressure change is computed (5.0)") + "radius in degrees over which max pressure change is computed") TECA_POPTS_GET(double, prefix, max_core_temperature_delta, - "maximum core temperature change over the specified radius (0.8)") + "maximum core temperature change over the specified radius") TECA_POPTS_GET(double, prefix, max_core_temperature_radius, - "radius in degrees over which max core temperature change is computed (5.0)") + "radius in degrees over which max core temperature change is computed") TECA_POPTS_GET(double, prefix, max_thickness_delta, - "maximum thickness change over the specified radius (50.0)") + "maximum thickness change over the specified radius") TECA_POPTS_GET(double, prefix, max_thickness_radius, - "radius in degrees over with max thickness change is comuted (4.0)") + "radius in degrees over with max thickness change is comuted") TECA_POPTS_GET(double, prefix, search_lat_low, - "lowest latitude in degrees to search for storms (-80.0)") + "lowest latitude in degrees to search for storms") TECA_POPTS_GET(double, prefix, search_lat_high, - "highest latitude in degrees to search for storms (80.0)") + "highest latitude in degrees to search for storms") TECA_POPTS_GET(double, prefix, search_lon_low, - "lowest longitude in degrees to search for stroms (1)") + "lowest longitude in degrees to search for stroms") TECA_POPTS_GET(double, prefix, search_lon_high, - "highest longitude in degrees to search for storms (0)") + "highest longitude in degrees to search 
for storms") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_tc_trajectory.cxx b/alg/teca_tc_trajectory.cxx index edbce0b11..808f5ff6b 100644 --- a/alg/teca_tc_trajectory.cxx +++ b/alg/teca_tc_trajectory.cxx @@ -248,14 +248,14 @@ void teca_tc_trajectory::get_properties_description( opts.add_options() TECA_POPTS_GET(double, prefix, max_daily_distance, - "max distance a storm can move on the same track in single day (1600 km)") + "max distance a storm can move on the same track in single day") TECA_POPTS_GET(double, prefix, min_wind_speed, - "minimum wind speed to be worthy of tracking (17.0 ms^-1)") + "minimum wind speed to be worthy of tracking") TECA_POPTS_GET(double, prefix, min_wind_duration, "minimum number of, not necessarily consecutive, days thickness, " - "core temp, and wind speed criteria must be satisfied (2.0 days)") + "core temp, and wind speed criteria must be satisfied") TECA_POPTS_GET(unsigned long, prefix, step_interval, - "number of time steps between valid candidate data. (1 step)") + "number of time steps between valid candidate data.") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_unpack_data.cxx b/alg/teca_unpack_data.cxx index 7f644add3..b8d0d813b 100644 --- a/alg/teca_unpack_data.cxx +++ b/alg/teca_unpack_data.cxx @@ -63,7 +63,7 @@ void teca_unpack_data::get_properties_description( TECA_POPTS_GET(int, prefix, output_data_type, "Sets the type of the transformed data to either single or double" " precision floating point. Use 11 for single precision and 12 for" - " double precision. 
The default is single precision") + " double precision.") TECA_POPTS_GET(int, prefix, verbose, "Enables verbose output") ; diff --git a/alg/teca_vertical_coordinate_transform.cxx b/alg/teca_vertical_coordinate_transform.cxx index 88740c327..6fba697a9 100644 --- a/alg/teca_vertical_coordinate_transform.cxx +++ b/alg/teca_vertical_coordinate_transform.cxx @@ -92,7 +92,7 @@ void teca_vertical_coordinate_transform::get_properties_description( opts.add_options() TECA_POPTS_GET(int, prefix, mode, - "transform mode (mode_wrf_v3)") + "Sets the coordinate transform mode. The modes are: mode_wrf_v3") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/alg/teca_vertical_reduction.cxx b/alg/teca_vertical_reduction.cxx index be806acdd..f823cfba7 100644 --- a/alg/teca_vertical_reduction.cxx +++ b/alg/teca_vertical_reduction.cxx @@ -35,9 +35,9 @@ void teca_vertical_reduction::get_properties_description( + (prefix.empty()?"teca_vertical_reduction":prefix)); opts.add_options() - TECA_POPTS_GET(std::vector, prefix, dependent_variables, + TECA_POPTS_MULTI_GET(std::vector, prefix, dependent_variables, "list of arrays needed to compute the derived quantity") - TECA_POPTS_GET(std::vector, prefix, derived_variables, + TECA_POPTS_MULTI_GET(std::vector, prefix, derived_variables, "name of the derived quantity") ; diff --git a/alg/teca_vorticity.cxx b/alg/teca_vorticity.cxx index 632e9c7ef..b6635686e 100644 --- a/alg/teca_vorticity.cxx +++ b/alg/teca_vorticity.cxx @@ -170,6 +170,8 @@ void teca_vorticity::get_properties_description( void teca_vorticity::set_properties( const string &prefix, variables_map &opts) { + this->teca_algorithm::set_properties(prefix, opts); + TECA_POPTS_SET(opts, std::string, prefix, component_0_variable) TECA_POPTS_SET(opts, std::string, prefix, component_1_variable) TECA_POPTS_SET(opts, std::string, prefix, vorticity_variable) diff --git a/apps/teca_tc_detect.cpp b/apps/teca_tc_detect.cpp index f8483f171..f5a236b6e 100644 --- 
a/apps/teca_tc_detect.cpp +++ b/apps/teca_tc_detect.cpp @@ -267,22 +267,21 @@ int main(int argc, char **argv) surf_wind->set_component_1_variable( opt_vals["surface_wind_v"].as()); - std::vector dep_var; - core_temp->get_dependent_variables(dep_var); if (!opt_vals["500mb_temp"].defaulted()) - dep_var[0] = opt_vals["500mb_temp"].as(); + core_temp->set_dependent_variable(0, + opt_vals["500mb_temp"].as()); + if (!opt_vals["200mb_temp"].defaulted()) - dep_var[1] = opt_vals["200mb_temp"].as(); - core_temp->set_dependent_variables(dep_var); - dep_var.clear(); + core_temp->set_dependent_variable(1, + opt_vals["200mb_temp"].as()); - thickness->get_dependent_variables(dep_var); if (!opt_vals["1000mb_height"].defaulted()) - dep_var[0] = opt_vals["1000mb_height"].as(); + thickness->set_dependent_variable(0, + opt_vals["1000mb_height"].as()); + if (!opt_vals["200mb_height"].defaulted()) - dep_var[1] = opt_vals["200mb_height"].as(); - thickness->set_dependent_variables(dep_var); - dep_var.clear(); + thickness->set_dependent_variable(1, + opt_vals["200mb_height"].as()); if (!opt_vals["sea_level_pressure"].defaulted()) candidates->set_sea_level_pressure_variable( @@ -375,29 +374,31 @@ int main(int argc, char **argv) // now that command line opts have been parsed we can create // the programmable algorithms' functors - core_temp->get_dependent_variables(dep_var); - if (dep_var.size() != 2) + size_t n_var = core_temp->get_number_of_dependent_variables(); + if (n_var != 2) { TECA_ERROR("core temperature calculation requires 2 " - "variables. given " << dep_var.size()) + "variables. 
given " << n_var) return -1; } core_temp->set_execute_callback( - point_wise_average(dep_var[0], dep_var[1], - core_temp->get_derived_variable())); - dep_var.clear(); + point_wise_average( + core_temp->get_dependent_variable(0), + core_temp->get_dependent_variable(1), + core_temp->get_derived_variable())); - thickness->get_dependent_variables(dep_var); - if (dep_var.size() != 2) + n_var = thickness->get_number_of_dependent_variables(); + if (n_var != 2) { TECA_ERROR("thickness calculation requires 2 " - "variables. given " << dep_var.size()) + "variables. given " << n_var) return -1; } thickness->set_execute_callback( - point_wise_difference(dep_var[0], dep_var[1], - thickness->get_derived_variable())); - dep_var.clear(); + point_wise_difference( + thickness->get_dependent_variable(0), + thickness->get_dependent_variable(1), + thickness->get_derived_variable())); // and tell the candidate stage what variables the functors produce candidates->set_surface_wind_speed_variable(surf_wind->get_l2_norm_variable()); diff --git a/core/teca_algorithm_fwd.h b/core/teca_algorithm_fwd.h index 2ca1c8f09..53411bd15 100644 --- a/core/teca_algorithm_fwd.h +++ b/core/teca_algorithm_fwd.h @@ -134,25 +134,14 @@ void set_##NAME##s(const std::initializer_list &&l) \ } \ } \ \ -void set_##NAME##s(const const_p_teca_variant_array &v) \ -{ \ - v->get(this->NAME##s); \ - this->set_modified(); \ -} \ - \ const T &get_##NAME(size_t i) const \ { \ return this->NAME##s[i]; \ } \ \ -void get_##NAME##s(std::vector &v) const \ -{ \ - v = this->NAME##s; \ -} \ - \ -void get_##NAME##s(const p_teca_variant_array &v) const \ +const std::vector &get_##NAME##s() const \ { \ - v->set(this->NAME##s); \ + return this->NAME##s; \ } \ \ void clear_##NAME##s() \ diff --git a/core/teca_common.cxx b/core/teca_common.cxx index 52baaccef..b3c9f0dc2 100644 --- a/core/teca_common.cxx +++ b/core/teca_common.cxx @@ -1,5 +1,7 @@ #include "teca_common.h" +namespace std +{ // 
************************************************************************** std::ostream &operator<<(std::ostream &os, const std::vector &vec) { @@ -12,6 +14,7 @@ std::ostream &operator<<(std::ostream &os, const std::vector &vec) } return os; } +} // ************************************************************************** int have_tty() diff --git a/core/teca_common.h b/core/teca_common.h index 61d06afcf..0cae20fd3 100644 --- a/core/teca_common.h +++ b/core/teca_common.h @@ -3,12 +3,17 @@ #include "teca_config.h" #include "teca_parallel_id.h" + #include #include #include #include #include +// the operator<< overloads have to be namespace std in order for +// boost to find them. they are needed for mutitoken program options +namespace std +{ // send a vector to a stream template std::ostream &operator<<(std::ostream &os, const std::vector &vec) @@ -25,6 +30,7 @@ std::ostream &operator<<(std::ostream &os, const std::vector &vec) // send a vector of strings to a stream std::ostream &operator<<(std::ostream &os, const std::vector &vec); +} // detect if we are writing to a tty, if not then // we should not use ansi color codes diff --git a/core/teca_index_reduce.cxx b/core/teca_index_reduce.cxx index 741ec2c11..de17ad266 100644 --- a/core/teca_index_reduce.cxx +++ b/core/teca_index_reduce.cxx @@ -138,9 +138,9 @@ void teca_index_reduce::get_properties_description(const std::string &prefix, + (prefix.empty()?"teca_index_reduce":prefix)); opts.add_options() - TECA_POPTS_GET(long, prefix, start_index, "first index to process (0)") + TECA_POPTS_GET(long, prefix, start_index, "first index to process") TECA_POPTS_GET(long, prefix, end_index, "last index to process. " - "If set to -1 all indices are processed. 
(-1)") + "If set to -1 all indices are processed.") ; global_opts.add(opts); diff --git a/core/teca_mpi_util.cxx b/core/teca_mpi_util.cxx index 8e9673dfd..220b67d6b 100644 --- a/core/teca_mpi_util.cxx +++ b/core/teca_mpi_util.cxx @@ -51,4 +51,21 @@ int equipartition_communicator(MPI_Comm comm, #endif return 0; } + +// ************************************************************************** +int mpi_rank_0(MPI_Comm comm) +{ + int rank = 0; +#if defined(TECA_HAS_MPI) + int is_init = 0; + MPI_Initialized(&is_init); + if (is_init) + { + MPI_Comm_rank(comm, &rank); + } +#endif + if (rank == 0) + return 1; + return 0; +} } diff --git a/core/teca_mpi_util.h b/core/teca_mpi_util.h index 2b22c5b89..e51106533 100644 --- a/core/teca_mpi_util.h +++ b/core/teca_mpi_util.h @@ -11,6 +11,8 @@ namespace teca_mpi_util int equipartition_communicator(MPI_Comm comm, int new_comm_size, MPI_Comm *new_comm); +// return non-zero if this process is MPI rank 0 +int mpi_rank_0(MPI_Comm comm); }; #endif diff --git a/core/teca_program_options.h b/core/teca_program_options.h index be4340fe2..d2e2ecca1 100644 --- a/core/teca_program_options.h +++ b/core/teca_program_options.h @@ -2,6 +2,8 @@ #define teca_program_options_h #include "teca_config.h" +#include "teca_common.h" +#include "teca_mpi_util.h" #if defined(TECA_HAS_BOOST) && !defined(SWIG) namespace boost @@ -40,21 +42,30 @@ using variables_map // . These need to be // included in your cxx files. 
// -#define TECA_POPTS_GET(_type, _prefix, _name, _desc) \ - (((_prefix.empty()?"":_prefix+"::") + #_name).c_str(), \ - boost::program_options::value<_type>(), "\n" _desc "\n") - -#define TECA_POPTS_MULTI_GET(_type, _prefix, _name, _desc) \ +#define TECA_POPTS_GET(_type, _prefix, _name, _desc) \ (((_prefix.empty()?"":_prefix+"::") + #_name).c_str(), \ - boost::program_options::value<_type>()->multitoken(), \ + boost::program_options::value<_type>()->default_value \ + (this->get_ ## _name()), "\n" _desc "\n") + +#define TECA_POPTS_MULTI_GET(_type, _prefix, _name, _desc) \ + (((_prefix.empty()?"":_prefix+"::") + #_name).c_str(), \ + boost::program_options::value<_type>()->multitoken \ + ()->default_value(this->get_ ## _name()), \ "\n" _desc "\n") -#define TECA_POPTS_SET(_opts, _type, _prefix, _name) \ - {std::string opt_name = \ - (_prefix.empty()?"":_prefix+"::") + #_name; \ - if (_opts.count(opt_name)) \ - { \ - this->set_##_name(_opts[opt_name].as<_type>()); \ +#define TECA_POPTS_SET(_opts, _type, _prefix, _name) \ + {std::string opt_name = \ + (_prefix.empty()?"":_prefix+"::") + #_name; \ + bool defd = _opts[opt_name].defaulted(); \ + if (!defd) \ + { \ + _type val = _opts[opt_name].as<_type>(); \ + if (this->verbose && \ + teca_mpi_util::mpi_rank_0(this->get_communicator())) \ + { \ + TECA_STATUS("Setting " << opt_name << " = " << val) \ + } \ + this->set_##_name(val); \ }} #else diff --git a/core/teca_threaded_algorithm.cxx b/core/teca_threaded_algorithm.cxx index 27c90c513..0578aef3e 100644 --- a/core/teca_threaded_algorithm.cxx +++ b/core/teca_threaded_algorithm.cxx @@ -95,18 +95,18 @@ void teca_threaded_algorithm::get_properties_description( opts.add_options() TECA_POPTS_GET(int, prefix, bind_threads, - "bind software threads to hardware cores (1)") + "bind software threads to hardware cores") TECA_POPTS_GET(int, prefix, verbose, - "print a run time report of settings (0)") + "print a run time report of settings") TECA_POPTS_GET(int, prefix, 
thread_pool_size, "number of threads in pool. When n == -1, 1 thread per core is " - "created (-1)") + "created") TECA_POPTS_GET(int, prefix, stream_size, "number of datasests to pass per execute call. -1 means wait " - "for all. (-1)") + "for all.") TECA_POPTS_GET(long, prefix, poll_interval, "number of nanoseconds to wait between scans of the thread pool " - "for completed tasks (1.0e6)") + "for completed tasks") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/io/teca_cf_reader.cxx b/io/teca_cf_reader.cxx index 4614f66d1..a8a583011 100644 --- a/io/teca_cf_reader.cxx +++ b/io/teca_cf_reader.cxx @@ -81,7 +81,7 @@ void teca_cf_reader::get_properties_description( + (prefix.empty()?"teca_cf_reader":prefix)); opts.add_options() - TECA_POPTS_GET(std::vector, prefix, file_names, + TECA_POPTS_MULTI_GET(std::vector, prefix, file_names, "An explcict list of files to read. If specified takes precedence" " over --files_regex. Use one of --files_regex or --file_names") TECA_POPTS_GET(std::string, prefix, files_regex, @@ -89,11 +89,11 @@ void teca_cf_reader::get_properties_description( " Only the final component in a path may conatin a regular expression." " Use one of --files_regex or --file_names ") TECA_POPTS_GET(std::string, prefix, x_axis_variable, - "name of variable that has x axis coordinates (lon)") + "name of variable that has x axis coordinates") TECA_POPTS_GET(std::string, prefix, y_axis_variable, - "name of variable that has y axis coordinates (lat)") + "name of variable that has y axis coordinates") TECA_POPTS_GET(std::string, prefix, z_axis_variable, - "name of variable that has z axis coordinates (). If left empty the" + "name of variable that has z axis coordinates. If left empty the" " output mesh will be 2D.") TECA_POPTS_GET(std::string, prefix, t_axis_variable, "name of variable that has time axis coordinates (time). 
Set to an empty" @@ -118,20 +118,20 @@ void teca_cf_reader::get_properties_description( " no units are specified then \"days since %Y-%m-%d 00:00:00\" where Y,m,d" " are determined from the filename of the first file. Set t_axis_variable to" " an empty string to use.") - TECA_POPTS_GET(std::vector, prefix, t_values, + TECA_POPTS_MULTI_GET(std::vector, prefix, t_values, "An optional explicit list of double precision values to use as the" " time axis. If provided these take precedence over the values found" " in the files. Otherwise the variable pointed to by the t_axis_variable" " provides the time values. Set t_axis_variable to an empty string" " to use.") TECA_POPTS_GET(int, prefix, periodic_in_x, - "the dataset has a periodic boundary in the x direction (0)") + "the dataset has a periodic boundary in the x direction") TECA_POPTS_GET(int, prefix, periodic_in_y, - "the dataset has a periodic boundary in the y direction (0)") + "the dataset has a periodic boundary in the y direction") TECA_POPTS_GET(int, prefix, periodic_in_z, - "the dataset has a periodic boundary in the z direction (0)") + "the dataset has a periodic boundary in the z direction") TECA_POPTS_GET(int, prefix, max_metadata_ranks, - "set the max number of MPI ranks for reading metadata (1024)") + "set the max number of MPI ranks for reading metadata") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/io/teca_cf_writer.cxx b/io/teca_cf_writer.cxx index 2a4e4cd84..ce84d640a 100644 --- a/io/teca_cf_writer.cxx +++ b/io/teca_cf_writer.cxx @@ -74,26 +74,26 @@ void teca_cf_writer::get_properties_description( " file names (%F-%HZ). %t% in the file name is replaced with date/time" " of the first time step in the file using this format specifier.") TECA_POPTS_GET(long, prefix, first_step, - "set the first time step to process (0)") + "set the first time step to process") TECA_POPTS_GET(long, prefix, last_step, "set the last time step to process. 
A value less than 0 results " - "in all steps being processed.(-1)") + "in all steps being processed.") TECA_POPTS_GET(unsigned int, prefix, steps_per_file, - "set the number of time steps to write per file (128)") + "set the number of time steps to write per file") TECA_POPTS_GET(int, prefix, mode_flags, - "mode flags to pass to NetCDF when creating the file (NC_CLOBBER)") + "mode flags to pass to NetCDF when creating the file") TECA_POPTS_GET(int, prefix, use_unlimited_dim, "if set the slowest varying dimension is specified to be " - "NC_UNLIMITED. (0)") + "NC_UNLIMITED.") TECA_POPTS_GET(int, prefix, compression_level, "sets the zlib compression level used for each variable;" - " does nothing if the value is less than or equal to 0. (-1)") + " does nothing if the value is less than or equal to 0.") TECA_POPTS_GET(int, prefix, flush_files, - "if set files are flushed before they are closed. (0)") + "if set files are flushed before they are closed.") TECA_POPTS_MULTI_GET(std::vector, prefix, point_arrays, - "the list of point centered arrays to write (empty)") + "the list of point centered arrays to write") TECA_POPTS_MULTI_GET(std::vector, prefix, information_arrays, - "the list of non-geometric arrays to write (empty)") + "the list of non-geometric arrays to write") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/io/teca_multi_cf_reader.cxx b/io/teca_multi_cf_reader.cxx index 3a7824ba1..98fb76bed 100644 --- a/io/teca_multi_cf_reader.cxx +++ b/io/teca_multi_cf_reader.cxx @@ -254,6 +254,7 @@ int teca_multi_cf_reader_internals::parse_cf_reader_section( // -------------------------------------------------------------------------- teca_multi_cf_reader::teca_multi_cf_reader() : + input_file(""), x_axis_variable("lon"), y_axis_variable("lat"), z_axis_variable(""), @@ -282,32 +283,32 @@ void teca_multi_cf_reader::get_properties_description( opts.add_options() TECA_POPTS_GET(std::string, prefix, input_file, - "a file dedscribing the dataset 
layout ()") + "a file dedscribing the dataset layout") TECA_POPTS_GET(std::string, prefix, x_axis_variable, - "name of variable that has x axis coordinates (lon)") + "name of variable that has x axis coordinates") TECA_POPTS_GET(std::string, prefix, y_axis_variable, - "name of variable that has y axis coordinates (lat)") + "name of variable that has y axis coordinates") TECA_POPTS_GET(std::string, prefix, z_axis_variable, - "name of variable that has z axis coordinates ()") + "name of variable that has z axis coordinates") TECA_POPTS_GET(std::string, prefix, t_axis_variable, - "name of variable that has t axis coordinates (time)") + "name of variable that has t axis coordinates") TECA_POPTS_GET(std::string, prefix, t_calendar, - "name of variable that has the time calendar (calendar)") + "name of variable that has the time calendar") TECA_POPTS_GET(std::string, prefix, t_units, "a std::get_time template for decoding time from the input filename") TECA_POPTS_GET(std::string, prefix, filename_time_template, - "name of variable that has the time unit (units)") - TECA_POPTS_GET(std::vector, prefix, t_values, + "name of variable that has the time unit") + TECA_POPTS_MULTI_GET(std::vector, prefix, t_values, "name of variable that has t axis values set by the" - "the user if the file doesn't have time variable set ()") + "the user if the file doesn't have time variable set") TECA_POPTS_GET(int, prefix, periodic_in_x, - "the dataset has a periodic boundary in the x direction (0)") + "the dataset has a periodic boundary in the x direction") TECA_POPTS_GET(int, prefix, periodic_in_y, - "the dataset has a periodic boundary in the y direction (0)") + "the dataset has a periodic boundary in the y direction") TECA_POPTS_GET(int, prefix, periodic_in_z, - "the dataset has a periodic boundary in the z direction (0)") + "the dataset has a periodic boundary in the z direction") TECA_POPTS_GET(int, prefix, max_metadata_ranks, - "set the max number of ranks for reading metadata 
(1024)") + "set the max number of ranks for reading metadata") ; this->teca_algorithm::get_properties_description(prefix, opts); @@ -491,6 +492,8 @@ int teca_multi_cf_reader::set_input_file(const std::string &input_file) } } + this->input_file = input_file; + return 0; } diff --git a/io/teca_multi_cf_reader.h b/io/teca_multi_cf_reader.h index f575ef5ef..c02aa7350 100644 --- a/io/teca_multi_cf_reader.h +++ b/io/teca_multi_cf_reader.h @@ -77,6 +77,7 @@ class teca_multi_cf_reader : public teca_algorithm // set the file name describing the dataset to read int set_input_file(const std::string &input_file); + std::string get_input_file() { return this->input_file; } // adds a reader to the collection and at the same time specifies // how it will be used. @@ -160,6 +161,7 @@ class teca_multi_cf_reader : public teca_algorithm void set_modified() override; private: + std::string input_file; std::string x_axis_variable; std::string y_axis_variable; std::string z_axis_variable; diff --git a/io/teca_table_reader.cxx b/io/teca_table_reader.cxx index c4018036c..92679a7c7 100644 --- a/io/teca_table_reader.cxx +++ b/io/teca_table_reader.cxx @@ -191,14 +191,14 @@ void teca_table_reader::get_properties_description( TECA_POPTS_GET(string, prefix, file_name, "a file name to read") TECA_POPTS_GET(string, prefix, index_column, - "name of the column containing index values (\"\")") + "name of the column containing index values") TECA_POPTS_GET(int, prefix, generate_original_ids, "add original row ids into the output. default off.") TECA_POPTS_MULTI_GET(std::vector, prefix, metadata_column_names, "names of the columns to copy directly into metadata") TECA_POPTS_MULTI_GET(std::vector, prefix, metadata_column_keys, "names of the metadata keys to create from the named columns") - TECA_POPTS_GET(int, prefix, output_format, + TECA_POPTS_GET(int, prefix, file_format, "output file format enum, 0:csv, 1:bin, 2:xlsx, 3:auto." 
"if auto is used, format is deduced from file_name") ; diff --git a/io/teca_wrf_reader.cxx b/io/teca_wrf_reader.cxx index b2291eba3..1424adaac 100644 --- a/io/teca_wrf_reader.cxx +++ b/io/teca_wrf_reader.cxx @@ -127,30 +127,30 @@ void teca_wrf_reader::get_properties_description( + (prefix.empty()?"teca_wrf_reader":prefix)); opts.add_options() - TECA_POPTS_GET(std::vector, prefix, file_names, + TECA_POPTS_MULTI_GET(std::vector, prefix, file_names, "paths/file names to read") TECA_POPTS_GET(std::string, prefix, files_regex, "a regular expression that matches the set of files " "comprising the dataset") TECA_POPTS_GET(std::string, prefix, metadata_cache_dir, - "a directory where metadata caches can be stored ()") + "a directory where metadata caches can be stored") TECA_POPTS_GET(std::string, prefix, t_calendar, - "name of variable that has the time calendar (calendar)") + "name of variable that has the time calendar") TECA_POPTS_GET(std::string, prefix, t_units, "a std::get_time template for decoding time from the input filename") TECA_POPTS_GET(std::string, prefix, filename_time_template, - "name of variable that has the time unit (units)") - TECA_POPTS_GET(std::vector, prefix, t_values, + "name of variable that has the time unit") + TECA_POPTS_MULTI_GET(std::vector, prefix, t_values, "name of variable that has t axis values set by the" - "the user if the file doesn't have time variable set ()") + "the user if the file doesn't have time variable set") TECA_POPTS_GET(int, prefix, periodic_in_x, - "the dataset has apriodic boundary in the x direction (0)") + "the dataset has apriodic boundary in the x direction") TECA_POPTS_GET(int, prefix, periodic_in_y, - "the dataset has apriodic boundary in the y direction (0)") + "the dataset has apriodic boundary in the y direction") TECA_POPTS_GET(int, prefix, periodic_in_z, - "the dataset has apriodic boundary in the z direction (0)") + "the dataset has apriodic boundary in the z direction") TECA_POPTS_GET(int, prefix, 
thread_pool_size, - "set the number of I/O threads (-1)") + "set the number of I/O threads") ; this->teca_algorithm::get_properties_description(prefix, opts); diff --git a/python/teca_py_alg.i b/python/teca_py_alg.i index f31557f4f..c5ac4db63 100644 --- a/python/teca_py_alg.i +++ b/python/teca_py_alg.i @@ -217,13 +217,6 @@ %shared_ptr(teca_table_region_mask) %ignore teca_table_region_mask::operator=; %include "teca_table_region_mask.h" -%extend teca_table_region_mask -{ - TECA_PY_ALGORITHM_VECTOR_PROPERTY(unsigned long, region_size) - TECA_PY_ALGORITHM_VECTOR_PROPERTY(unsigned long, region_start); - TECA_PY_ALGORITHM_VECTOR_PROPERTY(double, region_x_coordinate); - TECA_PY_ALGORITHM_VECTOR_PROPERTY(double, region_y_coordinate); -} /*************************************************************************** pytorch_algorithm From 4fdd630071c885578310c57143bff4cca560bae7 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 24 Feb 2021 16:09:32 -0800 Subject: [PATCH 029/180] app tests depend on the target use the presence of the target to decide weather or not to run the test. 
--- CMake/teca_python.cmake | 1 + CMake/teca_test.cmake | 6 ++ test/apps/CMakeLists.txt | 156 ++++++++++++++++++++++++--------------- 3 files changed, 103 insertions(+), 60 deletions(-) diff --git a/CMake/teca_python.cmake b/CMake/teca_python.cmake index 6ee3f7397..181136529 100644 --- a/CMake/teca_python.cmake +++ b/CMake/teca_python.cmake @@ -60,6 +60,7 @@ function (teca_add_python_app app_name) if (NOT APP_SOURCES) set(APP_SOURCES "${app_name}.in") endif() + add_custom_target(${app_name}) teca_py_install_apps(${APP_SOURCES}) else() message(STATUS "command line application ${app_name} -- disabled") diff --git a/CMake/teca_test.cmake b/CMake/teca_test.cmake index 375c3524f..346eaf319 100644 --- a/CMake/teca_test.cmake +++ b/CMake/teca_test.cmake @@ -51,3 +51,9 @@ function (teca_add_test T_NAME) endif() endif() endfunction() + +function (teca_add_app_test T_NAME T_TARGET) + if (TARGET ${T_TARGET}) + teca_add_test(${T_NAME} ${ARGV}) + endif() +endfunction() diff --git a/test/apps/CMakeLists.txt b/test/apps/CMakeLists.txt index 09f790368..75681fccc 100644 --- a/test/apps/CMakeLists.txt +++ b/test/apps/CMakeLists.txt @@ -5,13 +5,15 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR} $ ) -teca_add_test(test_deeplab_ar_detect_app_threads +teca_add_app_test(test_deeplab_ar_detect_app_threads + teca_deeplab_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} REQ_TECA_DATA) -teca_add_test(test_deeplab_ar_detect_app_mpi +teca_add_app_test(test_deeplab_ar_detect_app_mpi + teca_deeplab_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} @@ -19,7 +21,8 @@ teca_add_test(test_deeplab_ar_detect_app_mpi ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_deeplab_ar_detect_app_mpi_threads 
+teca_add_app_test(test_deeplab_ar_detect_app_mpi_threads + teca_deeplab_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${HALF_TEST_CORES} @@ -27,13 +30,15 @@ teca_add_test(test_deeplab_ar_detect_app_mpi_threads ${TECA_HAS_MPI} ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_deeplab_ar_detect_app_mcf_threads +teca_add_app_test(test_deeplab_ar_detect_app_mcf_threads + teca_deeplab_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_PYTORCH} REQ_TECA_DATA) -teca_add_test(test_deeplab_ar_detect_app_mcf_mpi +teca_add_app_test(test_deeplab_ar_detect_app_mcf_mpi + teca_deeplab_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} @@ -41,7 +46,8 @@ teca_add_test(test_deeplab_ar_detect_app_mcf_mpi ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_deeplab_ar_detect_app_mcf_mpi_threads +teca_add_app_test(test_deeplab_ar_detect_app_mcf_mpi_threads + teca_deeplab_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_deeplab_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${HALF_TEST_CORES} @@ -49,141 +55,157 @@ teca_add_test(test_deeplab_ar_detect_app_mcf_mpi_threads ${TECA_HAS_MPI} ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_threads +teca_add_app_test(test_bayesian_ar_detect_app_threads + teca_bayesian_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_mpi +teca_add_app_test(test_bayesian_ar_detect_app_mpi + teca_bayesian_ar_detect COMMAND 
${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_mpi_threads +teca_add_app_test(test_bayesian_ar_detect_app_mpi_threads + teca_bayesian_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${HALF_TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_mcf_threads +teca_add_app_test(test_bayesian_ar_detect_app_mcf_threads + teca_bayesian_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_mcf_mpi +teca_add_app_test(test_bayesian_ar_detect_app_mcf_mpi + teca_bayesian_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_mcf_mpi_threads +teca_add_app_test(test_bayesian_ar_detect_app_mcf_mpi_threads + teca_bayesian_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${HALF_TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_bayesian_ar_detect_app_packed_data_mpi +teca_add_app_test(test_bayesian_ar_detect_app_packed_data_mpi + teca_bayesian_ar_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_packed_data.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) 
-teca_add_test(test_integrated_vapor_transport_app_threads +teca_add_app_test(test_integrated_vapor_transport_app_threads COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_integrated_vapor_transport_app_mpi +teca_add_app_test(test_integrated_vapor_transport_app_mpi + teca_integrated_vapor_transport COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) -teca_add_test(test_integrated_vapor_transport_app_mpi_threads +teca_add_app_test(test_integrated_vapor_transport_app_mpi_threads + teca_integrated_vapor_transport COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${HALF_TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_integrated_vapor_transport_app_packed_data +teca_add_app_test(test_integrated_vapor_transport_app_packed_data + teca_integrated_vapor_transport COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_integrated_vapor_transport_app_packed_data.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_tc_detect_app +teca_add_app_test(test_tc_detect_app teca_tc_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_tc_detect_app_mpi +teca_add_app_test(test_tc_detect_app_mpi teca_tc_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_detect_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} REQ_TECA_DATA) -teca_add_test(test_tc_detect_app_mcf 
+teca_add_app_test(test_tc_detect_app_mcf teca_tc_detect COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_detect_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_tc_trajectory_app +teca_add_app_test(test_tc_trajectory_app + teca_tc_trajectory COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_trajectory_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} REQ_TECA_DATA) -teca_add_test(test_tc_wind_radii_app_serial +teca_add_app_test(test_tc_wind_radii_app_serial + teca_tc_wiind_radii COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_wind_radii_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_tc_wind_radii_app_mpi +teca_add_app_test(test_tc_wind_radii_app_mpi + teca_tc_wiind_radii COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_wind_radii_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_MPI} REQ_TECA_DATA) -teca_add_test(test_tc_stats_app +teca_add_app_test(test_tc_stats_app teca_tc_stats COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_stats_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} REQ_TECA_DATA) -teca_add_test(test_tc_trajectory_scalars_app_serial +teca_add_app_test(test_tc_trajectory_scalars_app_serial + teca_tc_trajectory_scalars COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_trajectory_scalars_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} REQ_TECA_DATA) -teca_add_test(test_tc_trajectory_scalars_app_mpi +teca_add_app_test(test_tc_trajectory_scalars_app_mpi + teca_tc_trajectory_scalars COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_trajectory_scalars_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_tc_wind_radii_stats_app +teca_add_app_test(test_tc_wind_radii_stats_app teca_tc_wind_radii_stats COMMAND 
${CMAKE_CURRENT_SOURCE_DIR}/test_tc_wind_radii_stats_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} REQ_TECA_DATA) -teca_add_test(test_event_filter_app +teca_add_app_test(test_event_filter_app teca_event_filter COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_event_filter_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_seasonal_average_threads +teca_add_app_test(test_temporal_reduction_app_seasonal_average_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -191,7 +213,8 @@ teca_add_test(test_temporal_reduction_app_seasonal_average_threads FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_seasonal_average_mpi +teca_add_app_test(test_temporal_reduction_app_seasonal_average_mpi + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -199,7 +222,8 @@ teca_add_test(test_temporal_reduction_app_seasonal_average_mpi FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_seasonal_average_mpi_threads +teca_add_app_test(test_temporal_reduction_app_seasonal_average_mpi_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -208,7 +232,8 @@ teca_add_test(test_temporal_reduction_app_seasonal_average_mpi_threads ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_average_threads 
+teca_add_app_test(test_temporal_reduction_app_monthly_average_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -216,7 +241,8 @@ teca_add_test(test_temporal_reduction_app_monthly_average_threads FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_average_mpi +teca_add_app_test(test_temporal_reduction_app_monthly_average_mpi + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -224,7 +250,8 @@ teca_add_test(test_temporal_reduction_app_monthly_average_mpi FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_average_mpi_threads +teca_add_app_test(test_temporal_reduction_app_monthly_average_mpi_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -233,7 +260,8 @@ teca_add_test(test_temporal_reduction_app_monthly_average_mpi_threads ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_minimum_threads +teca_add_app_test(test_temporal_reduction_app_monthly_minimum_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -241,7 +269,8 @@ teca_add_test(test_temporal_reduction_app_monthly_minimum_threads FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) 
-teca_add_test(test_temporal_reduction_app_monthly_minimum_mpi +teca_add_app_test(test_temporal_reduction_app_monthly_minimum_mpi + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -249,7 +278,8 @@ teca_add_test(test_temporal_reduction_app_monthly_minimum_mpi FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_minimum_mpi_threads +teca_add_app_test(test_temporal_reduction_app_monthly_minimum_mpi_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -258,7 +288,8 @@ teca_add_test(test_temporal_reduction_app_monthly_minimum_mpi_threads ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_maximum_threads +teca_add_app_test(test_temporal_reduction_app_monthly_maximum_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -266,7 +297,8 @@ teca_add_test(test_temporal_reduction_app_monthly_maximum_threads FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_maximum_mpi +teca_add_app_test(test_temporal_reduction_app_monthly_maximum_mpi + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -274,7 +306,8 @@ teca_add_test(test_temporal_reduction_app_monthly_maximum_mpi FEATURES ${TECA_HAS_NETCDF_MPI} 
${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_monthly_maximum_mpi_threads +teca_add_app_test(test_temporal_reduction_app_monthly_maximum_mpi_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc" prw @@ -283,28 +316,32 @@ teca_add_test(test_temporal_reduction_app_monthly_maximum_mpi_threads ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_daily_average_missing_values +teca_add_app_test(test_temporal_reduction_app_daily_average_missing_values + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "MERRA2_100_inst3_3d_asm_Np_crop_1980012[89]\\.nc" T daily average 1 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_daily_maximum_missing_values +teca_add_app_test(test_temporal_reduction_app_daily_maximum_missing_values + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "MERRA2_100_inst3_3d_asm_Np_crop_1980012[89]\\.nc" T daily maximum 1 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_daily_minimum_missing_values +teca_add_app_test(test_temporal_reduction_app_daily_minimum_missing_values + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "MERRA2_100_inst3_3d_asm_Np_crop_1980012[89]\\.nc" T daily minimum 1 FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_mcf_daily_average_threads +teca_add_app_test(test_temporal_reduction_app_mcf_daily_average_threads + 
teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "${CMAKE_BINARY_DIR}/${BIN_PREFIX}/../test/ECMWF-IFS-HR-SST-present.mcf" hus @@ -312,7 +349,8 @@ teca_add_test(test_temporal_reduction_app_mcf_daily_average_threads FEATURES ${TECA_HAS_NETCDF} ${TECA_HAS_UDUNITS} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_mcf_daily_average_mpi +teca_add_app_test(test_temporal_reduction_app_mcf_daily_average_mpi + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "${CMAKE_BINARY_DIR}/${BIN_PREFIX}/../test/ECMWF-IFS-HR-SST-present.mcf" hus @@ -320,7 +358,8 @@ teca_add_test(test_temporal_reduction_app_mcf_daily_average_mpi FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_UDUNITS} ${TECA_HAS_MPI} ${MPI4Py_FOUND} REQ_TECA_DATA) -teca_add_test(test_temporal_reduction_app_mcf_daily_average_mpi_threads +teca_add_app_test(test_temporal_reduction_app_mcf_daily_average_mpi_threads + teca_temporal_reduction COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_temporal_reduction_app_mcf.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "${CMAKE_BINARY_DIR}/${BIN_PREFIX}/../test/ECMWF-IFS-HR-SST-present.mcf" hus @@ -329,14 +368,14 @@ teca_add_test(test_temporal_reduction_app_mcf_daily_average_mpi_threads ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) -teca_add_test(test_cartesian_mesh_diff_app_pass +teca_add_app_test(test_cartesian_mesh_diff_app_pass teca_cartesian_mesh_diff COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_cartesian_mesh_diff_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT}/test_cartesian_mesh_regrid.bin ${TECA_DATA_ROOT}/test_cartesian_mesh_regrid.bin LSMASK REQ_TECA_DATA) -teca_add_test(test_cartesian_mesh_diff_app_fail +teca_add_app_test(test_cartesian_mesh_diff_app_fail teca_cartesian_mesh_diff COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_cartesian_mesh_diff_app.sh 
${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT}/test_cf_writer_collective_000213.bin @@ -344,14 +383,14 @@ teca_add_test(test_cartesian_mesh_diff_app_fail REQ_TECA_DATA WILL_FAIL) -teca_add_test(test_table_diff_app_pass +teca_add_app_test(test_table_diff_app_pass teca_table_diff COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_table_diff_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT}/cam5_1_amip_run2_tracks_2005_09.bin ${TECA_DATA_ROOT}/cam5_1_amip_run2_tracks_2005_09.bin REQ_TECA_DATA) -teca_add_test(test_table_diff_app_fail +teca_add_app_test(test_table_diff_app_fail teca_table_diff COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_table_diff_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT}/test_tc_stats_app_output_class_table_ref.bin @@ -359,21 +398,21 @@ teca_add_test(test_table_diff_app_fail REQ_TECA_DATA WILL_FAIL) -teca_add_test(test_convert_table_app_csv_bin +teca_add_app_test(test_convert_table_app_csv_bin teca_convert_table COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_convert_table_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "cam5-025deg-all-hist-est1-v3-r1-tracks_size.csv" "cam5-025deg-all-hist-est1-v3-r1-tracks_size.bin" REQ_TECA_DATA) -teca_add_test(test_convert_table_app_bin_csv +teca_add_app_test(test_convert_table_app_bin_csv teca_convert_table COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_convert_table_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "cam5-025deg-all-hist-est1-v3-r1-tracks_size.bin" "cam5-025deg-all-hist-est1-v3-r1-tracks_size.csv" REQ_TECA_DATA) -teca_add_test(test_convert_table_app_bin_netcdf +teca_add_app_test(test_convert_table_app_bin_netcdf teca_convert_table COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_convert_table_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} "test_descriptive_statistics.bin" @@ -381,26 +420,23 @@ teca_add_test(test_convert_table_app_bin_netcdf FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_metadata_probe_app_cf +teca_add_app_test(test_metadata_probe_app_cf 
teca_metadata_probe COMMAND ${CMAKE_BINARY_DIR}/${BIN_PREFIX}/teca_metadata_probe --input_regex "${TECA_DATA_ROOT}/test_tc_candidates_1990_07_0[0-9]\\.nc" - FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_metadata_probe_app_mcf +teca_add_app_test(test_metadata_probe_app_mcf teca_metadata_probe COMMAND ${CMAKE_BINARY_DIR}/${BIN_PREFIX}/teca_metadata_probe --input_file ${CMAKE_CURRENT_BINARY_DIR}/../ECMWF-IFS-HR-SST-present.mcf --z_axis plev - FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_cf_restripe_app +teca_add_app_test(test_cf_restripe_app teca_cf_restripe COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_cf_restripe_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 - FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) -teca_add_test(test_cf_restripe_app_mpi +teca_add_app_test(test_cf_restripe_app_mpi teca_cf_restripe COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_cf_restripe_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} From 331d6585f5dd51f8248d304d82724a5e7cb221ad Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 24 Feb 2021 16:14:28 -0800 Subject: [PATCH 030/180] apps return 0 when asked for help --- apps/teca_bayesian_ar_detect.cpp | 8 ++++++-- apps/teca_cartesian_mesh_diff.cpp | 6 +++--- apps/teca_cf_restripe.cpp | 8 ++++++-- apps/teca_integrated_vapor_transport.cpp | 8 ++++++-- apps/teca_metadata_probe.cpp | 8 ++++++-- apps/teca_tc_detect.cpp | 8 ++++++-- apps/teca_tc_trajectory.cpp | 8 ++++++-- apps/teca_tc_wind_radii.cpp | 8 ++++++-- 8 files changed, 45 insertions(+), 17 deletions(-) diff --git a/apps/teca_bayesian_ar_detect.cpp b/apps/teca_bayesian_ar_detect.cpp index 840e4092b..4328b570d 100644 --- a/apps/teca_bayesian_ar_detect.cpp +++ b/apps/teca_bayesian_ar_detect.cpp @@ -199,10 +199,14 @@ int main(int argc, char **argv) all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line + int ierr = 0; variables_map opt_vals; - if 
(teca_app_util::process_command_line_help(mpi_man.get_comm_rank(), - argc, argv, basic_opt_defs, advanced_opt_defs, all_opt_defs, opt_vals)) + if ((ierr = teca_app_util::process_command_line_help( + mpi_man.get_comm_rank(), argc, argv, basic_opt_defs, + advanced_opt_defs, all_opt_defs, opt_vals))) { + if (ierr == 1) + return 0; return -1; } diff --git a/apps/teca_cartesian_mesh_diff.cpp b/apps/teca_cartesian_mesh_diff.cpp index 26be1d2e2..8a6cc5208 100644 --- a/apps/teca_cartesian_mesh_diff.cpp +++ b/apps/teca_cartesian_mesh_diff.cpp @@ -149,7 +149,7 @@ int main(int argc, char **argv) << std::endl << basic_opt_defs << std::endl << std::endl; - return -1; + return 0; } if (opt_vals.count("advanced_help")) @@ -159,7 +159,7 @@ int main(int argc, char **argv) << std::endl << advanced_opt_defs << std::endl << std::endl; - return -1; + return 0; } if (opt_vals.count("full_help")) @@ -169,7 +169,7 @@ int main(int argc, char **argv) << std::endl << all_opt_defs << std::endl << std::endl; - return -1; + return 0; } } diff --git a/apps/teca_cf_restripe.cpp b/apps/teca_cf_restripe.cpp index 2273505b5..7d2acff12 100644 --- a/apps/teca_cf_restripe.cpp +++ b/apps/teca_cf_restripe.cpp @@ -115,10 +115,14 @@ int main(int argc, char **argv) all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line + int ierr = 0; variables_map opt_vals; - if (teca_app_util::process_command_line_help(mpi_man.get_comm_rank(), - argc, argv, basic_opt_defs, advanced_opt_defs, all_opt_defs, opt_vals)) + if ((ierr = teca_app_util::process_command_line_help( + mpi_man.get_comm_rank(), argc, argv, basic_opt_defs, + advanced_opt_defs, all_opt_defs, opt_vals))) { + if (ierr == 1) + return 0; return -1; } diff --git a/apps/teca_integrated_vapor_transport.cpp b/apps/teca_integrated_vapor_transport.cpp index b64aa91da..739df9ff9 100644 --- a/apps/teca_integrated_vapor_transport.cpp +++ b/apps/teca_integrated_vapor_transport.cpp @@ -178,10 +178,14 @@ int main(int argc, char **argv) 
all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line + int ierr = 0; variables_map opt_vals; - if (teca_app_util::process_command_line_help(mpi_man.get_comm_rank(), - argc, argv, basic_opt_defs, advanced_opt_defs, all_opt_defs, opt_vals)) + if ((ierr = teca_app_util::process_command_line_help( + mpi_man.get_comm_rank(), argc, argv, basic_opt_defs, + advanced_opt_defs, all_opt_defs, opt_vals))) { + if (ierr == 1) + return 0; return -1; } diff --git a/apps/teca_metadata_probe.cpp b/apps/teca_metadata_probe.cpp index 5e2775475..cb3fcc928 100644 --- a/apps/teca_metadata_probe.cpp +++ b/apps/teca_metadata_probe.cpp @@ -102,10 +102,14 @@ int main(int argc, char **argv) all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line + int ierr = 0; variables_map opt_vals; - if (teca_app_util::process_command_line_help(mpi_man.get_comm_rank(), - argc, argv, basic_opt_defs, advanced_opt_defs, all_opt_defs, opt_vals)) + if ((ierr = teca_app_util::process_command_line_help( + mpi_man.get_comm_rank(), argc, argv, basic_opt_defs, + advanced_opt_defs, all_opt_defs, opt_vals))) { + if (ierr == 1) + return 0; return -1; } diff --git a/apps/teca_tc_detect.cpp b/apps/teca_tc_detect.cpp index f5a236b6e..df1fefab2 100644 --- a/apps/teca_tc_detect.cpp +++ b/apps/teca_tc_detect.cpp @@ -208,10 +208,14 @@ int main(int argc, char **argv) all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line + int ierr = 0; variables_map opt_vals; - if (teca_app_util::process_command_line_help(mpi_man.get_comm_rank(), - argc, argv, basic_opt_defs, advanced_opt_defs, all_opt_defs, opt_vals)) + if ((ierr = teca_app_util::process_command_line_help( + mpi_man.get_comm_rank(), argc, argv, basic_opt_defs, + advanced_opt_defs, all_opt_defs, opt_vals))) { + if (ierr == 1) + return 0; return -1; } diff --git a/apps/teca_tc_trajectory.cpp b/apps/teca_tc_trajectory.cpp index 6e9c190dd..cc9e84eb4 100644 --- a/apps/teca_tc_trajectory.cpp +++ 
b/apps/teca_tc_trajectory.cpp @@ -105,10 +105,14 @@ int main(int argc, char **argv) all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line + int ierr = 0; variables_map opt_vals; - if (teca_app_util::process_command_line_help(mpi_man.get_comm_rank(), - argc, argv, basic_opt_defs, advanced_opt_defs, all_opt_defs, opt_vals)) + if ((ierr = teca_app_util::process_command_line_help( + mpi_man.get_comm_rank(), argc, argv, basic_opt_defs, + advanced_opt_defs, all_opt_defs, opt_vals))) { + if (ierr == 1) + return 0; return -1; } diff --git a/apps/teca_tc_wind_radii.cpp b/apps/teca_tc_wind_radii.cpp index 4ded8748e..0dcbaae9b 100644 --- a/apps/teca_tc_wind_radii.cpp +++ b/apps/teca_tc_wind_radii.cpp @@ -135,10 +135,14 @@ int main(int argc, char **argv) all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line + int ierr = 0; variables_map opt_vals; - if (teca_app_util::process_command_line_help(mpi_man.get_comm_rank(), - argc, argv, basic_opt_defs, advanced_opt_defs, all_opt_defs, opt_vals)) + if ((ierr = teca_app_util::process_command_line_help( + mpi_man.get_comm_rank(), argc, argv, basic_opt_defs, + advanced_opt_defs, all_opt_defs, opt_vals))) { + if (ierr == 1) + return 0; return -1; } From 4942a065be4f6296ce31b073bbf075d2985aa892 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 24 Feb 2021 16:15:15 -0800 Subject: [PATCH 031/180] fix apps full help formatting --- apps/teca_cartesian_mesh_diff.cpp | 7 ++++--- apps/teca_integrated_vapor_transport.cpp | 2 +- apps/teca_tc_detect.cpp | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/apps/teca_cartesian_mesh_diff.cpp b/apps/teca_cartesian_mesh_diff.cpp index 8a6cc5208..d950b814e 100644 --- a/apps/teca_cartesian_mesh_diff.cpp +++ b/apps/teca_cartesian_mesh_diff.cpp @@ -66,9 +66,10 @@ int main(int argc, char **argv) // initialize command line options description // set up some common options to simplify use for most // common scenarios + int 
help_width = 100; options_description basic_opt_defs( "teca_cartesian_mesh_diff an application that compares two datasets.\n\n" - "Command line options", 120, -1 + "Command line options", help_width, help_width - 4 ); basic_opt_defs.add_options() ("reference_dataset", value()->required(), @@ -105,7 +106,7 @@ int main(int argc, char **argv) "control over all runtime modifiable parameters. The basic options\n" "(see" "--help) map to these, and will override them if both are\n" "specified.\n\n" - "Advanced command line options", -1, 1 + "Advanced command line options", help_width, help_width - 4 ); // create the pipeline stages here, they contain the @@ -125,7 +126,7 @@ int main(int argc, char **argv) } // package basic and advanced options for display - options_description all_opt_defs(-1, -1); + options_description all_opt_defs(help_width, help_width - 4); all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line diff --git a/apps/teca_integrated_vapor_transport.cpp b/apps/teca_integrated_vapor_transport.cpp index 739df9ff9..571b91979 100644 --- a/apps/teca_integrated_vapor_transport.cpp +++ b/apps/teca_integrated_vapor_transport.cpp @@ -174,7 +174,7 @@ int main(int argc, char **argv) cf_writer->set_steps_per_file(128); // package basic and advanced options for display - options_description all_opt_defs(-1, -1); + options_description all_opt_defs(help_width, help_width - 4); all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line diff --git a/apps/teca_tc_detect.cpp b/apps/teca_tc_detect.cpp index df1fefab2..af9694a0e 100644 --- a/apps/teca_tc_detect.cpp +++ b/apps/teca_tc_detect.cpp @@ -204,7 +204,7 @@ int main(int argc, char **argv) track_writer->get_properties_description("track_writer", advanced_opt_defs); // package basic and advanced options for display - options_description all_opt_defs(-1, -1); + options_description all_opt_defs(help_width, help_width - 4); 
all_opt_defs.add(basic_opt_defs).add(advanced_opt_defs); // parse the command line From 21f502d397376dc8b76df183a1d82ac0a8fd1b54 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 24 Feb 2021 16:18:18 -0800 Subject: [PATCH 032/180] add app tests for command line options --- CMake/teca_app.cmake | 1 + CMake/teca_python.cmake | 1 + test/apps/CMakeLists.txt | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+) diff --git a/CMake/teca_app.cmake b/CMake/teca_app.cmake index fbaa0e923..82bb06234 100644 --- a/CMake/teca_app.cmake +++ b/CMake/teca_app.cmake @@ -31,6 +31,7 @@ function (teca_add_app app_name) teca_system teca_core teca_data teca_io teca_alg ${APP_LIBS}) endif() + set_target_properties(${app_name} PROPERTIES APP_TYPE C++) install(TARGETS ${app_name} RUNTIME DESTINATION ${BIN_PREFIX}) else() message(STATUS "command line application ${app_name} -- disabled") diff --git a/CMake/teca_python.cmake b/CMake/teca_python.cmake index 181136529..b94dcb80c 100644 --- a/CMake/teca_python.cmake +++ b/CMake/teca_python.cmake @@ -61,6 +61,7 @@ function (teca_add_python_app app_name) set(APP_SOURCES "${app_name}.in") endif() add_custom_target(${app_name}) + set_target_properties(${app_name} PROPERTIES APP_TYPE Python) teca_py_install_apps(${APP_SOURCES}) else() message(STATUS "command line application ${app_name} -- disabled") diff --git a/test/apps/CMakeLists.txt b/test/apps/CMakeLists.txt index 75681fccc..007e11c58 100644 --- a/test/apps/CMakeLists.txt +++ b/test/apps/CMakeLists.txt @@ -442,3 +442,36 @@ teca_add_app_test(test_cf_restripe_app_mpi teca_cf_restripe ${MPIEXEC} ${TEST_CORES} FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} REQ_TECA_DATA) + +set(app_names + teca_bayesian_ar_detect + teca_cf_restripe + teca_convert_table + teca_deeplab_ar_detect + teca_event_filter + teca_integrated_vapor_transport + teca_metadata_probe + teca_profile_explorer + teca_tc_detect + teca_tc_stats + teca_tc_trajectory + teca_tc_trajectory_scalars + 
teca_tc_wind_radii + teca_tc_wind_radii_stats + teca_temporal_reduction) + +foreach (app_name ${app_names}) + teca_add_app_test(${app_name}_help ${app_name} + COMMAND ${CMAKE_BINARY_DIR}/${BIN_PREFIX}/${app_name} --help) + + get_target_property(app_type ${app_name} APP_TYPE) + if ("${app_type}" STREQUAL "C++") + + teca_add_app_test(${app_name}_full_help ${app_name} + COMMAND ${CMAKE_BINARY_DIR}/${BIN_PREFIX}/${app_name} --full_help) + + teca_add_app_test(${app_name}_advanced_help ${app_name} + COMMAND ${CMAKE_BINARY_DIR}/${BIN_PREFIX}/${app_name} --advanced_help) + + endif() +endforeach() From 79a49d21a32f697e6795a471db112809843ca8ae Mon Sep 17 00:00:00 2001 From: "Travis A. O'Brien" Date: Sun, 28 Feb 2021 11:54:29 -0500 Subject: [PATCH 033/180] Added *.vscode* to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index bf6a22fc6..ddcff675c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ *.patch _build *.pt +*.vscode* From 67f6b907f70d85b2ff1072945e7de3574fb87db2 Mon Sep 17 00:00:00 2001 From: "Travis A. 
O'Brien" Date: Sun, 28 Feb 2021 11:53:36 -0500 Subject: [PATCH 034/180] Refactored teca_apply_binary_mask * added required prefix for masked variables * added attributes so variables can be used by cf_writer * changed default behavior to do nothing if no mask variables are given (was previously mask all) * added python test for teca_apply_binary_mask --- alg/teca_apply_binary_mask.cxx | 225 ++++++++++++++++++++------ alg/teca_apply_binary_mask.h | 49 ++++-- test/python/CMakeLists.txt | 4 + test/python/test_apply_binary_mask.py | 132 +++++++++++++++ 4 files changed, 354 insertions(+), 56 deletions(-) create mode 100644 test/python/test_apply_binary_mask.py diff --git a/alg/teca_apply_binary_mask.cxx b/alg/teca_apply_binary_mask.cxx index 4ec27d4df..ab94a4ea8 100644 --- a/alg/teca_apply_binary_mask.cxx +++ b/alg/teca_apply_binary_mask.cxx @@ -1,26 +1,50 @@ #include "teca_apply_binary_mask.h" -#include "teca_mesh.h" +#include "teca_cartesian_mesh.h" #include "teca_array_collection.h" #include "teca_variant_array.h" #include "teca_metadata.h" -#include "teca_mesh.h" +#include "teca_array_attributes.h" #include #include -#include #include -using std::deque; +#include + +#if defined(TECA_HAS_BOOST) +#include +#endif + using std::vector; using std::set; using std::cerr; using std::endl; +using std::string; + +namespace internal +{ +template +void apply_mask(var_t * __restrict__ mask_output, const mask_t * __restrict__ +mask_variable, const var_t * __restrict__ input_variable, unsigned long n) +{ + for (size_t i = 0; i < n; ++i) + { + mask_t m = mask_variable[i]; + var_t v = input_variable[i]; + mask_output[i] = m*v; + } +} +}; -//#define TECA_DEBUG +//#define TECA_DEBUG// -------------------------------------------------------------------------- +std::string teca_apply_binary_mask::get_output_variable_name(std::string input_var){ + return this->output_var_prefix + input_var; +} // -------------------------------------------------------------------------- 
-teca_apply_binary_mask::teca_apply_binary_mask() : mask_variable("") +teca_apply_binary_mask::teca_apply_binary_mask() : + mask_variable(""), output_var_prefix("masked_") { this->set_number_of_input_connections(1); this->set_number_of_output_ports(1); @@ -30,6 +54,94 @@ teca_apply_binary_mask::teca_apply_binary_mask() : mask_variable("") teca_apply_binary_mask::~teca_apply_binary_mask() {} +#if defined(TECA_HAS_BOOST) +// -------------------------------------------------------------------------- +void teca_apply_binary_mask::get_properties_description( + const string &prefix, options_description &global_opts) +{ + options_description opts("Options for " + + (prefix.empty()?"teca_apply_binary_mask":prefix)); + + opts.add_options() + TECA_POPTS_MULTI_GET(std::vector, prefix, input_variables, + "the input variables") + TECA_POPTS_GET(std::string, prefix, mask_variable, + "the name of the variable containing the mask array") + TECA_POPTS_GET(std::string, prefix, output_var_prefix, + "the prefix to apply to masked input variable names") + ; + + this->teca_algorithm::get_properties_description(prefix, opts); + + global_opts.add(opts); +} + +// -------------------------------------------------------------------------- +void teca_apply_binary_mask::set_properties( + const string &prefix, variables_map &opts) +{ + this->teca_algorithm::set_properties(prefix, opts); + + TECA_POPTS_SET(opts, std::vector, prefix, input_variables) + TECA_POPTS_SET(opts, std::string, prefix, mask_variable) + TECA_POPTS_SET(opts, std::string, prefix, output_var_prefix) +} +#endif +// -------------------------------------------------------------------------- +teca_metadata teca_apply_binary_mask::get_output_metadata( + unsigned int port, + const std::vector &input_md) +{ +#ifdef TECA_DEBUG + cerr << teca_parallel_id() + << "teca_apply_binary_mask::get_output_metadata" << endl; +#endif + (void)port; + + if (this->input_variables.empty()) + { + TECA_WARNING("The list of input variables was not 
set") + } + + // add in the array we will generate + teca_metadata out_md(input_md[0]); + + // get the attributes + teca_metadata attributes; + out_md.get("attributes", attributes); + + // construct the list of output variable names + for (auto& input_var : input_variables){ + std::string output_var = this->get_output_variable_name(input_var); + + // add the varible to the list of output variables + out_md.append("variables", output_var); + + // insert attributes to enable this variable to be written by the CF writer + teca_metadata input_atts; + if (attributes.get(input_var, input_atts)) + { + TECA_WARNING("Failed to get attributes for \"" << input_var + << "\". Writing the result will not be possible") + } + else + { + // copy the attributes from the input. this will capture the + // data type, size, units, etc. + teca_array_attributes output_atts(input_atts); + + // update description. + output_atts.description = + std::string("masked/weighted by `" + this->mask_variable + "`"); + + attributes.set(output_var, (teca_metadata)output_atts); + out_md.set("attributes", attributes); + } + + } + return out_md; +} + // -------------------------------------------------------------------------- std::vector teca_apply_binary_mask::get_upstream_request( unsigned int port, const std::vector &input_md, @@ -58,8 +170,22 @@ std::vector teca_apply_binary_mask::get_upstream_request( if (req.has("arrays")) req.get("arrays", arrays); arrays.insert(this->mask_variable); - if (!this->mask_arrays.empty()) - arrays.insert(this->mask_arrays.begin(), this->mask_arrays.end()); + + // check that a prefix was given + if (this->get_output_var_prefix().empty()){ + TECA_ERROR("A prefix for the output variables was not specified") + return up_reqs; + } + + for (auto& input_var : input_variables){ + // insert the needed variable + arrays.insert(input_var); + + // intercept request for our output if the variable will have a new name + if(this->get_output_variable_name(input_var) != input_var){ + 
arrays.erase(this->get_output_variable_name(input_var)); + } + } req.set("arrays", arrays); // send up @@ -79,68 +205,75 @@ const_p_teca_dataset teca_apply_binary_mask::execute( (void)request; // get the input - const_p_teca_mesh in_mesh = - std::dynamic_pointer_cast(input_data[0]); + const_p_teca_cartesian_mesh in_mesh + = std::dynamic_pointer_cast(input_data[0]); if (!in_mesh) { - TECA_ERROR("empty input, or not a mesh") + TECA_ERROR("Failed to apply mask. Dataset is not a teca_cartesian_mesh") return nullptr; } - // create output and copy metadata, coordinates, etc - p_teca_mesh out_mesh = - std::dynamic_pointer_cast(in_mesh->new_instance()); - out_mesh->copy(in_mesh); + // create the output mesh, pass everything through + // output arrays are added in the variable loop + p_teca_cartesian_mesh out_mesh = teca_cartesian_mesh::New(); + out_mesh->shallow_copy(std::const_pointer_cast(in_mesh)); - // get the mask array + // check that a masking variable has been provided if (this->mask_variable.empty()) { TECA_ERROR("A mask variable was not specified") return nullptr; } - p_teca_array_collection arrays = out_mesh->get_point_arrays(); - - p_teca_variant_array mask_array = arrays->get(this->mask_variable); + // get the mask array + const_p_teca_variant_array mask_array + = in_mesh->get_point_arrays()->get(this->mask_variable); if (!mask_array) { - TECA_ERROR("mask variable \"" << this->mask_variable - << "\" is not in the input") + TECA_ERROR("masking array \"" << this->mask_variable + << "\" requested but not present.") return nullptr; } // apply the mask - unsigned long nelem = mask_array->size(); - - NESTED_TEMPLATE_DISPATCH(teca_variant_array_impl, - mask_array.get(), _1, + NESTED_TEMPLATE_DISPATCH(const teca_variant_array_impl, + mask_array.get(), _mask, - NT_1 *pmask = static_cast(mask_array.get())->get(); - - unsigned int narrays = arrays->size(); - for (unsigned int i = 0; i < narrays; ++i) - { - // if the user provided a list, restrict masking to that - // 
list. and if not, mask everything - if (!this->mask_arrays.empty() && - !std::count(this->mask_arrays.begin(), - this->mask_arrays.end(), arrays->get_name(i))) - continue; + // loop over input variables + for (auto& input_var : input_variables){ + std::string output_var = this->get_output_variable_name(input_var); + // get the input array + const_p_teca_variant_array input_array + = in_mesh->get_point_arrays()->get(input_var); + if (!input_array) + { + TECA_ERROR("input array \"" << input_var + << "\" requested but not present.") + return nullptr; + } - p_teca_variant_array array = arrays->get(i); + // allocate the output array + size_t n = input_array->size(); + p_teca_variant_array output_array = input_array->new_instance(); + output_array->resize(n); - NESTED_TEMPLATE_DISPATCH(teca_variant_array_impl, - array.get(), _2, + // do the mask calculation + NESTED_TEMPLATE_DISPATCH_FP( + teca_variant_array_impl, + output_array.get(), _var, - NT_2 *parray = static_cast(array.get())->get(); - - for (unsigned long q = 0; q < nelem; ++q) - { - parray[q] *= static_cast(pmask[q]); - } + internal::apply_mask( + dynamic_cast(output_array.get())->get(), + static_cast(mask_array.get())->get(), + static_cast(input_array.get())->get(), + n); ) + + out_mesh->get_point_arrays()->append( + output_var, output_array); } - ) + ) + return out_mesh; } diff --git a/alg/teca_apply_binary_mask.h b/alg/teca_apply_binary_mask.h index f30a1081e..3090e3ba3 100644 --- a/alg/teca_apply_binary_mask.h +++ b/alg/teca_apply_binary_mask.h @@ -10,11 +10,27 @@ TECA_SHARED_OBJECT_FORWARD_DECL(teca_apply_binary_mask) -/// an algorithm that applies a binary mask multiplicatively +/// Applies a mask to a given list of variables /** -an algorithm that applies a binary mask multiplicatively to all -arrays in the input dataset. where mask is 1 values are passed -through, where mask is 0 values are removed. + +Given a mask variable, this routine applies the mask to a list of input +variables. 
+ +The mask variable can either be binary, or it can represent a probability +ranging from 0 to 1. For mask variable `mask` and input variable `var`, this +algorithm computes `mask * var` and sends the resulting array downstream; this +masking operation is applied for all variables in the input list. + +A potential use-case for this algorithm is masking quantities like +precipitation by the probability of atmospheric river presence; the average +of this masked precipitation variable gives the average precipitation due to +atmospheric rivers. + +The output variable names are given a prefix to distinguish them from the +upstream versions. E.g., if the algorithm property `output_var_prefix` is set +to 'ar_', and the variable being masked is 'precip', then the output array +name is 'ar_precip'. + */ class teca_apply_binary_mask : public teca_algorithm { @@ -24,19 +40,31 @@ class teca_apply_binary_mask : public teca_algorithm TECA_ALGORITHM_CLASS_NAME(teca_apply_binary_mask) ~teca_apply_binary_mask(); + // report/initialize to/from Boost program options + // objects. + TECA_GET_ALGORITHM_PROPERTIES_DESCRIPTION() + TECA_SET_ALGORITHM_PROPERTIES() + // set the name of the output array TECA_ALGORITHM_PROPERTY(std::string, mask_variable) - // set the arrays to mask. if empty no arrays will be - // requested, but all present will be masked - TECA_ALGORITHM_VECTOR_PROPERTY(std::string, mask_array) + // the arrays to mask. 
if empty no arrays will be + // requested, and no variables will be masked + TECA_ALGORITHM_VECTOR_PROPERTY(std::string, input_variable) + + // the names of the arrays to store the masking results in + TECA_ALGORITHM_PROPERTY(std::string, output_var_prefix) + + // adds output_var_prefix to a given variable name + std::string get_output_variable_name(std::string input_var); protected: teca_apply_binary_mask(); private: - //teca_metadata get_output_metadata(unsigned int port, - // const std::vector &input_md) override; + teca_metadata get_output_metadata( + unsigned int port, + const std::vector &input_md) override; std::vector get_upstream_request( unsigned int port, const std::vector &input_md, @@ -48,7 +76,8 @@ class teca_apply_binary_mask : public teca_algorithm private: std::string mask_variable; - std::vector mask_arrays; + std::vector input_variables; + std::string output_var_prefix; }; #endif diff --git a/test/python/CMakeLists.txt b/test/python/CMakeLists.txt index 9f5ffe50b..042af1bf3 100644 --- a/test/python/CMakeLists.txt +++ b/test/python/CMakeLists.txt @@ -497,3 +497,7 @@ teca_add_test(py_test_table_from_stream_bin COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_table_from_stream.py "${TECA_DATA_ROOT}/cam5-025deg-all-hist-est1-v3-r1-tracks_size.bin" REQ_TECA_DATA) + +teca_add_test(py_test_apply_binary_mask + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_apply_binary_mask.py + 128 128 mask_ 1e-5) diff --git a/test/python/test_apply_binary_mask.py b/test/python/test_apply_binary_mask.py new file mode 100644 index 000000000..07da63dc2 --- /dev/null +++ b/test/python/test_apply_binary_mask.py @@ -0,0 +1,132 @@ +from teca import * +import sys +import numpy as np + +set_stack_trace_on_error() + +def isequal(a, b, epsilon): + return np.fabs(a - b) < epsilon + + +if not len(sys.argv) == 5: + sys.stderr.write('test_apply_binary_mask.py [nx] [ny] [prefix] [tolerance]\n') + sys.exit(-1) + +nx = int(sys.argv[1]) +ny = int(sys.argv[2]) 
+prefix = str(sys.argv[3]) +tolerance = float(sys.argv[4]) + +nxy = nx*ny + +dx = 360./float(nx - 1) +x = [] +for i in range(nx): + x.append(i*dx) + +dy = 180./float(ny - 1) +y = [] +for i in range(ny): + y.append(-90. + i*dy) + +# create a mask variable representing cosine-latitude weighting +coslat = np.cos(np.deg2rad(np.array(y)[:,np.newaxis]))*np.ones([ny, nx]) +# normalize it so it ranges from 0 to 1 +coslat /= np.sum(coslat) + + +x = teca_variant_array.New(x) +y = teca_variant_array.New(y) +z = teca_variant_array.New([0.]) +t = teca_variant_array.New([1.]) +mask_grid = teca_variant_array.New(coslat.astype(np.float32).ravel()) + +ones_grid = teca_variant_array.New(np.ones(nxy).astype(np.float64)) +zeros_grid = teca_variant_array.New(np.zeros(nxy).astype(np.float64)) +mask_grid = teca_variant_array.New(mask_grid) + +wext = [0, nx - 1, 0, ny - 1, 0, 0] + +mesh = teca_cartesian_mesh.New() +mesh.set_x_coordinates("lon", x) +mesh.set_y_coordinates("lat", y) +mesh.set_z_coordinates("z", z) +mesh.set_whole_extent(wext) +mesh.set_extent(wext) +mesh.set_time(1.0) +mesh.set_time_step(0) +mesh.get_point_arrays().append("ones_grid", ones_grid) +mesh.get_point_arrays().append("zeros_grid", zeros_grid) +mesh.get_point_arrays().append("mask_grid", mask_grid) + +md = teca_metadata() +md["whole_extent"] = wext +md["time_steps"] = [0] +md["variables"] = ["ones_grid", "zeros_grid", "mask_grid"] +md["number_of_time_steps"] = 1 +md["index_initializer_key"] = "number_of_time_steps" +md["index_request_key"] = "time_step" + +# add attributes +ones_atts = teca_array_attributes( + teca_double_array_code.get(), + teca_array_attributes.no_centering, + 2, 'ones', 'unitless', + 'an array full of ones', + None) + +zeros_atts = teca_array_attributes( + teca_double_array_code.get(), + teca_array_attributes.no_centering, + 2, 'zeros', 'unitless', + 'an array full of zeros', + None) + +# put it in the array attributes +try: + atts = md['attributes'] +except: + atts = teca_metadata() 
+atts['ones_grid'] = ones_atts.to_metadata() +atts['zeros_grid'] = zeros_atts.to_metadata() +md['attributes'] = atts + + + +source = teca_dataset_source.New() +source.set_metadata(md) +source.set_dataset(mesh) + +mask_comp = teca_apply_binary_mask.New() +mask_comp.set_input_connection(source.get_output_port()) +mask_comp.set_mask_variable("mask_grid") +mask_comp.set_input_variables(["ones_grid", "zeros_grid"]) +mask_comp.set_output_var_prefix(prefix) + +mask_o = teca_dataset_capture.New() +mask_o.set_input_connection(mask_comp.get_output_port()) + +mask_o.update() + +ds = mask_o.get_dataset() +mdo = ds.get_metadata() + +out_mesh = teca_cartesian_mesh.New() +out_mesh.copy(ds) + +masked_ones_array = out_mesh.get_point_arrays().get(prefix + "ones_grid") +masked_zeros_array = out_mesh.get_point_arrays().get(prefix + "zeros_grid") + +# check that the sum of ones times the mask is equal to 1 +sum_difference = np.sum(masked_ones_array) - 1 +if np.abs(sum_difference) > tolerance: + sys.stderr.write('Failure: sum of dx*dy field ' + + 'differs from 1 by {:2.2g}\n'.format(sum_difference)) + sys.exit(-1) + +# check that the sum of zeros is zero +sum_difference = np.sum(masked_zeros_array) +if not np.isclose(sum_difference, 0): + sys.stderr.write('Failure: sum of zeros field ' + + 'differs from 0 by {:2.2g}\n'.format(sum_difference)) + sys.exit(-1) From ce1ebbc95c99ade04a402c409e58a8e4476c78ee Mon Sep 17 00:00:00 2001 From: "Travis A. 
O'Brien" Date: Sun, 28 Feb 2021 13:59:37 -0500 Subject: [PATCH 035/180] Added apply_binary_mask stage to teca_bayesian_ar_detect Also updated reference to TECA BARD paper --- alg/teca_bayesian_ar_detect.h | 13 +++++++++---- apps/teca_bayesian_ar_detect.cpp | 23 ++++++++++++++++++++--- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/alg/teca_bayesian_ar_detect.h b/alg/teca_bayesian_ar_detect.h index c702910dc..a8cf0989b 100644 --- a/alg/teca_bayesian_ar_detect.h +++ b/alg/teca_bayesian_ar_detect.h @@ -10,11 +10,11 @@ TECA_SHARED_OBJECT_FORWARD_DECL(teca_bayesian_ar_detect) -/// CASCADE BARD atmospheric river detector +/// TECA BARD atmospheric river detector /** Given a point wise IVT (integrated vapor transport) field and a training parameter table computes the point wise probability of an atmospheric river -using the CASCADE BARD algorithm. +using the TECA BARD algorithm. Required inputs: @@ -33,8 +33,13 @@ through algorithm properties. For more information see: -Detection of Atmospheric Rivers with Inline Uncertainty Quantification: TECA-BARD v1.0 -O'Brien, T. A et al. Geoscientific Model Development, 2020 +O’Brien, T. A., Risser, M. D., Loring, B., Elbashandy, A. A., Krishnan, H., +Johnson, J., Patricola, C. M., O’Brien, J. P., Mahesh, A., Arriaga Ramirez, +S., Rhoades, A. M., Charn, A., Inda Díaz, H., & Collins, W. D. (2020). +Detection of atmospheric rivers with inline uncertainty quantification: +TECA-BARD v1.0.1. Geoscientific Model Development, 13(12), 6131–6148. 
+https://doi.org/10.5194/gmd-13-6131-2020 + */ class teca_bayesian_ar_detect : public teca_algorithm { diff --git a/apps/teca_bayesian_ar_detect.cpp b/apps/teca_bayesian_ar_detect.cpp index 4328b570d..e8dd140fc 100644 --- a/apps/teca_bayesian_ar_detect.cpp +++ b/apps/teca_bayesian_ar_detect.cpp @@ -8,6 +8,7 @@ #include "teca_bayesian_ar_detect_parameters.h" #include "teca_binary_segmentation.h" #include "teca_l2_norm.h" +#include "teca_apply_binary_mask.h" #include "teca_multi_cf_reader.h" #include "teca_integrated_vapor_transport.h" #include "teca_valid_value_mask.h" @@ -96,7 +97,7 @@ int main(int argc, char **argv) ("binary_ar_threshold", value()->default_value(2.0/3.0,"0.667"), "\nprobability threshold for segmenting ar_probability to produce ar_binary_tag\n") - ("output_file", value()->default_value(std::string("CASCADE_BARD_%t%.nc")), + ("output_file", value()->default_value(std::string("TECA_BARD_%t%.nc")), "\nA path and file name pattern for the output NetCDF files. %t% is replaced with a" " human readable date and time corresponding to the time of the first time step in" " the file. 
Use --cf_writer::date_format to change the formatting\n") @@ -184,6 +185,12 @@ int main(int argc, char **argv) ar_tag->set_threshold_variable("ar_probability"); ar_tag->set_segmentation_variable("ar_binary_tag"); + // mask any requested variables by "ar_probability" + p_teca_apply_binary_mask ar_mask = teca_apply_binary_mask::New(); + ar_mask->get_properties_description("ar_mask", advanced_opt_defs); + ar_mask->set_mask_variable("ar_probability"); + ar_mask->set_output_var_prefix("ar_wgtd_"); + // Add an executive for the writer p_teca_index_executive exec = teca_index_executive::New(); @@ -222,6 +229,7 @@ int main(int argc, char **argv) norm_coords->set_properties("norm_coords", opt_vals); params->set_properties("parameter_table", opt_vals); ar_detect->set_properties("ar_detect", opt_vals); + ar_mask->set_properties("ar_mask", opt_vals); cf_writer->set_properties("cf_writer", opt_vals); // now pass in the basic options, these are processed @@ -317,7 +325,7 @@ int main(int argc, char **argv) ar_detect->set_ivt_variable(opt_vals["ivt"].as()); } - // add the ivt caluation stages if needed + // add the ivt caculation stages if needed bool do_ivt = opt_vals.count("compute_ivt"); bool do_ivt_magnitude = opt_vals.count("compute_ivt_magnitude"); @@ -364,6 +372,14 @@ int main(int argc, char **argv) point_arrays.push_back(ivt_int->get_ivt_v_variable()); } + // add any ar-weighted variables to the output + std::vector mask_variables = ar_mask->get_input_variables(); + if (! 
mask_variables.empty()){ + for (std::string input_var : mask_variables){ + point_arrays.push_back(ar_mask->get_output_variable_name(input_var)); + } + } + cf_writer->set_file_name(opt_vals["output_file"].as()); cf_writer->set_information_arrays({"ar_count", "parameter_table_row"}); cf_writer->set_point_arrays(point_arrays); @@ -403,7 +419,8 @@ int main(int argc, char **argv) ar_detect->set_input_connection(0, params->get_output_port()); ar_detect->set_input_connection(1, head->get_output_port()); ar_tag->set_input_connection(0, ar_detect->get_output_port()); - cf_writer->set_input_connection(ar_tag->get_output_port()); + ar_mask->set_input_connection(0, ar_tag->get_output_port()); + cf_writer->set_input_connection(ar_mask->get_output_port()); // look for requested time step range, start bool parse_start_date = opt_vals.count("start_date"); From c4fc94c9502d7dc6e4b18cad89211b1cf21aa5ea Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Mon, 1 Mar 2021 11:16:03 -0800 Subject: [PATCH 036/180] binary_ar_mask address issues raised in review #565 * operate on teca_mesh so that all mesh types can be processed * clear long name attribute * output warning on rank 0 only * bring code into teca style, braces etc * some arbitary minor stylistic changes and tweaks --- alg/teca_apply_binary_mask.cxx | 171 ++++++++++++++++---------- alg/teca_apply_binary_mask.h | 28 +++-- test/python/test_apply_binary_mask.py | 4 +- 3 files changed, 126 insertions(+), 77 deletions(-) diff --git a/alg/teca_apply_binary_mask.cxx b/alg/teca_apply_binary_mask.cxx index ab94a4ea8..0e2d9aa8b 100644 --- a/alg/teca_apply_binary_mask.cxx +++ b/alg/teca_apply_binary_mask.cxx @@ -5,6 +5,7 @@ #include "teca_variant_array.h" #include "teca_metadata.h" #include "teca_array_attributes.h" +#include "teca_mpi_util.h" #include #include @@ -16,35 +17,28 @@ #include #endif -using std::vector; -using std::set; +//#define TECA_DEBUG + using std::cerr; using std::endl; -using std::string; namespace internal { +// 
output = mask*input template -void apply_mask(var_t * __restrict__ mask_output, const mask_t * __restrict__ -mask_variable, const var_t * __restrict__ input_variable, unsigned long n) +void apply_mask(var_t * __restrict__ output, + const mask_t * __restrict__ mask, + const var_t * __restrict__ input, + unsigned long n) { for (size_t i = 0; i < n; ++i) - { - mask_t m = mask_variable[i]; - var_t v = input_variable[i]; - mask_output[i] = m*v; - } + output[i] = mask[i]*input[i]; } -}; - -//#define TECA_DEBUG// -------------------------------------------------------------------------- -std::string teca_apply_binary_mask::get_output_variable_name(std::string input_var){ - return this->output_var_prefix + input_var; } // -------------------------------------------------------------------------- -teca_apply_binary_mask::teca_apply_binary_mask() : - mask_variable(""), output_var_prefix("masked_") +teca_apply_binary_mask::teca_apply_binary_mask() : + mask_variable(""), output_variable_prefix("masked_") { this->set_number_of_input_connections(1); this->set_number_of_output_ports(1); @@ -57,18 +51,19 @@ teca_apply_binary_mask::~teca_apply_binary_mask() #if defined(TECA_HAS_BOOST) // -------------------------------------------------------------------------- void teca_apply_binary_mask::get_properties_description( - const string &prefix, options_description &global_opts) + const std::string &prefix, options_description &global_opts) { options_description opts("Options for " + (prefix.empty()?"teca_apply_binary_mask":prefix)); opts.add_options() - TECA_POPTS_MULTI_GET(std::vector, prefix, input_variables, - "the input variables") + TECA_POPTS_MULTI_GET(std::vector, prefix, masked_variables, + "A list of variables to apply the mask to.") TECA_POPTS_GET(std::string, prefix, mask_variable, - "the name of the variable containing the mask array") - TECA_POPTS_GET(std::string, prefix, output_var_prefix, - "the prefix to apply to masked input variable names") + "The name of the variable 
containing the mask values.") + TECA_POPTS_GET(std::string, prefix, output_variable_prefix, + "A string prepended to the output variable names. If empty the" + " input variables will be replaced by their masked results") ; this->teca_algorithm::get_properties_description(prefix, opts); @@ -78,15 +73,34 @@ void teca_apply_binary_mask::get_properties_description( // -------------------------------------------------------------------------- void teca_apply_binary_mask::set_properties( - const string &prefix, variables_map &opts) + const std::string &prefix, variables_map &opts) { this->teca_algorithm::set_properties(prefix, opts); - TECA_POPTS_SET(opts, std::vector, prefix, input_variables) + TECA_POPTS_SET(opts, std::vector, prefix, masked_variables) TECA_POPTS_SET(opts, std::string, prefix, mask_variable) - TECA_POPTS_SET(opts, std::string, prefix, output_var_prefix) + TECA_POPTS_SET(opts, std::string, prefix, output_variable_prefix) } #endif + +// -------------------------------------------------------------------------- +std::string teca_apply_binary_mask::get_output_variable_name(std::string input_var) +{ + return this->output_variable_prefix + input_var; +} + +// -------------------------------------------------------------------------- +void teca_apply_binary_mask::get_output_variable_names( + std::vector &names) +{ + int n_inputs = this->masked_variables.size(); + for (int i = 0; i < n_inputs; ++i) + { + names.push_back( + this->get_output_variable_name(this->masked_variables[i])); + } +} + // -------------------------------------------------------------------------- teca_metadata teca_apply_binary_mask::get_output_metadata( unsigned int port, @@ -98,9 +112,13 @@ teca_metadata teca_apply_binary_mask::get_output_metadata( #endif (void)port; - if (this->input_variables.empty()) + // check that the input variables have been specified. + // this is likely a user error. 
+ if (this->masked_variables.empty() && + teca_mpi_util::mpi_rank_0(this->get_communicator())) { - TECA_WARNING("The list of input variables was not set") + TECA_WARNING("Nothing to do, masked_variables have not" + " been specified.") } // add in the array we will generate @@ -111,7 +129,8 @@ teca_metadata teca_apply_binary_mask::get_output_metadata( out_md.get("attributes", attributes); // construct the list of output variable names - for (auto& input_var : input_variables){ + for (auto& input_var : masked_variables) + { std::string output_var = this->get_output_variable_name(input_var); // add the varible to the list of output variables @@ -130,15 +149,21 @@ teca_metadata teca_apply_binary_mask::get_output_metadata( // data type, size, units, etc. teca_array_attributes output_atts(input_atts); - // update description. - output_atts.description = - std::string("masked/weighted by `" + this->mask_variable + "`"); - + // update description and long name + output_atts.description = input_var + + " multiplied by " + this->mask_variable; + + output_atts.long_name.clear(); + + // update the array attributes attributes.set(output_var, (teca_metadata)output_atts); - out_md.set("attributes", attributes); } } + + // update the attributes + out_md.set("attributes", attributes); + return out_md; } @@ -154,9 +179,9 @@ std::vector teca_apply_binary_mask::get_upstream_request( (void) port; (void) input_md; - vector up_reqs; + std::vector up_reqs; - // get the name of the array to request + // get the name of the mask array if (this->mask_variable.empty()) { TECA_ERROR("A mask variable was not specified") @@ -167,25 +192,36 @@ std::vector teca_apply_binary_mask::get_upstream_request( // add in what we need teca_metadata req(request); std::set arrays; + if (req.has("arrays")) req.get("arrays", arrays); + arrays.insert(this->mask_variable); - // check that a prefix was given - if (this->get_output_var_prefix().empty()){ - TECA_ERROR("A prefix for the output variables was not 
specified") - return up_reqs; + // check that the input variables have been specified. + // this is likely a user error. + if (this->masked_variables.empty() && + teca_mpi_util::mpi_rank_0(this->get_communicator())) + { + TECA_WARNING("Nothing to do, masked_variables have not" + " been specified.") } - for (auto& input_var : input_variables){ - // insert the needed variable + // request the arrays to mask + for (auto& input_var : masked_variables) + { + // request the needed variable arrays.insert(input_var); // intercept request for our output if the variable will have a new name - if(this->get_output_variable_name(input_var) != input_var){ - arrays.erase(this->get_output_variable_name(input_var)); + std::string out_var = this->get_output_variable_name(input_var); + if (out_var != input_var) + { + arrays.erase(out_var); } } + + // update the list of arrays to request req.set("arrays", arrays); // send up @@ -205,23 +241,24 @@ const_p_teca_dataset teca_apply_binary_mask::execute( (void)request; // get the input - const_p_teca_cartesian_mesh in_mesh - = std::dynamic_pointer_cast(input_data[0]); + const_p_teca_mesh in_mesh + = std::dynamic_pointer_cast(input_data[0]); if (!in_mesh) { - TECA_ERROR("Failed to apply mask. Dataset is not a teca_cartesian_mesh") + TECA_ERROR("Failed to apply mask. 
Dataset is not a teca_mesh") return nullptr; } // create the output mesh, pass everything through - // output arrays are added in the variable loop - p_teca_cartesian_mesh out_mesh = teca_cartesian_mesh::New(); - out_mesh->shallow_copy(std::const_pointer_cast(in_mesh)); + // masked arrays are added or replaced below + p_teca_mesh out_mesh = + std::static_pointer_cast + (std::const_pointer_cast(in_mesh)->new_shallow_copy()); // check that a masking variable has been provided if (this->mask_variable.empty()) { - TECA_ERROR("A mask variable was not specified") + TECA_ERROR("The mask_variable name was not specified") return nullptr; } @@ -230,46 +267,50 @@ const_p_teca_dataset teca_apply_binary_mask::execute( = in_mesh->get_point_arrays()->get(this->mask_variable); if (!mask_array) { - TECA_ERROR("masking array \"" << this->mask_variable - << "\" requested but not present.") + TECA_ERROR("The mask_variable \"" << this->mask_variable + << "\" was requested but is not present in the input data.") return nullptr; } // apply the mask NESTED_TEMPLATE_DISPATCH(const teca_variant_array_impl, - mask_array.get(), _mask, + mask_array.get(), _MASK, // loop over input variables - for (auto& input_var : input_variables){ + for (auto& input_var : masked_variables) + { std::string output_var = this->get_output_variable_name(input_var); + // get the input array const_p_teca_variant_array input_array = in_mesh->get_point_arrays()->get(input_var); if (!input_array) { - TECA_ERROR("input array \"" << input_var - << "\" requested but not present.") + TECA_ERROR("The masked_variable \"" << input_var + << "\" was requested but is not present in the input data.") return nullptr; } // allocate the output array size_t n = input_array->size(); - p_teca_variant_array output_array = input_array->new_instance(); - output_array->resize(n); + + p_teca_variant_array output_array = input_array->new_instance(n); + + //output_array->resize(n); // do the mask calculation - NESTED_TEMPLATE_DISPATCH_FP( + 
NESTED_TEMPLATE_DISPATCH( teca_variant_array_impl, - output_array.get(), _var, + output_array.get(), _VAR, internal::apply_mask( - dynamic_cast(output_array.get())->get(), - static_cast(mask_array.get())->get(), - static_cast(input_array.get())->get(), + dynamic_cast(output_array.get())->get(), + static_cast(mask_array.get())->get(), + static_cast(input_array.get())->get(), n); ) - out_mesh->get_point_arrays()->append( + out_mesh->get_point_arrays()->set( output_var, output_array); } ) diff --git a/alg/teca_apply_binary_mask.h b/alg/teca_apply_binary_mask.h index 3090e3ba3..78654e3ab 100644 --- a/alg/teca_apply_binary_mask.h +++ b/alg/teca_apply_binary_mask.h @@ -27,7 +27,7 @@ of this masked precipitation variable gives the average precipitation due to atmospheric rivers. The output variable names are given a prefix to distinguish them from the -upstream versions. E.g., if the algorithm property `output_var_prefix` is set +upstream versions. E.g., if the algorithm property `output_variable_prefix` is set to 'ar_', and the variable being masked is 'precip', then the output array name is 'ar_precip'. @@ -45,18 +45,22 @@ class teca_apply_binary_mask : public teca_algorithm TECA_GET_ALGORITHM_PROPERTIES_DESCRIPTION() TECA_SET_ALGORITHM_PROPERTIES() - // set the name of the output array + // set the name of the variable containing the mask values TECA_ALGORITHM_PROPERTY(std::string, mask_variable) - // the arrays to mask. if empty no arrays will be + // A list of of variables to apply the mask to. If empty no arrays will be // requested, and no variables will be masked - TECA_ALGORITHM_VECTOR_PROPERTY(std::string, input_variable) + TECA_ALGORITHM_VECTOR_PROPERTY(std::string, masked_variable) - // the names of the arrays to store the masking results in - TECA_ALGORITHM_PROPERTY(std::string, output_var_prefix) + // A prefix to prepend to the names of the variables that have been masked. 
+ // If this is empty maked data replaces its input, otherwise input data is + // preserved and maked data is added. + TECA_ALGORITHM_PROPERTY(std::string, output_variable_prefix) - // adds output_var_prefix to a given variable name - std::string get_output_variable_name(std::string input_var); + // helper that constructs and returns the result variable names taking into + // account he list of masked_variables and the output_variable_prefix. use + // this to know what variables will be produced. + void get_output_variable_names(std::vector &names); protected: teca_apply_binary_mask(); @@ -74,10 +78,14 @@ class teca_apply_binary_mask : public teca_algorithm const std::vector &input_data, const teca_metadata &request) override; + // helper that given and input variable name constructs the result variable + // name taking into account the output_variable_prefix + std::string get_output_variable_name(std::string input_var); + private: std::string mask_variable; - std::vector input_variables; - std::string output_var_prefix; + std::vector masked_variables; + std::string output_variable_prefix; }; #endif diff --git a/test/python/test_apply_binary_mask.py b/test/python/test_apply_binary_mask.py index 07da63dc2..0d2e873ab 100644 --- a/test/python/test_apply_binary_mask.py +++ b/test/python/test_apply_binary_mask.py @@ -100,8 +100,8 @@ def isequal(a, b, epsilon): mask_comp = teca_apply_binary_mask.New() mask_comp.set_input_connection(source.get_output_port()) mask_comp.set_mask_variable("mask_grid") -mask_comp.set_input_variables(["ones_grid", "zeros_grid"]) -mask_comp.set_output_var_prefix(prefix) +mask_comp.set_masked_variables(["ones_grid", "zeros_grid"]) +mask_comp.set_output_variable_prefix(prefix) mask_o = teca_dataset_capture.New() mask_o.set_input_connection(mask_comp.get_output_port()) From d8e9cdc93b1fd22dcb37b262a9ba0053711fdf26 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Mon, 1 Mar 2021 11:22:04 -0800 Subject: [PATCH 037/180] bayesian_ar_detect app 
updates for review of #565 * adds a command line option --ar_weighted_variables. this controls whether or not the algorithm is in the pipeline * adds a weighted variable to app test * updates the rtd doc --- .travis.yml | 2 +- apps/teca_bayesian_ar_detect.cpp | 56 ++++++++++++++++-------- doc/rtd/applications.rst | 4 ++ test/apps/test_bayesian_ar_detect_app.sh | 3 +- 4 files changed, 45 insertions(+), 20 deletions(-) diff --git a/.travis.yml b/.travis.yml index b4986a69e..5b0a32b05 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ env: - BUILD_TYPE=Debug - TECA_DIR=/travis_teca_dir - TECA_PYTHON_VERSION=3 - - TECA_DATA_REVISION=105 + - TECA_DATA_REVISION=106 jobs: - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=TRUE - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=FALSE diff --git a/apps/teca_bayesian_ar_detect.cpp b/apps/teca_bayesian_ar_detect.cpp index e8dd140fc..02b4d9194 100644 --- a/apps/teca_bayesian_ar_detect.cpp +++ b/apps/teca_bayesian_ar_detect.cpp @@ -84,6 +84,11 @@ int main(int argc, char **argv) ("write_ivt", "\nwhen this flag is present IVT vector is written to disk with" " the result\n") + ("ar_weighted_variables", value>()->multitoken(), + "\nAn optional list of variables to weight with the computed AR probability." 
+ " Each such variable will be multiplied by the computed AR probability, and" + " written to disk as \"NAME_ar_wgtd\".\n") + ("x_axis_variable", value()->default_value("lon"), "\nname of x coordinate variable\n") ("y_axis_variable", value()->default_value("lat"), @@ -178,7 +183,6 @@ int main(int argc, char **argv) ar_detect->get_properties_description("ar_detect", advanced_opt_defs); ar_detect->set_ivt_variable("IVT"); - // segment the ar probability field p_teca_binary_segmentation ar_tag = teca_binary_segmentation::New(); ar_tag->set_threshold_mode(ar_tag->BY_VALUE); @@ -189,7 +193,7 @@ int main(int argc, char **argv) p_teca_apply_binary_mask ar_mask = teca_apply_binary_mask::New(); ar_mask->get_properties_description("ar_mask", advanced_opt_defs); ar_mask->set_mask_variable("ar_probability"); - ar_mask->set_output_var_prefix("ar_wgtd_"); + ar_mask->set_output_variable_prefix("ar_wgtd_"); // Add an executive for the writer p_teca_index_executive exec = teca_index_executive::New(); @@ -264,7 +268,7 @@ int main(int argc, char **argv) reader = cf_reader; } - // add basic transfomration stages to the pipeline + // add transformation stages to the pipeline norm_coords->set_input_connection(reader->get_output_port()); vv_mask->set_input_connection(norm_coords->get_output_port()); unpack->set_input_connection(vv_mask->get_output_port()); @@ -325,7 +329,7 @@ int main(int argc, char **argv) ar_detect->set_ivt_variable(opt_vals["ivt"].as()); } - // add the ivt caculation stages if needed + // add the ivt calculation stages if needed bool do_ivt = opt_vals.count("compute_ivt"); bool do_ivt_magnitude = opt_vals.count("compute_ivt_magnitude"); @@ -359,6 +363,31 @@ int main(int argc, char **argv) head = l2_norm; } + // connect the detector and post detector operations + ar_detect->set_input_connection(0, params->get_output_port()); + ar_detect->set_input_connection(1, head->get_output_port()); + + ar_tag->set_input_connection(0, ar_detect->get_output_port()); + head = ar_tag; 
+ + // set the variables to weight by AR probability + if (opt_vals.count("ar_weighted_variables")) + { + ar_mask->set_masked_variables + (opt_vals["ar_weighted_variables"].as>()); + } + + // if there are any variables to weight add the mask stage to the pipeline + bool do_weighted = ar_mask->get_number_of_masked_variables(); + if (do_weighted) + { + ar_mask->set_input_connection(0, ar_tag->get_output_port()); + head = ar_mask; + } + + // connect and configure the writer + cf_writer->set_input_connection(head->get_output_port()); + // tell the writer to write ivt if needed std::vector point_arrays({"ar_probability", "ar_binary_tag"}); if ((do_ivt || do_ivt_magnitude) && opt_vals.count("write_ivt_magnitude")) @@ -372,12 +401,10 @@ int main(int argc, char **argv) point_arrays.push_back(ivt_int->get_ivt_v_variable()); } - // add any ar-weighted variables to the output - std::vector mask_variables = ar_mask->get_input_variables(); - if (! mask_variables.empty()){ - for (std::string input_var : mask_variables){ - point_arrays.push_back(ar_mask->get_output_variable_name(input_var)); - } + // tell the writer to write ar weighted variables if needed + if (do_weighted) + { + ar_mask->get_output_variable_names(point_arrays); } cf_writer->set_file_name(opt_vals["output_file"].as()); @@ -415,13 +442,6 @@ int main(int argc, char **argv) return -1; } - // connect the fixed stages of the pipeline - ar_detect->set_input_connection(0, params->get_output_port()); - ar_detect->set_input_connection(1, head->get_output_port()); - ar_tag->set_input_connection(0, ar_detect->get_output_port()); - ar_mask->set_input_connection(0, ar_tag->get_output_port()); - cf_writer->set_input_connection(ar_mask->get_output_port()); - // look for requested time step range, start bool parse_start_date = opt_vals.count("start_date"); bool parse_end_date = opt_vals.count("end_date"); @@ -497,7 +517,7 @@ int main(int argc, char **argv) teca_metadata seg_atts; seg_atts.set("long_name", std::string("binary 
indicator of atmospheric river")); seg_atts.set("description", std::string("binary indicator of atmospheric river")); - seg_atts.set("scheme", std::string("cascade_bard")); + seg_atts.set("scheme", std::string("TECA_BARD")); seg_atts.set("version", std::string("1.0")); seg_atts.set("note", std::string("derived by thresholding ar_probability >= ") + diff --git a/doc/rtd/applications.rst b/doc/rtd/applications.rst index 24011a25e..9d5934544 100644 --- a/doc/rtd/applications.rst +++ b/doc/rtd/applications.rst @@ -942,6 +942,10 @@ Command Line Arguments --write_ivt when this flag is present IVT vector is written to disk with the result +--ar_weighted_variables arg + An optional list of variables to weight with the computed AR probability. Each such variable + will be multiplied by the computed AR probability, and written to disk as "NAME_ar_wgtd". + --x_axis_variable arg (=lon) name of x coordinate variable diff --git a/test/apps/test_bayesian_ar_detect_app.sh b/test/apps/test_bayesian_ar_detect_app.sh index 1449e3d88..de93bce27 100755 --- a/test/apps/test_bayesian_ar_detect_app.sh +++ b/test/apps/test_bayesian_ar_detect_app.sh @@ -23,6 +23,7 @@ set -x # run the app ${launcher} ${app_prefix}/teca_bayesian_ar_detect \ --input_regex "${data_root}/ARTMIP_MERRA_2D_2017-05.*\.nc$" \ + --ar_weighted_variables IVT \ --output_file test_bayesian_ar_detect_app_output_%t%.nc \ --steps_per_file 365 --n_threads ${n_threads} --verbose @@ -40,7 +41,7 @@ else ${app_prefix}/teca_cartesian_mesh_diff \ --reference_dataset "${data_root}/test_bayesian_ar_detect_app_ref.*\.nc" \ --test_dataset "test_bayesian_ar_detect_app_output.*\.nc" \ - --arrays ar_probability ar_binary_tag --verbose + --arrays ar_probability ar_binary_tag ar_wgtd_IVT --verbose # clean up rm test_bayesian_ar_detect_app_output*.nc From 87f4d22cb0c150199ccfdb3a223f9fcbe0d67231 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 12 Mar 2020 10:51:34 -0700 Subject: [PATCH 038/180] add teca_priority_queue Adds indirect 
priority queue that supports random access modification of priorities in-situ. users of the queue initialize the queue with keys and an accessor callable that converts keys to priority values. support for generic key type is provided as well as an optimized special case for integer ordinal keys. Keys are arranged according to the specified predicate (std::less is the default). pop/peak operations return the key with the "highest" priority. --- data/teca_priority_queue.h | 453 +++++++++++++++++++++++++++++++++++ test/CMakeLists.txt | 5 + test/test_priority_queue.cpp | 193 +++++++++++++++ 3 files changed, 651 insertions(+) create mode 100644 data/teca_priority_queue.h create mode 100644 test/test_priority_queue.cpp diff --git a/data/teca_priority_queue.h b/data/teca_priority_queue.h new file mode 100644 index 000000000..6f7a00a82 --- /dev/null +++ b/data/teca_priority_queue.h @@ -0,0 +1,453 @@ +#ifndef teca_priority_queue_h +#define teca_priority_queue_h + +#include +#include +#include +#include +#include +#include + +// use one of the following aliases for key_map_t. key_map_t +// is the type of container used to hold the locations of user provided +// keys in the heap. +// for keys that are not ordinals 0 to N, use the mapped_key_t alias +// for contiguious keys 0 to N (faster), use the contiguous_key_t alias +template +using mapped_key_t = std::map; + +using contiguous_key_t = std::vector; + +// use one of the following objects to provide the priority for the +// given key. these objects internally point to the container index by +// key value holding the associated priority.
+// for keys that are not ordinals 0 to N, use the mapped_key_priority_t alias +// for contiguious keys 0 to N (faster), use the contiguous_key_priority_t alias +template +struct mapped_key_priority +{ + using key_map_t = mapped_key_t; + + mapped_key_priority(std::map &mp) : m_map(&mp) + {} + + priority_t operator()(key_t i) + { return (*m_map)[i]; } + + std::map *m_map; +}; + +template +struct contiguous_key_priority +{ + using key_map_t = contiguous_key_t; + + contiguous_key_priority(const std::vector &vec) : + m_vec(vec.data()) + {} + + priority_t operator()(key_t i) + { return m_vec[i]; } + + const priority_t *m_vec; +}; + +// forward declare the queue +template , + typename key_map_t=contiguous_key_t> +class teca_priority_queue; + +// pointer type +template +using p_teca_priority_queue = std::shared_ptr< + teca_priority_queue>; + +/// an indirect priority queue that supports random access modification of priority +/** + * an indirect priority queue that supports random access modification of + * priority the queue works with user provided keys and lookup functor that + * converts keys to priorities. + * + * teplate parameters: + * + * key_t - type of the user provided keys + * + * lookup_t - callable that implements: priority_t operator()(key_t key) + * + * comp_t - callable that implements the predicate: bool(key_t, key_t), + * used to enforce heap order. (std::less) + * + * key_map_t - type of container used to track the position in the heap + * of the keys. The default, a vector, is only valid for + * interger ordinals from 0 to N. Use mapped_key_t + * for all other cases. (contiguous_key_t) + * + * typical usage: + * + * construct a container of objects to prioritize, and initialize a lookup + * object that given a key returns the priority of the coresponding object. + * create an instance of the priority_queue and push the key values. as keys + * are pushed heap ording is imposed, this is why objects need to be in place + * before pushing keys. 
when an object's prioriy has been changed one must call + * modified passing the key of the object. the location of each object is + * tracked and the queue will reprioritize itself after modification. + * + * recomendation: + * + * to obtain high performance, it's best to avoid using std::function for + * lookup operations. Instead, write a small functor so that the compiler + * can inline lookup calls. + * + * don't forget to change key_map_t to mapped_key_t if + * keys are not integer ordinals 0 to N. +*/ +template +class teca_priority_queue +{ +public: + + ~teca_priority_queue() = default; + + // return a new instance, must pass the lookup operator that + // translates keys into priority values + static p_teca_priority_queue + New(lookup_t &lookup, unsigned long init_size=256, + unsigned long block_size=256) + { + p_teca_priority_queue ptr( + new teca_priority_queue< + key_t, lookup_t, comp_t, key_map_t>( + lookup, init_size, block_size)); + return ptr; + } + + // return true if the queue has no keys + bool empty() { return m_end == 0; } + + + /// add a value into the queue + void push(const key_t &key); + + /// free all resources and reset the queue to an empty state + void clear(); + + // restore heap condition after an id is modified + void modified(const key_t &key); + + // return the id at the top of the queue, and remove it. + // internal memory is not deallocated. + key_t pop(); + + // return the id in the top of queue + key_t peak(); + + // print the state of the queue + void to_stream(std::ostream &os, bool priorities = true); + +protected: + teca_priority_queue() = default; + + teca_priority_queue(const teca_priority_queue &) = delete; + void operator=(const teca_priority_queue &) = delete; + + // initialize the queue with an comperator, the initial size, and declare + // the amount to grow the queue by during dynamic resizing. 
+ template + teca_priority_queue(lookup_t lookup, unsigned long init_size, + unsigned long block_size, + typename std::enable_if, u>::value>::type * = 0); + + template + teca_priority_queue(lookup_t lookup, unsigned long init_size, + unsigned long block_size, + typename std::enable_if, u>::value>::type * = 0); + + // grow the queue to the new size + template + void grow(unsigned long n, + typename std::enable_if, u>::value>::type * = 0); + + // grow the queue to the new size + template + void grow(unsigned long n, + typename std::enable_if, u>::value>::type * = 0); + + // restore the heap condition starting from here + // and working up + void up_heapify(unsigned long id); + + // restore the heap condition starting from here + // and working down + void down_heapify(unsigned long id); + + // exchange two items + void swap(unsigned long i, unsigned long j); + + // helpers for walking tree + unsigned long left_child(unsigned long a_id) + { return a_id*2; } + + unsigned long right_child(unsigned long a_id) + { return a_id*2 + 1; } + + unsigned long parent(unsigned long a_id) + { return a_id/2; } + + +private: + lookup_t m_lookup; // callable to turn keys into priority values + std::vector m_ids; // array of keys + key_map_t m_locs; // map indexed by key to find the current position in the queue + unsigned long m_size; // size of the key buffer + unsigned long m_end; // index of the last key in the queue + unsigned long m_block_size; // ammount to grow the dynamicaly alloacted buffers by +}; + + +// -------------------------------------------------------------------------- +template +void teca_priority_queue::push(const key_t &key) +{ + // extend the queue + ++m_end; + + // verify that there is space, if not allocate it + if (m_end >= m_size) + this->grow(m_size + m_block_size); + + // add key and it's location + m_ids[m_end] = key; + m_locs[key] = m_end; + + // restore heap condition + this->up_heapify(m_end); +} + +// 
-------------------------------------------------------------------------- +template +void teca_priority_queue::clear() +{ + m_ids.clear(); + m_locs.clear(); + m_size = 0; + m_end = 0; +} + +// -------------------------------------------------------------------------- +template +void teca_priority_queue::modified(const key_t &key) +{ + // find the loc of the modified key + unsigned long id = m_locs[key]; + // fix up then down + this->up_heapify(id); + this->down_heapify(id); +} + +// -------------------------------------------------------------------------- +template +key_t teca_priority_queue::pop() +{ + key_t id_1 = m_ids[1]; + if (m_end > 0) + { + this->swap(1, m_end); + --m_end; + this->down_heapify(1); + } + return id_1; +} + +// -------------------------------------------------------------------------- +template +key_t teca_priority_queue::peak() +{ + return m_ids[1]; +} + +// -------------------------------------------------------------------------- +template +void teca_priority_queue::to_stream(std::ostream &os, bool priorities) +{ + long log_end = std::log2(m_end); + long n_rows = log_end + 1; + unsigned long q = 0; + for (long i = 0; i < n_rows; ++i) + { + if (q > m_end) + break; + + long n_elem = 1 << i; + long isp = (1 << (n_rows - 1 - i)) - 1; + long bsp = 2*isp + 1; + + for (long j = 0; j < isp; ++j) + os << " "; + + for (long j = 0; (j < n_elem) && (q < m_end); ++j) + { + if (priorities) + os << m_lookup(m_ids[++q]); + else + os << m_ids[++q]; + for (long k = 0; k < bsp; ++k) + os << " "; + } + + os << std::endl; + } +} + +// -------------------------------------------------------------------------- +template +template +teca_priority_queue::teca_priority_queue(lookup_t lookup, + unsigned long init_size, unsigned long block_size, + typename std::enable_if, u>::value>::type *) : + m_lookup(lookup), m_size(init_size), m_end(0), + m_block_size(block_size) +{ + m_ids.resize(init_size); + m_locs.resize(init_size); +} + +// 
-------------------------------------------------------------------------- +template +template +teca_priority_queue::teca_priority_queue(lookup_t lookup, + unsigned long init_size, unsigned long block_size, + typename std::enable_if, u>::value>::type *) : + m_lookup(lookup), m_size(init_size), m_end(0), + m_block_size(block_size) +{ + m_ids.resize(init_size); +} + +// -------------------------------------------------------------------------- +template +template +void teca_priority_queue::grow(unsigned long n, + typename std::enable_if, u>::value>::type *) +{ + m_ids.resize(n); + m_locs.resize(n); + m_size = n; +} + +// -------------------------------------------------------------------------- +template +template +void teca_priority_queue::grow(unsigned long n, + typename std::enable_if, u>::value>::type *) +{ + m_ids.resize(n); + m_size = n; +} + + + // -------------------------------------------------------------------------- +template +void teca_priority_queue::up_heapify(unsigned long id) +{ + // if at tree root then stop + if (id < 2) + return; + + // else find parent and enforce heap order + comp_t comp; + unsigned long id_p = parent(id); + if (comp(m_lookup(m_ids[id]), m_lookup(m_ids[id_p]))) + this->swap(id, id_p); + + // continue up toward the root + this->up_heapify(id_p); +} + +// -------------------------------------------------------------------------- +template +void teca_priority_queue::down_heapify(unsigned long id) +{ + // if no current node then stop + if (id > m_end) + return; + + // if no left child then stop + unsigned long lc = left_child(id); + if (lc > m_end) + return; + + // find the smaller child + comp_t comp; + unsigned long smallc = lc; + unsigned long rc = right_child(id); + if (rc <= m_end) + smallc = comp(m_lookup(m_ids[lc]), + m_lookup(m_ids[rc])) ? 
lc : rc; + + // if in heap order then stop + if (comp(m_lookup(m_ids[id]), m_lookup(m_ids[smallc]))) + return; + + // else swap and continue + this->swap(id, smallc); + this->down_heapify(smallc); +} + +// -------------------------------------------------------------------------- +template +void teca_priority_queue::swap(unsigned long i, unsigned long j) +{ + key_t key_i = m_ids[i]; + key_t key_j = m_ids[j]; + // exchange keys + m_ids[i] = key_j; + m_ids[j] = key_i; + // update locs + m_locs[key_j] = i; + m_locs[key_i] = j; +} + +template +std::ostream & operator<<(std::ostream &os, p_teca_priority_queue &q) +{ + q->to_stream(os); + return os; +} + +#endif diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 0b5d3ffe4..09f8ec9e4 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -729,3 +729,8 @@ teca_add_test(test_unpack_data COMMAND test_unpack_data "${TECA_DATA_ROOT}/test_unpack_data.nc" FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) + +teca_add_test(test_priority_queue + SOURCES test_priority_queue.cpp + LIBS teca_core teca_data ${teca_test_link} + COMMAND test_priority_queue 57 1) diff --git a/test/test_priority_queue.cpp b/test/test_priority_queue.cpp new file mode 100644 index 000000000..664f68981 --- /dev/null +++ b/test/test_priority_queue.cpp @@ -0,0 +1,193 @@ +#include "teca_priority_queue.h" +#include "teca_common.h" + +#include +#include +#include + +// using contiguous ordinal keys +// randomly generate num_vals priorities, construct the queue, and update the +// priorities num_vals/2 times. finally verify that priorities are in sorted +// order. 
+int test_contiguous(int num_vals) +{ + // generate some values to prioritize + std::vector vals; + for (int i = 0; i < num_vals; ++i) + { + int val = rand() % num_vals; + vals.push_back(val); + } + + std::cerr << "vals="; + for (int i = 0; i < num_vals; ++i) + std::cerr << vals[i] << " "; + std::cerr << std::endl; + + /*std::function lookup = [&](int i) -> int + { return vals[i]; };*/ + + contiguous_key_priority lookup(vals); + auto q = teca_priority_queue::New(lookup); + + // test push keys + for (int i = 0; i < num_vals; ++i) + q->push(i); + + std::cerr << "initial state " << std::endl + << q << std::endl; + + // test updating priority of any element + for (int i = 0; i < num_vals/2; ++i) + { + int j = rand() % num_vals; + int vj = vals[j]; + int dvj = num_vals * (i % 2 == 0 ? -1 : 1); + + vals[j] += dvj; + + q->modified(j); + + std::cerr << "after vals[" << j << "] = " << vj + << " + " << dvj << " = " << vals[j] << std::endl + << q << std::endl; + } + + // test pop + int cur = 0; + int prev = vals[q->peak()]; + std::cerr << "sorted = "; + while (!q->empty()) + { + cur = vals[q->pop()]; + std::cerr << cur << " "; + if (prev > cur) + { + std::cerr << std::endl; + TECA_ERROR( + << "ERROR: heap ordering is violated! " + << prev << " > " << cur) + return -1; + } + prev = cur; + } + std::cerr << std::endl; + + return 0; +} + +// using unordered keys +// randomly generate num_vals priorities, construct the queue, and update the +// priorities num_vals/2 times. finally verify that priorities are in sorted +// order. 
+int test_mapped(int num_vals) +{ + using map_t = std::map; + using map_it_t = map_t::iterator; + + // generate some values to prioritize + map_t vals; + for (int i = 0; i < num_vals; ++i) + { + int key = 3*i; + int val = rand() % num_vals; + vals[key] = val; + } + + std::cerr << "vals="; + for (map_it_t it = vals.begin(); it != vals.end(); ++it) + std::cerr << "(key = " << it->first << ", value = " << it->second << ") "; + std::cerr << std::endl; + + mapped_key_priority lookup(vals); + auto q = teca_priority_queue, std::map>::New(lookup); + + // test push keys + std::cerr << "vals="; + for (map_it_t it = vals.begin(); it != vals.end(); ++it) + q->push(it->first); + + std::cerr << "initial state " << std::endl + << q << std::endl; + + // test updating priority of any element + for (int i = 0; i < num_vals/2; ++i) + { + int j = 3*(rand() % num_vals); + int vj = vals[j]; + int dvj = num_vals * (i % 2 == 0 ? -1 : 1); + + vals[j] += dvj; + + q->modified(j); + + std::cerr << "after vals[" << j << "] = " << vj + << " + " << dvj << " = " << vals[j] << std::endl + << q << std::endl; + } + + // test pop + int cur = 0; + int prev = vals[q->peak()]; + std::cerr << "sorted = "; + while (!q->empty()) + { + cur = vals[q->pop()]; + std::cerr << cur << " "; + if (prev < cur) + { + std::cerr << std::endl; + TECA_ERROR( + << "Heap ordering is violated! 
" + << prev << " < " << cur) + return -1; + } + prev = cur; + } + std::cerr << std::endl; + + return 0; +} + +// run the test +int main(int argc, char **argv) +{ + if (argc < 3) + { + std::cerr << "usage: a.out [num vals] [rng seed]" << std::endl; + return -1; + } + + + int num_vals = atoi(argv[1]); + int seed = atoi(argv[2]); + + srand(seed); + + std::cerr + << "============================================" << std::endl + << "Test contiguous keys" << std::endl + << "============================================" << std::endl; + + if (test_contiguous(num_vals)) + { + TECA_ERROR("Test contiguous failed") + return -1; + } + + std::cerr + << "============================================" << std::endl + << "Test mapped keys" << std::endl + << "============================================" << std::endl; + + if (test_mapped(num_vals)) + { + TECA_ERROR("Test mapped failed") + return -1; + } + + return 0; +} + + From 35407adb904ac8b509255ffb943f57c81abb9b7a Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 5 Mar 2021 07:50:48 -0800 Subject: [PATCH 039/180] l2_norm process teca_mesh --- alg/teca_l2_norm.cxx | 15 +++++++-------- alg/teca_l2_norm.h | 2 ++ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/alg/teca_l2_norm.cxx b/alg/teca_l2_norm.cxx index 5142add16..df2aed92f 100644 --- a/alg/teca_l2_norm.cxx +++ b/alg/teca_l2_norm.cxx @@ -1,6 +1,6 @@ #include "teca_l2_norm.h" -#include "teca_cartesian_mesh.h" +#include "teca_mesh.h" #include "teca_array_collection.h" #include "teca_variant_array.h" #include "teca_metadata.h" @@ -283,12 +283,12 @@ const_p_teca_dataset teca_l2_norm::execute( (void)port; // get the input mesh - const_p_teca_cartesian_mesh in_mesh - = std::dynamic_pointer_cast(input_data[0]); + const_p_teca_mesh in_mesh + = std::dynamic_pointer_cast(input_data[0]); if (!in_mesh) { - TECA_ERROR("Failed to compute l2 norm. dataset is not a teca_cartesian_mesh") + TECA_ERROR("Failed to compute l2 norm. 
dataset is not a teca_mesh") return nullptr; } @@ -359,11 +359,10 @@ const_p_teca_dataset teca_l2_norm::execute( // create the output mesh, pass everything through, and // add the l2 norm array - p_teca_cartesian_mesh out_mesh = teca_cartesian_mesh::New(); + p_teca_mesh out_mesh = std::static_pointer_cast + (std::const_pointer_cast(in_mesh)->new_shallow_copy()); - out_mesh->shallow_copy(std::const_pointer_cast(in_mesh)); - - out_mesh->get_point_arrays()->append( + out_mesh->get_point_arrays()->set( this->get_l2_norm_variable(request), l2_norm); return out_mesh; diff --git a/alg/teca_l2_norm.h b/alg/teca_l2_norm.h index f67e23581..21def4b80 100644 --- a/alg/teca_l2_norm.h +++ b/alg/teca_l2_norm.h @@ -40,6 +40,8 @@ class teca_l2_norm : public teca_algorithm protected: teca_l2_norm(); + // helpers to get the variable names from either the incoming + // request or the class member variable. std::string get_component_0_variable(const teca_metadata &request); std::string get_component_1_variable(const teca_metadata &request); std::string get_component_2_variable(const teca_metadata &request); From d470100e82aaab8ff042dcb5bff3480098e03e43 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 5 Mar 2021 09:22:06 -0800 Subject: [PATCH 040/180] add indexed_dataset_cache * teca_indexed_dataset_cache an algorithm that implements a thread safe cache where when predetermined maximum cahce size is exceeded the least recently used dataset is removed from the cache. This algorithm is used to reduce I/O demand in applications that repeatedly access the same datasets. Examples include computing a moving average over time dimension, and serving land-sea mask, or surface elevation field. * updated the temporal_average test to use the new algorithm. 
* did preliminary testing of threaded execution, but a threaded test still needs to be developed --- .travis.yml | 2 +- alg/CMakeLists.txt | 1 + alg/teca_indexed_dataset_cache.cxx | 351 +++++++++++++++++++++++++++++ alg/teca_indexed_dataset_cache.h | 61 +++++ test/CMakeLists.txt | 4 +- test/test_temporal_average.cpp | 87 +++++-- 6 files changed, 479 insertions(+), 27 deletions(-) create mode 100644 alg/teca_indexed_dataset_cache.cxx create mode 100644 alg/teca_indexed_dataset_cache.h diff --git a/.travis.yml b/.travis.yml index 5b0a32b05..4c7ad5daf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ env: - BUILD_TYPE=Debug - TECA_DIR=/travis_teca_dir - TECA_PYTHON_VERSION=3 - - TECA_DATA_REVISION=106 + - TECA_DATA_REVISION=107 jobs: - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=TRUE - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=FALSE diff --git a/alg/CMakeLists.txt b/alg/CMakeLists.txt index bd3077adc..531de38db 100644 --- a/alg/CMakeLists.txt +++ b/alg/CMakeLists.txt @@ -24,6 +24,7 @@ set(teca_alg_cxx_srcs teca_evaluate_expression.cxx teca_face_to_cell_centering.cxx teca_geography.cxx + teca_indexed_dataset_cache.cxx teca_integrated_vapor_transport.cxx teca_l2_norm.cxx teca_latitude_damper.cxx diff --git a/alg/teca_indexed_dataset_cache.cxx b/alg/teca_indexed_dataset_cache.cxx new file mode 100644 index 000000000..f907edc00 --- /dev/null +++ b/alg/teca_indexed_dataset_cache.cxx @@ -0,0 +1,351 @@ +#include "teca_indexed_dataset_cache.h" + +#include "teca_metadata.h" +#include "teca_priority_queue.h" + +#include +#include +#include +#include + +#include +#include + +#if defined(TECA_HAS_BOOST) +#include +#endif + +//#define TECA_DEBUG + +struct cache_entry +{ + cache_entry() : m_data(nullptr), m_keep(1) {} + + std::mutex m_mutex; // for access to the cache and time + std::condition_variable m_cond; // use to wait for another thread to provide the data + const_p_teca_dataset 
m_data; // the dataset + unsigned long m_keep; // when 0 safe to delete the element +}; + +using p_cache_entry = std::shared_ptr; + +using index_t = unsigned long; +using priority_t = unsigned long; + +using data_map_t = std::map; +using use_map_t = std::map; + +using heap_t = teca_priority_queue, // to look up priorities + std::less<>, // heapify by smallest + mapped_key_t>; // location tracking container + +using p_heap_t = std::shared_ptr; + +struct teca_indexed_dataset_cache::internals_t +{ + internals_t() : m_current_time(0) + { + mapped_key_priority priority_lookup(m_time_used); + m_heap = heap_t::New(priority_lookup); + } + + std::mutex m_mutex; // for access to the following + p_heap_t m_heap; // heap with least recently used dataset at the top + use_map_t m_time_used; // the use time of each cached dataset + data_map_t m_data; // cached data + priority_t m_current_time; // the current time of use +}; + + +// -------------------------------------------------------------------------- +teca_indexed_dataset_cache::teca_indexed_dataset_cache() : + max_cache_size(0), internals(new internals_t) +{ + this->set_number_of_input_connections(1); + this->set_number_of_output_ports(1); +} + +// -------------------------------------------------------------------------- +teca_indexed_dataset_cache::~teca_indexed_dataset_cache() +{ + delete this->internals; +} + +#if defined(TECA_HAS_BOOST) +// -------------------------------------------------------------------------- +void teca_indexed_dataset_cache::get_properties_description( + const std::string &prefix, options_description &global_opts) +{ + options_description opts("Options for " + + (prefix.empty()?"teca_indexed_dataset_cache":prefix)); + + opts.add_options() + + TECA_POPTS_GET(unsigned long, prefix, max_cache_size, + "Sets the maximum number of datasets to cache.") + + ; + + this->teca_algorithm::get_properties_description(prefix, opts); + + global_opts.add(opts); +} + +// 
-------------------------------------------------------------------------- +void teca_indexed_dataset_cache::set_properties( + const std::string &prefix, variables_map &opts) +{ + this->teca_algorithm::set_properties(prefix, opts); + + TECA_POPTS_SET(opts, unsigned long, prefix, max_cache_size) +} +#endif + +// -------------------------------------------------------------------------- +void teca_indexed_dataset_cache::clear_cache() +{ + { + std::lock_guard lock(this->internals->m_mutex); + this->internals->m_heap->clear(); + this->internals->m_time_used.clear(); + this->internals->m_data.clear(); + this->internals->m_current_time = 0; + } +} + +// -------------------------------------------------------------------------- +std::vector teca_indexed_dataset_cache::get_upstream_request( + unsigned int port, + const std::vector &input_md, + const teca_metadata &request) +{ +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() + << "teca_indexed_dataset_cache::get_upstream_request" << std::endl; +#endif + (void)port; + (void)input_md; + + std::vector up_reqs; + + // force the user to set the cache size + if (this->max_cache_size == 0) + { + TECA_ERROR("max_cache_size is 0, you must set the" + " cache size before use.") + return up_reqs; + } + + // get the requested index + std::string request_key; + if (request.get("index_request_key", request_key)) + { + TECA_ERROR("Failed to locate the index_request_key") + return up_reqs; + } + + index_t index = 0; + if (request.get(request_key, index)) + { + TECA_ERROR("Failed to get the requested index using the" + " index_request_key \"" << request_key << "\"") + return up_reqs; + } + + { + std::lock_guard lock(this->internals->m_mutex); + + // is this index in the cache? 
+ if (this->internals->m_time_used.count(index)) + { + // yes, update the use time + this->internals->m_time_used[index] = ++this->internals->m_current_time; + this->internals->m_heap->modified(index); + + // make a note that it needs to be served one more time before + // it can be removed + p_cache_entry elem = this->internals->m_data[index]; + + { + std::lock_guard elock(elem->m_mutex); + ++elem->m_keep; + } + +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() << "update entry " << index + << " keep=" << elem->m_keep << std::endl; +#endif + return up_reqs; + } + + // no, not in cache + // set the use time and put in the heap + this->internals->m_time_used[index] = ++this->internals->m_current_time; + this->internals->m_heap->push(index); + + // add an empty cache enrty + this->internals->m_data[index] = std::make_shared(); + +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() << "add entry " << index << " " + << this->internals->m_current_time << std::endl; +#endif + } + + // generate the request for this index + up_reqs.push_back(request); + return up_reqs; +} + +// -------------------------------------------------------------------------- +const_p_teca_dataset teca_indexed_dataset_cache::execute( + unsigned int port, + const std::vector &input_data, + const teca_metadata &request) +{ +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() + << "teca_indexed_dataset_cache::execute" << std::endl; +#endif + (void)port; + + // get the requested index + std::string request_key; + if (request.get("index_request_key", request_key)) + { + TECA_ERROR("Failed to locate the index_request_key") + return nullptr; + } + + index_t index = 0; + if (request.get(request_key, index)) + { + TECA_ERROR("Failed to get the requested index using the" + " index_request_key \"" << request_key << "\"") + return nullptr; + } + + const_p_teca_dataset data_out; + + // get the cache element associated with the requested index + p_cache_entry elem; + { + std::lock_guard 
lock(this->internals->m_mutex);; + data_map_t::iterator it = this->internals->m_data.find(index); + if (it == this->internals->m_data.end()) + { + TECA_ERROR("The cache is in an invalid state") + return nullptr; + } + elem = it->second; + } + + if (input_data.size()) + { + // add new data to the cache + { + std::lock_guard elock(elem->m_mutex); + elem->m_data = input_data[0]; + --elem->m_keep; + } + // notify other threads that may be waiting for this data + elem->m_cond.notify_all(); +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() << "add data " << index + << " keep=" << elem->m_keep << std::endl; +#endif + } + else + { + // fetch existing data from the cache + if (!elem->m_data) + { + // data is not yet ready, it will be provided by another thread + std::unique_lock elock(elem->m_mutex); + if (!elem->m_data) + { + // data is not ready wait for another thread to provide + elem->m_cond.wait(elock, [&]{ return bool(elem->m_data); }); + --elem->m_keep; + } + } + else + { + // data is ready + std::lock_guard elock(elem->m_mutex); + --elem->m_keep; + } +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() << "use data " << index + << " keep=" << elem->m_keep << std::endl; +#endif + } + + // return the dataset + data_out = elem->m_data; + + // enforce the max cache size + { + std::lock_guard lock(this->internals->m_mutex); + unsigned long n_cached = this->internals->m_time_used.size(); + if (n_cached > this->max_cache_size) + { +#ifdef TECA_DEBUG + std::cerr << "cache too large " << n_cached << std::endl; + this->internals->m_heap->to_stream(std::cerr, false); +#endif + // might have to save some elements if they haven't been served yet + std::vector save; + save.reserve(n_cached); + + unsigned long n_to_rm = n_cached - this->max_cache_size; + + // make one pass over the cache in lru order, or stop if we find + // enough elements that can be deleted + for (unsigned long i = 0; n_to_rm && (i < n_cached); ++i) + { + index_t idx = this->internals->m_heap->pop(); + + 
p_cache_entry elem = this->internals->m_data[idx]; + + // have all requests for the data been served? + unsigned long keep = 0; + { + std::lock_guard elock(elem->m_mutex); + keep = elem->m_keep; + } + if (keep) + { + // no, delete later + save.push_back(idx); +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() << "save " + << idx << " keep=" << keep << std::endl; +#endif + } + else + { + // yes, delete now + this->internals->m_data.erase(idx); + this->internals->m_time_used.erase(idx); + --n_to_rm; +#ifdef TECA_DEBUG + std::cerr << teca_parallel_id() << "evict " + << idx << std::endl; +#endif + } + } + + // put elements we couldn't remove because they haven't been + // served yet back on the heap + unsigned long n = save.size(); + for (unsigned long i = 0; i < n; ++i) + { + this->internals->m_heap->push(save[i]); + } + } + } + + return data_out; +} diff --git a/alg/teca_indexed_dataset_cache.h b/alg/teca_indexed_dataset_cache.h new file mode 100644 index 000000000..96c909836 --- /dev/null +++ b/alg/teca_indexed_dataset_cache.h @@ -0,0 +1,61 @@ +#ifndef teca_indexed_dataset_cache_h +#define teca_indexed_dataset_cache_h + +#include "teca_shared_object.h" +#include "teca_algorithm.h" +#include "teca_metadata.h" + +#include +#include + +TECA_SHARED_OBJECT_FORWARD_DECL(teca_indexed_dataset_cache) + +/// Caches N datasets such that repeated requests for the same dataset are served from the cache +/** + * A cache storing up to N datasets. Datasets are identified using their + * request index. Repeated requests for the same dataset (ie same index) are + * served from the cache. When more than N unique datasets have been requested + * the cache is modified such that the least recently used dataset is replaced. 
+ */ +class teca_indexed_dataset_cache : public teca_algorithm +{ +public: + TECA_ALGORITHM_STATIC_NEW(teca_indexed_dataset_cache) + TECA_ALGORITHM_DELETE_COPY_ASSIGN(teca_indexed_dataset_cache) + TECA_ALGORITHM_CLASS_NAME(teca_indexed_dataset_cache) + ~teca_indexed_dataset_cache(); + + // report/initialize to/from Boost program options + // objects. + TECA_GET_ALGORITHM_PROPERTIES_DESCRIPTION() + TECA_SET_ALGORITHM_PROPERTIES() + + // set/get the max number of datasets to cache. + TECA_ALGORITHM_PROPERTY(unsigned long, max_cache_size) + + // clear any cached data. + void clear_cache(); + +protected: + teca_indexed_dataset_cache(); + +private: + + std::vector get_upstream_request( + unsigned int port, + const std::vector &input_md, + const teca_metadata &request) override; + + const_p_teca_dataset execute( + unsigned int port, + const std::vector &input_data, + const teca_metadata &request) override; + +private: + unsigned long max_cache_size; + + struct internals_t; + internals_t *internals; +}; + +#endif diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 09f8ec9e4..23313eb08 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -254,8 +254,8 @@ teca_add_test(test_temporal_average SOURCES test_temporal_average.cpp LIBS teca_core teca_data teca_io teca_alg ${teca_test_link} COMMAND test_temporal_average - "${TECA_DATA_ROOT}/cam5_1_amip_run2.cam2.h2.1991-10-01-10800.nc" - test_temporal_average_%t%.%e% 0 -1 3 U850 + "${TECA_DATA_ROOT}/prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc$" + "${TECA_DATA_ROOT}/test_temporal_average" 0 -1 3 1 prw FEATURES ${TECA_HAS_NETCDF} REQ_TECA_DATA) diff --git a/test/test_temporal_average.cpp b/test/test_temporal_average.cpp index a4ee13b0f..de07eaa8f 100644 --- a/test/test_temporal_average.cpp +++ b/test/test_temporal_average.cpp @@ -1,68 +1,107 @@ #include "teca_cf_reader.h" #include "teca_normalize_coordinates.h" +#include "teca_indexed_dataset_cache.h" #include "teca_temporal_average.h" #include 
"teca_cartesian_mesh_writer.h" +#include "teca_cf_writer.h" +#include "teca_dataset_diff.h" #include "teca_index_executive.h" #include "teca_system_interface.h" +#include "teca_system_util.h" #include #include #include using namespace std; -// example use -// ./test/test_cf_reader ~/work/teca/data/'cam5_1_amip_run2.cam2.h2.1991-10-.*' tmp 0 -1 PS - int main(int argc, char **argv) { teca_system_interface::set_stack_trace_on_error(); - if (argc < 7) + if (argc < 8) { - cerr << endl << "Usage error:" << endl - << "test_cf_reader [input regex] [output] [first step] [last step] [filter width] [array] [array] ..." << endl - << endl; + std::cerr << std::endl << "Usage error:" << std::endl + << "test_temporal_average [input regex] [baseline] [first step] [last step]" + " [filter width] [n threads] [array] [array] ..." << std::endl + << std::endl; return -1; } // parse command line string regex = argv[1]; - string output = argv[2]; + string baseline = argv[2]; long first_step = atoi(argv[3]); long last_step = atoi(argv[4]); int filter_width = atoi(argv[5]); - vector arrays; - arrays.push_back(argv[6]); - for (int i = 7; i < argc; ++i) + int n_threads = atoi(argv[6]); + std::vector arrays; + arrays.push_back(argv[7]); + for (int i = 8; i < argc; ++i) arrays.push_back(argv[i]); // create the cf reader p_teca_cf_reader r = teca_cf_reader::New(); r->set_files_regex(regex); + // normalize coords p_teca_normalize_coordinates c = teca_normalize_coordinates::New(); c->set_input_connection(r->get_output_port()); + // ds cache + p_teca_indexed_dataset_cache dsc = teca_indexed_dataset_cache::New(); + dsc->set_input_connection(c->get_output_port()); + dsc->set_max_cache_size(2*n_threads*filter_width); + + // temporal avg p_teca_temporal_average a = teca_temporal_average::New(); a->set_filter_width(filter_width); a->set_filter_type(teca_temporal_average::backward); - a->set_input_connection(c->get_output_port()); + a->set_input_connection(dsc->get_output_port()); + + + bool do_test = 
true; + teca_system_util::get_environment_variable("TECA_DO_TEST", do_test); + if (do_test) + { + std::cerr << "running the test..." << std::endl; - // create the vtk writer connected to the cf reader - p_teca_cartesian_mesh_writer w = teca_cartesian_mesh_writer::New(); - w->set_file_name(output); - w->set_input_connection(a->get_output_port()); + baseline += ".*\\.nc$"; - // set the executive on the writer to stream time steps - p_teca_index_executive exec = teca_index_executive::New(); - exec->set_start_index(first_step); - exec->set_end_index(last_step); - exec->set_arrays(arrays); + p_teca_cf_reader br = teca_cf_reader::New(); + br->set_files_regex(baseline); - w->set_executive(exec); + // executive + p_teca_index_executive rex = teca_index_executive::New(); + rex->set_start_index(first_step); + rex->set_end_index(last_step); + rex->set_arrays(arrays); - // run the pipeline - w->update(); + p_teca_dataset_diff diff = teca_dataset_diff::New(); + diff->set_input_connection(0, br->get_output_port()); + diff->set_input_connection(1, a->get_output_port()); + diff->set_verbose(1); + diff->set_executive(rex); + // TODO : test with threads + //diff->set_thread_pool_size(n_threads); + diff->update(); + } + else + { + std::cerr << "writing the baseline..." 
<< std::endl; + + baseline += "_%t%.nc"; + + // writer + p_teca_cf_writer w = teca_cf_writer::New(); + w->set_input_connection(a->get_output_port()); + w->set_thread_pool_size(n_threads); + w->set_point_arrays(arrays); + w->set_file_name(baseline); + w->set_first_step(first_step); + w->set_last_step(last_step); + w->set_steps_per_file(10000); + w->update(); + } return 0; } From 0bd2d21fdf2959f9e968fb2680dd1a70443ac292 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 5 Mar 2021 09:29:43 -0800 Subject: [PATCH 041/180] test temporarily disable test_bayesian_ar_detect_app_packed on travis-ci --- test/apps/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/apps/CMakeLists.txt b/test/apps/CMakeLists.txt index 007e11c58..0cc400b38 100644 --- a/test/apps/CMakeLists.txt +++ b/test/apps/CMakeLists.txt @@ -106,7 +106,7 @@ teca_add_app_test(test_bayesian_ar_detect_app_packed_data_mpi COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_bayesian_ar_detect_app_packed_data.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} -1 ${MPIEXEC} ${TEST_CORES} - FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} + FEATURES ${TECA_HAS_NETCDF_MPI} ${TECA_HAS_MPI} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_app_test(test_integrated_vapor_transport_app_threads From 2e69b8f62ac97291f77d6ec80afc72830c22aa32 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 5 Mar 2021 09:31:17 -0800 Subject: [PATCH 042/180] temporal_average improvements * handle empty inputs gracefully * improve request generation efficiency * improve diagnostic messages --- alg/teca_temporal_average.cxx | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/alg/teca_temporal_average.cxx b/alg/teca_temporal_average.cxx index 5abefaee3..52d95c1a5 100644 --- a/alg/teca_temporal_average.cxx +++ b/alg/teca_temporal_average.cxx @@ -132,22 +132,22 @@ std::vector teca_temporal_average::get_upstream_request( TECA_ERROR("Invalid \"filter_type\" " <<
this->filter_type) return up_reqs; } + first = std::max(0l, first); + last = std::min(num_steps - 1, last); + // make a request for each time that will be used in the + // average for (long i = first; i <= last; ++i) { - // make a request for each time that will be used in the - // average - if ((i >= 0) && (i < num_steps)) - { + teca_metadata up_req(request); + up_req.set("time_step", i); + up_reqs.push_back(up_req); + } + #ifdef TECA_DEBUG - cerr << teca_parallel_id() - << "request time_step " << i << endl; + cerr << teca_parallel_id() << "processing " << active_step + << " request " << first << " - " << last << endl; #endif - teca_metadata up_req(request); - up_req.set("time_step", i); - up_reqs.push_back(up_req); - } - } return up_reqs; } @@ -164,6 +164,10 @@ const_p_teca_dataset teca_temporal_average::execute( #endif (void)port; + // nothing to do + if ((input_data.size() < 1) || !input_data[0]) + return nullptr; + // create output and copy metadata, coordinates, etc p_teca_mesh out_mesh = std::dynamic_pointer_cast(input_data[0]->new_instance()); From 934217007c8a41ca51065e9a38c3939889e15a98 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 5 Mar 2021 09:47:55 -0800 Subject: [PATCH 043/180] rename temporal_average to simple_moving_average this should avoid confusion with temporal reductions --- alg/CMakeLists.txt | 2 +- ...age.cxx => teca_simple_moving_average.cxx} | 20 +++++++++---------- ...average.h => teca_simple_moving_average.h} | 18 ++++++++--------- python/teca_py_alg.i | 12 +++++------ test/CMakeLists.txt | 6 +++--- ...age.cpp => test_simple_moving_average.cpp} | 8 ++++---- 6 files changed, 33 insertions(+), 33 deletions(-) rename alg/{teca_temporal_average.cxx => teca_simple_moving_average.cxx} (92%) rename alg/{teca_temporal_average.h => teca_simple_moving_average.h} (73%) rename test/{test_temporal_average.cpp => test_simple_moving_average.cpp} (91%) diff --git a/alg/CMakeLists.txt b/alg/CMakeLists.txt index 531de38db..cbcd774fe 100644 --- 
a/alg/CMakeLists.txt +++ b/alg/CMakeLists.txt @@ -42,7 +42,7 @@ set(teca_alg_cxx_srcs teca_tc_classify.cxx teca_tc_wind_radii.cxx teca_tc_trajectory.cxx - teca_temporal_average.cxx + teca_simple_moving_average.cxx teca_unpack_data.cxx teca_valid_value_mask.cxx teca_variant_array_operand.cxx diff --git a/alg/teca_temporal_average.cxx b/alg/teca_simple_moving_average.cxx similarity index 92% rename from alg/teca_temporal_average.cxx rename to alg/teca_simple_moving_average.cxx index 52d95c1a5..3e4e4eb8f 100644 --- a/alg/teca_temporal_average.cxx +++ b/alg/teca_simple_moving_average.cxx @@ -1,4 +1,4 @@ -#include "teca_temporal_average.h" +#include "teca_simple_moving_average.h" #include "teca_mesh.h" #include "teca_array_collection.h" @@ -21,7 +21,7 @@ using std::endl; //#define TECA_DEBUG // -------------------------------------------------------------------------- -teca_temporal_average::teca_temporal_average() +teca_simple_moving_average::teca_simple_moving_average() : filter_width(3), filter_type(backward) { this->set_number_of_input_connections(1); @@ -29,16 +29,16 @@ teca_temporal_average::teca_temporal_average() } // -------------------------------------------------------------------------- -teca_temporal_average::~teca_temporal_average() +teca_simple_moving_average::~teca_simple_moving_average() {} #if defined(TECA_HAS_BOOST) // -------------------------------------------------------------------------- -void teca_temporal_average::get_properties_description( +void teca_simple_moving_average::get_properties_description( const string &prefix, options_description &global_opts) { options_description opts("Options for " - + (prefix.empty()?"teca_temporal_average":prefix)); + + (prefix.empty()?"teca_simple_moving_average":prefix)); opts.add_options() TECA_POPTS_GET(unsigned int, prefix, filter_width, @@ -53,7 +53,7 @@ void teca_temporal_average::get_properties_description( } // -------------------------------------------------------------------------- -void 
teca_temporal_average::set_properties( +void teca_simple_moving_average::set_properties( const string &prefix, variables_map &opts) { this->teca_algorithm::set_properties(prefix, opts); @@ -64,7 +64,7 @@ void teca_temporal_average::set_properties( #endif // -------------------------------------------------------------------------- -std::vector teca_temporal_average::get_upstream_request( +std::vector teca_simple_moving_average::get_upstream_request( unsigned int port, const std::vector &input_md, const teca_metadata &request) @@ -84,7 +84,7 @@ std::vector teca_temporal_average::get_upstream_request( break; } cerr << teca_parallel_id() - << "teca_temporal_average::get_upstream_request filter_type=" + << "teca_simple_moving_average::get_upstream_request filter_type=" << type << endl; #endif (void) port; @@ -153,14 +153,14 @@ std::vector teca_temporal_average::get_upstream_request( } // -------------------------------------------------------------------------- -const_p_teca_dataset teca_temporal_average::execute( +const_p_teca_dataset teca_simple_moving_average::execute( unsigned int port, const std::vector &input_data, const teca_metadata &request) { #ifdef TECA_DEBUG cerr << teca_parallel_id() - << "teca_temporal_average::execute" << endl; + << "teca_simple_moving_average::execute" << endl; #endif (void)port; diff --git a/alg/teca_temporal_average.h b/alg/teca_simple_moving_average.h similarity index 73% rename from alg/teca_temporal_average.h rename to alg/teca_simple_moving_average.h index ca00c5c40..0c94540fb 100644 --- a/alg/teca_temporal_average.h +++ b/alg/teca_simple_moving_average.h @@ -1,5 +1,5 @@ -#ifndef teca_temporal_average_h -#define teca_temporal_average_h +#ifndef teca_simple_moving_average_h +#define teca_simple_moving_average_h #include "teca_shared_object.h" #include "teca_algorithm.h" @@ -8,7 +8,7 @@ #include #include -TECA_SHARED_OBJECT_FORWARD_DECL(teca_temporal_average) +TECA_SHARED_OBJECT_FORWARD_DECL(teca_simple_moving_average) /// an 
algorithm that averages data in time /** @@ -16,13 +16,13 @@ an algorithm that averages data in time. filter_width controls the number of time steps to average over. all arrays in the input data are processed. */ -class teca_temporal_average : public teca_algorithm +class teca_simple_moving_average : public teca_algorithm { public: - TECA_ALGORITHM_STATIC_NEW(teca_temporal_average) - TECA_ALGORITHM_DELETE_COPY_ASSIGN(teca_temporal_average) - TECA_ALGORITHM_CLASS_NAME(teca_temporal_average) - ~teca_temporal_average(); + TECA_ALGORITHM_STATIC_NEW(teca_simple_moving_average) + TECA_ALGORITHM_DELETE_COPY_ASSIGN(teca_simple_moving_average) + TECA_ALGORITHM_CLASS_NAME(teca_simple_moving_average) + ~teca_simple_moving_average(); // report/initialize to/from Boost program options // objects. @@ -42,7 +42,7 @@ class teca_temporal_average : public teca_algorithm TECA_ALGORITHM_PROPERTY(int, filter_type) protected: - teca_temporal_average(); + teca_simple_moving_average(); private: std::vector get_upstream_request( diff --git a/python/teca_py_alg.i b/python/teca_py_alg.i index c5ac4db63..97bc81bda 100644 --- a/python/teca_py_alg.i +++ b/python/teca_py_alg.i @@ -36,7 +36,7 @@ #include "teca_tc_classify.h" #include "teca_tc_trajectory.h" #include "teca_tc_wind_radii.h" -#include "teca_temporal_average.h" +#include "teca_simple_moving_average.h" #include "teca_unpack_data.h" #include "teca_valid_value_mask.h" #include "teca_vertical_reduction.h" @@ -119,12 +119,12 @@ %include "teca_table_to_stream.h" /*************************************************************************** - temporal_average + simple_moving_average ***************************************************************************/ -%ignore teca_temporal_average::shared_from_this; -%shared_ptr(teca_temporal_average) -%ignore teca_temporal_average::operator=; -%include "teca_temporal_average.h" +%ignore teca_simple_moving_average::shared_from_this; +%shared_ptr(teca_simple_moving_average) +%ignore 
teca_simple_moving_average::operator=; +%include "teca_simple_moving_average.h" /*************************************************************************** vorticity diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 23313eb08..ee1982b8a 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -250,10 +250,10 @@ teca_add_test(test_latitude_damper LIBS teca_core teca_data teca_io teca_alg ${teca_test_link} COMMAND test_latitude_damper 361 181 10 "test_latitude_damper_%t%.%e%") -teca_add_test(test_temporal_average - SOURCES test_temporal_average.cpp +teca_add_test(test_simple_moving_average + SOURCES test_simple_moving_average.cpp LIBS teca_core teca_data teca_io teca_alg ${teca_test_link} - COMMAND test_temporal_average + COMMAND test_simple_moving_average "${TECA_DATA_ROOT}/prw_hus_day_MRI-CGCM3_historical_r1i1p1_19500101-19501231\\.nc$" "${TECA_DATA_ROOT}/test_temporal_average" 0 -1 3 1 prw FEATURES ${TECA_HAS_NETCDF} diff --git a/test/test_temporal_average.cpp b/test/test_simple_moving_average.cpp similarity index 91% rename from test/test_temporal_average.cpp rename to test/test_simple_moving_average.cpp index de07eaa8f..e785ea342 100644 --- a/test/test_temporal_average.cpp +++ b/test/test_simple_moving_average.cpp @@ -1,7 +1,7 @@ #include "teca_cf_reader.h" #include "teca_normalize_coordinates.h" #include "teca_indexed_dataset_cache.h" -#include "teca_temporal_average.h" +#include "teca_simple_moving_average.h" #include "teca_cartesian_mesh_writer.h" #include "teca_cf_writer.h" #include "teca_dataset_diff.h" @@ -21,7 +21,7 @@ int main(int argc, char **argv) if (argc < 8) { std::cerr << std::endl << "Usage error:" << std::endl - << "test_temporal_average [input regex] [baseline] [first step] [last step]" + << "test_simple_moving_average [input regex] [baseline] [first step] [last step]" " [filter width] [n threads] [array] [array] ..." 
<< std::endl << std::endl; return -1; @@ -53,9 +53,9 @@ int main(int argc, char **argv) dsc->set_max_cache_size(2*n_threads*filter_width); // temporal avg - p_teca_temporal_average a = teca_temporal_average::New(); + p_teca_simple_moving_average a = teca_simple_moving_average::New(); a->set_filter_width(filter_width); - a->set_filter_type(teca_temporal_average::backward); + a->set_filter_type(teca_simple_moving_average::backward); a->set_input_connection(dsc->get_output_port()); From 7cdc2ec2bf270fe8665a8a47ad6d6e665ce6e119 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 5 Mar 2021 16:32:12 -0800 Subject: [PATCH 044/180] rtd fix m1517 modulefile path --- doc/rtd/applications.rst | 4 +++- doc/rtd/installation.rst | 7 +++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/rtd/applications.rst b/doc/rtd/applications.rst index 9d5934544..9c2350758 100644 --- a/doc/rtd/applications.rst +++ b/doc/rtd/applications.rst @@ -87,6 +87,8 @@ loading the teca environment module. The second line tells the module system where to look for the teca modulefile and the third line loads the module, configuring the environment for use with TECA. +.. _m1517_installs: + m1517 CASCADE installs ~~~~~~~~~~~~~~~~~~~~~~ Members of the CASCADE project m1517 can access rolling installs on Cori. These @@ -103,7 +105,7 @@ at the top of their batch scripts. .. code-block:: bash module swap PrgEnv-intel PrgEnv-gnu - module use /global/common/software/m1517/develop + module use /global/common/software/m1517/teca/develop/modulefiles module load teca In order to make use of the `stable` release install swap `develop` for diff --git a/doc/rtd/installation.rst b/doc/rtd/installation.rst index bcd5784d0..4fe5d842f 100644 --- a/doc/rtd/installation.rst +++ b/doc/rtd/installation.rst @@ -8,6 +8,13 @@ on the platform and desired use. On a Cray Supercomputer ----------------------- + +.. 
tip:: + + CASCADE team members who need to run the command line applications on NERSC + Cori can use the m1517 group installs instead of manually installing TECA. + See :ref:`m1517_installs` for more information. + When installing TECA on a supercomputer one of the best options is the superbuild, a piece of CMake code that downloads and builds TECA and its many dependencies. The superbuild is located in a git repository here From 2d805c31c639bf0a959d8087f82f86afc2ecfb35 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Tue, 9 Mar 2021 16:36:53 -0800 Subject: [PATCH 045/180] fix crash in teca_metadata_probe app this fixes a crash that occurs when the metadata_probe is run without --input_regex and --input_file and --help. eg. run with no options. the fix involves checking arguments sooner to prevent use of a nullptr when the options are not given. --- apps/teca_metadata_probe.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/apps/teca_metadata_probe.cpp b/apps/teca_metadata_probe.cpp index cb3fcc928..975346dd7 100644 --- a/apps/teca_metadata_probe.cpp +++ b/apps/teca_metadata_probe.cpp @@ -147,6 +147,18 @@ int main(int argc, char **argv) bool have_file = opt_vals.count("input_file"); bool have_regex = opt_vals.count("input_regex"); + // validate the input method + if ((have_file && have_regex) || !(have_file || have_regex)) + { + if (rank == 0) + { + TECA_ERROR("Extacly one of --input_file or --input_regex can be specified. 
" + "Use --input_file to activate the multi_cf_reader (CMIP6 datasets) " + "and --input_regex to activate the cf_reader (CAM like datasets)") + } + return -1; + } + p_teca_algorithm reader; if (opt_vals.count("input_file")) { @@ -174,18 +186,6 @@ int main(int argc, char **argv) if (opt_vals.count("end_date")) time_j = opt_vals["end_date"].as(); - // some minimal check for mising options - if ((have_file && have_regex) || !(have_file || have_regex)) - { - if (rank == 0) - { - TECA_ERROR("Extacly one of --input_file or --input_regex can be specified. " - "Use --input_file to activate the multi_cf_reader (HighResMIP datasets) " - "and --input_regex to activate the cf_reader (CAM like datasets)") - } - return -1; - } - // run the reporting phase of the pipeline teca_metadata md = norm_coords->update_metadata(); From 0797fdb7ca574ed8b22fe24ae9869bfaccdbac81 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Tue, 9 Mar 2021 16:39:13 -0800 Subject: [PATCH 046/180] fix cf_reader missing calendar handling this change lets the cf_reader run when the base calendar is missing (it will be defaulted to the "standard" calendar) and one of the following files uses the "standard" calendar. In that case the calendars match and the reader can run. 
--- io/teca_cf_reader.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io/teca_cf_reader.cxx b/io/teca_cf_reader.cxx index a8a583011..1b73e0606 100644 --- a/io/teca_cf_reader.cxx +++ b/io/teca_cf_reader.cxx @@ -603,8 +603,8 @@ teca_metadata teca_cf_reader::get_output_metadata( const teca_metadata &md_i = teca_cf_time_axis_data::get_metadata(elem_i); std::string calendar_i; md_i.get("calendar", calendar_i); - if ((!has_calendar && !calendar_i.empty()) - || (has_calendar && (calendar_i != base_calendar))) + if ((has_calendar || !calendar_i.empty()) + && (calendar_i != base_calendar)) { TECA_ERROR("The base calendar is \"" << base_calendar << "\" but file " << i << " \"" << files[i] From b7bfd55bddb1456d636e50dfcd5117123b4d3774 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 10 Mar 2021 22:34:30 -0800 Subject: [PATCH 047/180] mpi_util split a communicator into groups of a given size --- core/teca_mpi_util.cxx | 37 ++++++++++++++++++++++++++++++++++++- core/teca_mpi_util.h | 5 +++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/core/teca_mpi_util.cxx b/core/teca_mpi_util.cxx index 220b67d6b..df67b6ab1 100644 --- a/core/teca_mpi_util.cxx +++ b/core/teca_mpi_util.cxx @@ -22,7 +22,7 @@ int equipartition_communicator(MPI_Comm comm, if (n_ranks < new_comm_size) { - // can't increase beyond the original sizew + // can't increase beyond the original size return 0; } @@ -52,6 +52,41 @@ int equipartition_communicator(MPI_Comm comm, return 0; } +// ************************************************************************** +int split_communicator(MPI_Comm world_comm, + int group_size, MPI_Comm *group_comm) +{ +#if defined(TECA_HAS_MPI) + int is_init = 0; + MPI_Initialized(&is_init); + if (is_init) + { + int world_rank = 0; + int world_size = 1; + + MPI_Comm_rank(world_comm, &world_rank); + MPI_Comm_size(world_comm, &world_size); + + MPI_Group world_group = MPI_GROUP_EMPTY; + MPI_Comm_group(world_comm, &world_group); + + int 
group_id = world_rank / group_size; + int group_start = group_id * group_size; + int group_end = std::min(world_size, group_start + group_size); + int group_range[3] = {group_start, group_end, 1}; + + MPI_Group sub_group = MPI_GROUP_EMPTY; + MPI_Group_range_incl(world_group, 1, &group_range, &sub_group); + + MPI_Comm_create(world_comm, sub_group, group_comm); + + MPI_Group_free(&world_group); + MPI_Group_free(&sub_group); + } +#endif + return 0; +} + // ************************************************************************** int mpi_rank_0(MPI_Comm comm) { diff --git a/core/teca_mpi_util.h b/core/teca_mpi_util.h index e51106533..ed2efb094 100644 --- a/core/teca_mpi_util.h +++ b/core/teca_mpi_util.h @@ -11,6 +11,11 @@ namespace teca_mpi_util int equipartition_communicator(MPI_Comm comm, int new_comm_size, MPI_Comm *new_comm); +// split the communicator into a number of new communicators +// such that each new communicator has group_size ranks. +int split_communicator(MPI_Comm comm, + int group_size, MPI_Comm *new_comm); + // return non-zero if this process is MPI rank 0 int mpi_rank_0(MPI_Comm comm); }; From fda304700a8f5514f81eb5c69b8c39a59bb53175 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 10 Mar 2021 22:35:36 -0800 Subject: [PATCH 048/180] array_attributes make to_stream const --- data/teca_array_attributes.cxx | 2 +- data/teca_array_attributes.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/data/teca_array_attributes.cxx b/data/teca_array_attributes.cxx index b1b8fcd72..d0b703777 100644 --- a/data/teca_array_attributes.cxx +++ b/data/teca_array_attributes.cxx @@ -149,7 +149,7 @@ int teca_array_attributes::from(const teca_metadata &md) } // -------------------------------------------------------------------------- -void teca_array_attributes::to_stream(std::ostream &os) +void teca_array_attributes::to_stream(std::ostream &os) const { os << "type_code=" << type_code << ", centering=" << centering << ", size=" << size << ", 
units=\"" << units diff --git a/data/teca_array_attributes.h b/data/teca_array_attributes.h index c7b976003..197cb1d38 100644 --- a/data/teca_array_attributes.h +++ b/data/teca_array_attributes.h @@ -69,7 +69,7 @@ struct teca_array_attributes int from(const teca_metadata &md); // send to the stream in human readable form - void to_stream(std::ostream &os); + void to_stream(std::ostream &os) const; // possible centrings // From 617c6b34a7cd8ab50839a0210d9fa0dcf607b58b Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 10 Mar 2021 22:36:20 -0800 Subject: [PATCH 049/180] array_collection add method to get all names --- data/teca_array_collection.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/data/teca_array_collection.h b/data/teca_array_collection.h index e578df44a..494ca7e42 100644 --- a/data/teca_array_collection.h +++ b/data/teca_array_collection.h @@ -14,6 +14,7 @@ A collection of named arrays class teca_array_collection { public: + // construct on heap static p_teca_array_collection New() @@ -80,6 +81,13 @@ class teca_array_collection const std::string &get_name(unsigned int i) const { return m_names[i]; } + // access the list of names + std::vector &get_names() + { return m_names; } + + const std::vector &get_names() const + { return m_names; } + // return a unique string identifier std::string get_class_name() const { return "teca_array_collection"; } From 7105e8884b6ad4c2b7ff6c44d24ac7b4bbc4b74b Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 10 Mar 2021 22:44:09 -0800 Subject: [PATCH 050/180] normalize_coordinates add optional translation --- .travis.yml | 2 +- alg/teca_normalize_coordinates.cxx | 142 +++++++++++++++++++--------- alg/teca_normalize_coordinates.h | 35 +++++-- test/CMakeLists.txt | 92 ++++++++++++++---- test/test_normalize_coordinates.cpp | 31 ++++-- 5 files changed, 225 insertions(+), 77 deletions(-) diff --git a/.travis.yml b/.travis.yml index 4c7ad5daf..2632e0b09 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 
+17,7 @@ env: - BUILD_TYPE=Debug - TECA_DIR=/travis_teca_dir - TECA_PYTHON_VERSION=3 - - TECA_DATA_REVISION=107 + - TECA_DATA_REVISION=108 jobs: - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=TRUE - DOCKER_IMAGE=ubuntu IMAGE_VERSION=20.04 IMAGE_NAME=ubuntu_20_04 REQUIRE_NETCDF_MPI=FALSE diff --git a/alg/teca_normalize_coordinates.cxx b/alg/teca_normalize_coordinates.cxx index 4ae4bd586..548e2fa22 100644 --- a/alg/teca_normalize_coordinates.cxx +++ b/alg/teca_normalize_coordinates.cxx @@ -31,16 +31,18 @@ struct teca_normalize_coordinates::internals_t const const_p_teca_variant_array &in_x, const const_p_teca_variant_array &in_y, const const_p_teca_variant_array &in_z, - int x_axis_order, int y_axis_order, - int z_axis_order, double *bounds); + double *bounds, + int x_axis_order, int y_axis_order, int z_axis_order, + double tx_fact, double ty_fact, double tz_fact, + bool &flip_x, bool &flip_y, bool &flip_z); template typename compare_t> static p_teca_variant_array normalize_axis( - const const_p_teca_variant_array &x, double *bounds); + const const_p_teca_variant_array &x, double *bounds, + double tx_fact, bool &flip); static - void normalize_extent(p_teca_variant_array out_x, - p_teca_variant_array out_y, p_teca_variant_array out_z, + void normalize_extent(bool flip_x, bool flip_y, bool flip_z, unsigned long *whole_extent, unsigned long *extent_in, unsigned long *extent_out); @@ -53,24 +55,38 @@ struct teca_normalize_coordinates::internals_t // -------------------------------------------------------------------------- template typename compare_t> p_teca_variant_array teca_normalize_coordinates::internals_t::normalize_axis( - const const_p_teca_variant_array &x, double *bounds) + const const_p_teca_variant_array &x, double *bounds, double tx_fact, + bool &flip) { unsigned long nx = x->size(); unsigned long x1 = nx - 1; - NESTED_TEMPLATE_DISPATCH(const teca_variant_array_impl, - x.get(), _C, + p_teca_variant_array xo; - const NT_C 
*px = dynamic_cast(x.get())->get(); + TEMPLATE_DISPATCH(const teca_variant_array_impl, + x.get(), + + using TT_OUT = teca_variant_array_impl; + + const NT *px = dynamic_cast(x.get())->get(); + NT *pxo = nullptr; + + // initialize bounds, correct if a flip or translate is applied + bounds[0] = px[0]; + bounds[1] = px[x1]; // if comp(x0, x1) reverse the axis. // when comp is less than the output will be ascending // when comp is greater than the output will be descending compare_t compare; - if (compare(px[x1], px[0])) + flip = compare(px[x1], px[0]); + + bool translate = tx_fact != 0.0; + + if (flip) { - p_teca_variant_array xo = x->new_instance(nx); - NT_C *pxo = static_cast*>(xo.get())->get(); + xo = x->new_instance(nx); + pxo = static_cast(xo.get())->get(); pxo += x1; for (unsigned long i = 0; i < nx; ++i) @@ -78,14 +94,27 @@ p_teca_variant_array teca_normalize_coordinates::internals_t::normalize_axis( bounds[0] = px[x1]; bounds[1] = px[0]; - - return xo; } - bounds[0] = px[0]; - bounds[1] = px[x1]; + if (translate) + { + if (!xo) + xo = x->new_instance(nx); + else + px = static_cast(xo.get())->get(); + + pxo = static_cast(xo.get())->get(); + + NT tx = tx_fact; + for (unsigned long i = 0; i < nx; ++i) + pxo[i] = px[i] + tx; + + bounds[0] += tx_fact; + bounds[1] += tx_fact; + } ) - return nullptr; + + return xo; } // -------------------------------------------------------------------------- @@ -96,17 +125,21 @@ int teca_normalize_coordinates::internals_t::normalize_axes( const const_p_teca_variant_array &in_x, const const_p_teca_variant_array &in_y, const const_p_teca_variant_array &in_z, + double *bounds, int x_axis_order, int y_axis_order, int z_axis_order, - double *bounds) + double tx_fact, double ty_fact, double tz_fact, + bool &flip_x, bool &flip_y, bool &flip_z) { // x axis if (x_axis_order == ORDER_ASCENDING) { - out_x = internals_t::normalize_axis(in_x, bounds); + out_x = internals_t::normalize_axis(in_x, + bounds, tx_fact, flip_x); } else if 
(x_axis_order == ORDER_DESCENDING) { - out_x = internals_t::normalize_axis(in_x, bounds); + out_x = internals_t::normalize_axis(in_x, + bounds, tx_fact, flip_x); } else { @@ -117,11 +150,13 @@ int teca_normalize_coordinates::internals_t::normalize_axes( // y axis if (y_axis_order == ORDER_ASCENDING) { - out_y = internals_t::normalize_axis(in_y, bounds + 2); + out_y = internals_t::normalize_axis(in_y, + bounds + 2, ty_fact, flip_y); } else if (y_axis_order == ORDER_DESCENDING) { - out_y = internals_t::normalize_axis(in_y, bounds + 2); + out_y = internals_t::normalize_axis(in_y, + bounds + 2, ty_fact, flip_y); } else { @@ -132,11 +167,13 @@ int teca_normalize_coordinates::internals_t::normalize_axes( // z axis if (z_axis_order == ORDER_ASCENDING) { - out_z = internals_t::normalize_axis(in_z, bounds + 4); + out_z = internals_t::normalize_axis(in_z, + bounds + 4, tz_fact, flip_z); } else if (z_axis_order == ORDER_DESCENDING) { - out_z = internals_t::normalize_axis(in_z, bounds + 4); + out_z = internals_t::normalize_axis(in_z, + bounds + 4, tz_fact, flip_z); } else { @@ -149,13 +186,12 @@ int teca_normalize_coordinates::internals_t::normalize_axes( // -------------------------------------------------------------------------- void teca_normalize_coordinates::internals_t::normalize_extent( - p_teca_variant_array out_x, p_teca_variant_array out_y, - p_teca_variant_array out_z, unsigned long *whole_extent, + bool flip_x, bool flip_y, bool flip_z, unsigned long *whole_extent, unsigned long *extent_in, unsigned long *extent_out) { #if defined(TECA_DEBUG) std::cerr - << "out=[" << out_x << ", " << out_y << ", " << out_z << "]" << std::endl + << "out=[" << flip_x << ", " << flip_y << ", " << flip_z << "]" << std::endl << "whole_extent=[" << whole_extent[0] << ", " << whole_extent[1] << ", " << whole_extent[2] << ", " << whole_extent[3] << ", " << whole_extent[4] << ", " << whole_extent[5] << "]" << std::endl @@ -169,21 +205,21 @@ void 
teca_normalize_coordinates::internals_t::normalize_extent( // detect coordinate axes in descending order, transform the incoming // extents from ascending order coordinates back to original descending // order coordinate system so the upstream gets the correct extent - if (out_x) + if (flip_x) { unsigned long wnx = whole_extent[1] - whole_extent[0]; extent_out[0] = wnx - extent_in[1]; extent_out[1] = wnx - extent_in[0]; } - if (out_y) + if (flip_y) { unsigned long wny = whole_extent[3] - whole_extent[2]; extent_out[2] = wny - extent_in[3]; extent_out[3] = wny - extent_in[2]; } - if (out_z) + if (flip_z) { unsigned long wnz = whole_extent[5] - whole_extent[4]; extent_out[4] = wnz - extent_in[5]; @@ -311,7 +347,9 @@ void teca_normalize_coordinates::internals_t::normalize_variables( // -------------------------------------------------------------------------- teca_normalize_coordinates::teca_normalize_coordinates() : x_axis_order(ORDER_ASCENDING), y_axis_order(ORDER_ASCENDING), - z_axis_order(ORDER_DESCENDING), internals(nullptr) + z_axis_order(ORDER_DESCENDING), translate_x(0.0), + translate_y(0.0), translate_z(0.0), + internals(nullptr) { this->internals = new teca_normalize_coordinates::internals_t; @@ -343,6 +381,12 @@ void teca_normalize_coordinates::get_properties_description( TECA_POPTS_GET(int, prefix, z_axis_order, "Sets the desired output order of the z-axis. 
Use" " ORDER_ASCENDING(0) or ORDER_DESCENDING(1).") + TECA_POPTS_GET(double, prefix, translate_x, + "Translate the x-axis by the given value.") + TECA_POPTS_GET(double, prefix, translate_y, + "Translate the y-axis by the given value.") + TECA_POPTS_GET(double, prefix, translate_z, + "Translate the z-axis by the given value.") ; this->teca_algorithm::get_properties_description(prefix, opts); @@ -359,6 +403,9 @@ void teca_normalize_coordinates::set_properties( TECA_POPTS_SET(opts, int, prefix, x_axis_order) TECA_POPTS_SET(opts, int, prefix, y_axis_order) TECA_POPTS_SET(opts, int, prefix, z_axis_order) + TECA_POPTS_SET(opts, double, prefix, translate_x) + TECA_POPTS_SET(opts, double, prefix, translate_y) + TECA_POPTS_SET(opts, double, prefix, translate_z) } #endif @@ -424,11 +471,13 @@ teca_metadata teca_normalize_coordinates::get_output_metadata( // check for and transform coordinate axes from descending order // to ascending order + bool flip_x, flip_y, flip_z; double bounds[6] = {0.0}; p_teca_variant_array out_x, out_y, out_z; - if (this->internals->normalize_axes(out_x, out_y, out_z, - in_x, in_y, in_z, this->x_axis_order, this->y_axis_order, - this->z_axis_order, bounds)) + if (this->internals->normalize_axes(out_x, out_y, out_z, in_x, in_y, in_z, + bounds, this->x_axis_order, this->y_axis_order, this->z_axis_order, + this->translate_x, this->translate_y, this->translate_z, + flip_x, flip_y, flip_z)) { TECA_ERROR("Failed to normalize axes") return teca_metadata(); @@ -485,11 +534,13 @@ std::vector teca_normalize_coordinates::get_upstream_request( // now convert the original coordinate axes into the // normalized system. 
this isn't cached for thread safety + bool flip_x, flip_y, flip_z; double bounds[6] = {0.0}; p_teca_variant_array out_x, out_y, out_z; - if (this->internals->normalize_axes(out_x, out_y, out_z, - in_x, in_y, in_z, this->x_axis_order, this->y_axis_order, - this->z_axis_order, bounds)) + if (this->internals->normalize_axes(out_x, out_y, out_z, in_x, in_y, in_z, + bounds, this->x_axis_order, this->y_axis_order, this->z_axis_order, + this->translate_x, this->translate_y, this->translate_z, + flip_x, flip_y, flip_z)) { TECA_ERROR("Failed to normalize axes") return up_reqs; @@ -547,7 +598,7 @@ std::vector teca_normalize_coordinates::get_upstream_request( } // apply the transform if needed - this->internals->normalize_extent(out_x, out_y, out_z, + this->internals->normalize_extent(flip_x, flip_y, flip_z, whole_extent, extent_in, extent_out); // validate the requested extent @@ -603,11 +654,13 @@ const_p_teca_dataset teca_normalize_coordinates::execute(unsigned int port, const_p_teca_variant_array in_z = in_mesh->get_z_coordinates(); // transform the axes to ascending order if needed + bool flip_x, flip_y, flip_z; double bounds[6] = {0.0}; p_teca_variant_array out_x, out_y, out_z; - if (this->internals->normalize_axes(out_x, out_y, out_z, - in_x, in_y, in_z, this->x_axis_order, this->y_axis_order, - this->z_axis_order, bounds)) + if (this->internals->normalize_axes(out_x, out_y, out_z, in_x, in_y, in_z, + bounds, this->x_axis_order, this->y_axis_order, this->z_axis_order, + this->translate_x, this->translate_y, this->translate_z, + flip_x, flip_y, flip_z)) { TECA_ERROR("Failed to normalize axes") return nullptr; @@ -635,14 +688,13 @@ const_p_teca_dataset teca_normalize_coordinates::execute(unsigned int port, } // apply the same set of transforms to the data - if (out_x || out_y || out_z) + if (flip_x || flip_y || flip_z) { unsigned long extent[6]; in_mesh->get_extent(extent); - this->internals->normalize_variables(out_x.get(), - out_y.get(), out_z.get(), extent, - 
out_mesh->get_point_arrays()); + this->internals->normalize_variables(flip_x, flip_y, + flip_z, extent, out_mesh->get_point_arrays()); } return out_mesh; diff --git a/alg/teca_normalize_coordinates.h b/alg/teca_normalize_coordinates.h index 26050a8e7..c62f04f62 100644 --- a/alg/teca_normalize_coordinates.h +++ b/alg/teca_normalize_coordinates.h @@ -10,11 +10,20 @@ TECA_SHARED_OBJECT_FORWARD_DECL(teca_normalize_coordinates) -/// an algorithm to ensure that coordinates are in ascending order +/// an algorithm that ensures that Cartesian mesh coordinates follow conventions /** -Transformations of coordinates and data to/from ascending order -are made as data and information pass up and down stream through -the algorithm. + * Transformations of coordinates and data to/from ascending order + * are made as data and information pass up and down stream through + * the algorithm. See @ref axis_order + * + * An optional translation to each axis can be applied by setting + * one or more of translate_x, translate_y, or translate_z to a + * non-zero value. See @ref translate_axis + * + * Use this algorithm when downstream processing depends on coordinate + * conventions. For instance differentials or integrals may require spatial + * coordinate be in ascending or descending order. Similarly regriding + * operations may require data in the same coordinate system. */ class teca_normalize_coordinates : public teca_algorithm { @@ -29,8 +38,8 @@ class teca_normalize_coordinates : public teca_algorithm TECA_GET_ALGORITHM_PROPERTIES_DESCRIPTION() TECA_SET_ALGORITHM_PROPERTIES() - /** @anchor x,y,z_axis_order - * @name x,y,z_axis_order + /** @anchor axis_order + * @name axis_order * Set the desired order of the output for each coordinate * axis. 
Use ORDER_ASCENDING(0) to ensure the output is in * ascending order, and ORDER_DESCENDING(1) to ensure the @@ -40,12 +49,21 @@ class teca_normalize_coordinates : public teca_algorithm */ ///@{ enum {ORDER_ASCENDING = 0, ORDER_DESCENDING = 1}; - TECA_ALGORITHM_PROPERTY_V(int, x_axis_order) TECA_ALGORITHM_PROPERTY_V(int, y_axis_order) TECA_ALGORITHM_PROPERTY_V(int, z_axis_order) ///@} + /** @anchor translate_axis + * @name translate_axis + * Set the amount to translate the x, y, or z axis by. + */ + ///@{ + TECA_ALGORITHM_PROPERTY(double, translate_x) + TECA_ALGORITHM_PROPERTY(double, translate_y) + TECA_ALGORITHM_PROPERTY(double, translate_z) + ///@} + protected: teca_normalize_coordinates(); @@ -69,6 +87,9 @@ class teca_normalize_coordinates : public teca_algorithm int x_axis_order; int y_axis_order; int z_axis_order; + double translate_x; + double translate_y; + double translate_z; struct internals_t; internals_t *internals; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index ee1982b8a..dd3247a81 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -582,85 +582,145 @@ teca_add_test(test_normalize_coordinates_pass_through EXEC_NAME test_normalize_coordinates SOURCES test_normalize_coordinates.cpp LIBS teca_core teca_data teca_io teca_alg ${teca_test_link} - COMMAND test_normalize_coordinates 90 45 7 0 0 0 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 0 0 0 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_x - COMMAND test_normalize_coordinates 90 45 7 1 0 0 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 1 0 0 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_y - COMMAND test_normalize_coordinates 90 45 7 0 1 0 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 0 1 0 0 0 0 0 360 -90 90 100 0 
"${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_z - COMMAND test_normalize_coordinates 90 45 7 0 0 1 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 0 0 1 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xy - COMMAND test_normalize_coordinates 90 45 7 1 1 0 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 1 1 0 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xz - COMMAND test_normalize_coordinates 90 45 7 1 0 1 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 1 0 1 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_yz - COMMAND test_normalize_coordinates 90 45 7 0 1 1 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 0 1 1 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xyz - COMMAND test_normalize_coordinates 90 45 7 1 1 1 0 360 -90 90 100 0 + COMMAND test_normalize_coordinates 90 45 7 1 1 1 0 0 0 0 360 -90 90 100 0 "${TECA_DATA_ROOT}/test_normalize_coordinates.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_pass_through_subset - COMMAND test_normalize_coordinates 90 45 7 0 0 0 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 0 0 0 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_x_subset - COMMAND test_normalize_coordinates 90 45 7 1 0 0 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 1 0 0 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_y_subset - COMMAND 
test_normalize_coordinates 90 45 7 0 1 0 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 0 1 0 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_z_subset - COMMAND test_normalize_coordinates 90 45 7 0 0 1 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 0 0 1 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xy_subset - COMMAND test_normalize_coordinates 90 45 7 1 1 0 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 1 1 0 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xz_subset - COMMAND test_normalize_coordinates 90 45 7 1 0 1 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 1 0 1 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_yz_subset - COMMAND test_normalize_coordinates 90 45 7 0 1 1 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 0 1 1 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) teca_add_test(test_normalize_coordinates_flip_xyz_subset - COMMAND test_normalize_coordinates 90 45 7 1 1 1 40 190 -30 45 70 30 + COMMAND test_normalize_coordinates 90 45 7 1 1 1 0 0 0 40 190 -30 45 70 30 "${TECA_DATA_ROOT}/test_normalize_coordinates_subset.bin" REQ_TECA_DATA) +teca_add_test(test_normalize_coordinates_trans_x + COMMAND test_normalize_coordinates 90 45 7 0 0 0 -180 0 0 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_x.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_x_flip_x + COMMAND test_normalize_coordinates 90 45 7 1 0 0 -180 0 0 0 360 -90 90 100 0 + 
"${TECA_DATA_ROOT}/test_normalize_coordinates_trans_x.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_x_flip_y + COMMAND test_normalize_coordinates 90 45 7 0 1 0 -180 0 0 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_x.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_x_flip_z + COMMAND test_normalize_coordinates 90 45 7 0 0 1 -180 0 0 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_x.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_y + COMMAND test_normalize_coordinates 90 45 7 0 0 0 0 90 0 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_y.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_y_flip_x + COMMAND test_normalize_coordinates 90 45 7 1 0 0 0 90 0 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_y.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_y_flip_y + COMMAND test_normalize_coordinates 90 45 7 0 1 0 0 90 0 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_y.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_y_flip_z + COMMAND test_normalize_coordinates 90 45 7 0 0 1 0 90 0 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_y.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_z + COMMAND test_normalize_coordinates 90 45 7 0 0 0 0 0 100 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_z.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_z_flip_x + COMMAND test_normalize_coordinates 90 45 7 1 0 0 0 0 100 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_z.bin" + REQ_TECA_DATA) + +teca_add_test(test_normalize_coordinates_trans_z_flip_y + COMMAND test_normalize_coordinates 90 45 7 0 1 0 0 0 100 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_z.bin" + REQ_TECA_DATA) + 
+teca_add_test(test_normalize_coordinates_trans_z_flip_z + COMMAND test_normalize_coordinates 90 45 7 0 0 1 0 0 100 0 360 -90 90 100 0 + "${TECA_DATA_ROOT}/test_normalize_coordinates_trans_z.bin" + REQ_TECA_DATA) + teca_add_test(test_cf_writer_collective_serial EXEC_NAME test_cf_writer_collective SOURCES test_cf_writer_collective.cpp diff --git a/test/test_normalize_coordinates.cpp b/test/test_normalize_coordinates.cpp index b581db0df..70fcd2277 100644 --- a/test/test_normalize_coordinates.cpp +++ b/test/test_normalize_coordinates.cpp @@ -86,10 +86,10 @@ int main(int argc, char **argv) { teca_system_interface::set_stack_trace_on_error(); - if (argc != 14) + if (argc != 17) { cerr << "test_normalize_coordinates [nx] [ny] [nz] [flip x] [flip y] [flip z] " - "[x0 x1 y0 y1 z0 z1] [out file]" << endl; + "[trans x] [trans y] trans z] [x0 x1 y0 y1 z0 z1] [out file]" << endl; return -1; } @@ -99,9 +99,12 @@ int main(int argc, char **argv) int flip_x = atoi(argv[4]); int flip_y = atoi(argv[5]); int flip_z = atoi(argv[6]); - std::vector req_bounds({atof(argv[7]), atof(argv[8]), - atof(argv[9]), atof(argv[10]), atof(argv[11]), atof(argv[12])}); - std::string out_file = argv[13]; + double trans_x = atof(argv[7]); + double trans_y = atof(argv[8]); + double trans_z = atof(argv[9]); + std::vector req_bounds({atof(argv[10]), atof(argv[11]), + atof(argv[12]), atof(argv[13]), atof(argv[14]), atof(argv[15])}); + std::string out_file = argv[16]; p_teca_cartesian_mesh_source source = teca_cartesian_mesh_source::New(); source->set_whole_extents({0, nx-1, 0, ny-1, 0, nz-1, 0, 0}); @@ -119,6 +122,16 @@ int main(int argc, char **argv) p_teca_normalize_coordinates coords = teca_normalize_coordinates::New(); coords->set_input_connection(source->get_output_port()); + coords->set_translate_x(trans_x); + coords->set_translate_y(trans_y); + coords->set_translate_z(trans_z); + + req_bounds[0] += trans_x; + req_bounds[1] += trans_x; + req_bounds[2] += trans_y; + req_bounds[3] += trans_y; + 
req_bounds[4] += trans_z; + req_bounds[5] += trans_z; p_teca_index_executive exec = teca_index_executive::New(); exec->set_bounds(req_bounds); @@ -131,15 +144,17 @@ int main(int argc, char **argv) << "whole_extents = [0, " << nx-1 << ", 0, " << ny-1 << ", 0, " << nz-1 << "]" << std::endl << "bounds = [" << x0 << ", " << x1 << ", " << y0 - << ", " << y1 << ", " << z0 << ", " << z1 << "]" - << std::endl; + << ", " << y1 << ", " << z0 << ", " << z1 << "]" << std::endl + << "translate = [" << trans_x << ", " + << trans_y << ", " << trans_z << "]" << std::endl + << "req_bounds = [" << req_bounds << "]" << std::endl; teca_metadata md = coords->update_metadata(); teca_metadata coord_axes; md.get("coordinates", coord_axes); - std::cerr << "coordinates" << std::endl; + std::cerr << "coordinates :" << std::endl; coord_axes.to_stream(std::cerr); std::cerr << std::endl; From 4f58b00909ea6ca21a34a99e1bd2f2ff83cd78e6 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 10 Mar 2021 23:19:27 -0800 Subject: [PATCH 051/180] cartesian_mesh_source methods to configure from metadata --- alg/teca_cartesian_mesh_source.cxx | 66 +++++++++++++++++++++++++++++- alg/teca_cartesian_mesh_source.h | 18 ++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/alg/teca_cartesian_mesh_source.cxx b/alg/teca_cartesian_mesh_source.cxx index 482b732fe..a6532160e 100644 --- a/alg/teca_cartesian_mesh_source.cxx +++ b/alg/teca_cartesian_mesh_source.cxx @@ -94,7 +94,7 @@ teca_cartesian_mesh_source::teca_cartesian_mesh_source() : field_type_code(teca_variant_array_code::get()), x_axis_variable("lon"), y_axis_variable("lat"), z_axis_variable("plev"), t_axis_variable("time"), x_axis_units("degrees_east"), - y_axis_units("degrees_north"), z_axis_units("pascals"), + y_axis_units("degrees_north"), z_axis_units("Pa"), calendar("Gregorian"), time_units("seconds since 1970-01-01 00:00:00"), whole_extents{0l, 359l, 0l, 179l, 0l, 0l, 0l, 0l}, bounds{0., 360, -90., 90., 0., 0., 0., 0.}, @@ -129,6 
+129,70 @@ void teca_cartesian_mesh_source::set_properties(const std::string &prefix, } #endif +// -------------------------------------------------------------------------- +int teca_cartesian_mesh_source::set_spatial_bounds(const teca_metadata &md) +{ + teca_metadata coords; + if (md.get("coordinates", coords)) + return -1; + + // get the bounds in the x direction + p_teca_variant_array x = coords.get("x"); + + if (!x) + return -1; + + x->get(0lu, this->bounds[0]); + x->get(x->size() - 1lu, this->bounds[1]); + + // get the bounds in the y direction + p_teca_variant_array y = coords.get("y"); + + if (!y) + return -1; + + y->get(0lu, this->bounds[2]); + y->get(y->size() - 1lu, this->bounds[3]); + + // get the bounds in the z direction + p_teca_variant_array z = coords.get("z"); + + if (!z) + return -1; + + z->get(0lu, this->bounds[4]); + z->get(z->size() - 1lu, this->bounds[5]); + + // set the coordinate type + this->set_coordinate_type_code(x->type_code()); + + return 0; +} + +// -------------------------------------------------------------------------- +int teca_cartesian_mesh_source::set_calendar(const teca_metadata &md) +{ + teca_metadata atts; + if (md.get("attributes", atts)) + return -1; + + teca_metadata time_atts; + if (atts.get("time", time_atts)) + return -1; + + std::string calendar; + if (time_atts.get("calendar", calendar)) + return -1; + + std::string units; + if (time_atts.get("units", units)) + return -1; + + this->calendar = calendar; + this->time_units = units; + + return 0; +} // -------------------------------------------------------------------------- void teca_cartesian_mesh_source::set_modified() diff --git a/alg/teca_cartesian_mesh_source.h b/alg/teca_cartesian_mesh_source.h index a28e413c2..39297e672 100644 --- a/alg/teca_cartesian_mesh_source.h +++ b/alg/teca_cartesian_mesh_source.h @@ -73,6 +73,14 @@ class teca_cartesian_mesh_source : public teca_algorithm // this should be the same on all ranks elements. 
TECA_ALGORITHM_VECTOR_PROPERTY(double, bound) + /** Set the spatial bounds from a metadata object following the conventions + * defined by the teca_cf_reader. This provides an easy way to get valid + * mesh bounds from an existing dataset where the producer of the dataset + * has followed those conventions. Returns zero if successful and non-zero + * if the supplied metadata is missing any of the requisite information. + */ + int set_spatial_bounds(const teca_metadata &md); + // set the variable to use for the coordinate axes. // the defaults are: x => lon, y => lat, z = plev, // t => time @@ -95,6 +103,16 @@ class teca_cartesian_mesh_source : public teca_algorithm // function must have the signature f(x,y,z,t). TECA_ALGORITHM_VECTOR_PROPERTY(field_generator_t, field_generator); + /** Set the time units and calendar from a metadata object following the + * conventions defined by the teca_cf_reader. This provides an easy way to + * get calendaring information from an existing dataset where the producer + * of the dataset has followed those conventions. Returns zero if + * successful and non-zero if the supplied metadata is missing any of the + * requisite information. + */ + int set_calendar(const teca_metadata &md); + + // set a callback function f(x,y,z,t) that generates a field named name // x,y,z are coordinate axes in variant arrays, t is the double precision // time value. 
From 692115479f0e578db8f19aa0096ca243d698f4ae Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Wed, 10 Mar 2021 23:20:24 -0800 Subject: [PATCH 052/180] cartesian_mesh_source revise class documentation --- alg/teca_cartesian_mesh_source.h | 173 +++++++++++++++++++++++++------ 1 file changed, 140 insertions(+), 33 deletions(-) diff --git a/alg/teca_cartesian_mesh_source.h b/alg/teca_cartesian_mesh_source.h index 39297e672..9b1073254 100644 --- a/alg/teca_cartesian_mesh_source.h +++ b/alg/teca_cartesian_mesh_source.h @@ -10,12 +10,18 @@ TECA_SHARED_OBJECT_FORWARD_DECL(teca_cartesian_mesh_source) -// f(x, y, z, t) -// given spatial coordinate axes x,y,z and the time t, return the field +/** The signature of the callback used to specify user defined fields. + * f(x, y, z, t) -> w + * Given spatial coordinate axes x,y,z and the time t, return the + * 3D field w. + */ using field_generator_callback = std::function; +/** An object that bundles field name, the metadata attributes needed for I/O, + * and a field generator callback. Use this with ::append_field_generator + */ struct field_generator { std::string name; @@ -23,6 +29,8 @@ struct field_generator field_generator_callback generator; }; +using field_generator_t = field_generator; + inline bool operator==(const field_generator &l, const field_generator &r) { @@ -35,12 +43,26 @@ bool operator!=(const field_generator &l, const field_generator &r) return l.name != r.name; } -using field_generator_t = field_generator; -/** -An algorithm that constructs and serves up a Cartesian mesh -of the specified dimensions. -*/ +/** @brief + * An algorithm that generates a teca_cartesian_mesh of the requested + * spatial and temporal dimensions with optional user defined fields. 
+ * + * @details + * User defined fields are specified by passing callbacks and metadata + * via @ref field_generator and @ref append_field_generator + * + * The spatial and temporal dimensions are set by the combination of + * @ref whole_extent and @ref bounds. + * + * The names of coordinate axes are set by the combination + * of @ref x_axis_variable, @ref y_axis_variable, @ref z_axis_variable, + * and @ref t_axis_variable + * + * The units of the coordinate axes are set by the combination of + * @ref x_axis_units, @ref y_axis_units, @ref z_axis_units, @ref calendar, + * and @ref time_units. + */ class teca_cartesian_mesh_source : public teca_algorithm { public: @@ -54,23 +76,43 @@ class teca_cartesian_mesh_source : public teca_algorithm TECA_GET_ALGORITHM_PROPERTIES_DESCRIPTION() TECA_SET_ALGORITHM_PROPERTIES() - // set/get the type code for generated coordinates. - // default is a 64 bit floating point type. Use - // teca_variant_array_code::get() to get specific type - // codes for C++ POD types NT. + /** @anchor coordinate_type_code + * @name coordinate_type_code + * set/get the type code for generated coordinates. The default is a 64 bit + * floating point type. Use teca_variant_array_code::get() to get + * specific type codes for C++ POD types NT. + */ + ///@{ TECA_ALGORITHM_PROPERTY(unsigned int, coordinate_type_code) + ///@} + + /** @anchor field_type_code + * @name field_type_code + * set/get the type code for generated fields. The default is a 64 bit + * floating point type. Use teca_variant_array_code::get() to get + * specific type codes for C++ POD types NT. + */ + ///@{ TECA_ALGORITHM_PROPERTY(unsigned int, field_type_code) + ///@} - // set/get the global index space extent of the data. the extents are - // given by 8 values, 6 spatial plus 2 temporal, in the following order - // [i0 i1 j0 j1 k0 k1 q0 q1] - // this should be the same on all ranks elements. 
+ /** @anchor whole_extent + * @name whole_extent + * set/get the global index space extent of the data. the extents are + * given by 8 values, 6 spatial plus 2 temporal, in the following order + * [i0 i1 j0 j1 k0 k1 q0 q1] This should be the same on all ranks + */ + ///@{ TECA_ALGORITHM_VECTOR_PROPERTY(unsigned long, whole_extent) + ///@} - // set/get the global bounds of the data. the bounds are 8 values 6 spatial - // plus 2 temporal in the following order. - // [x0 x1 y0 y1 z0 z1 t0 t1] - // this should be the same on all ranks elements. + /** @anchor bounds + * @name bounds + * set/get the global bounds of the data. the bounds are 8 values 6 spatial + * plus 2 temporal in the following order. [x0 x1 y0 y1 z0 z1 t0 t1] + * this should be the same on all ranks. + */ + ///@{ TECA_ALGORITHM_VECTOR_PROPERTY(double, bound) /** Set the spatial bounds from a metadata object following the conventions @@ -80,28 +122,70 @@ class teca_cartesian_mesh_source : public teca_algorithm * if the supplied metadata is missing any of the requisite information. */ int set_spatial_bounds(const teca_metadata &md); + ///@} - // set the variable to use for the coordinate axes. - // the defaults are: x => lon, y => lat, z = plev, - // t => time + /** @anchor x_axis_variable + * @name x_axis_variable + * set the variable to use for the coordinate axes. the default is: lon + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, x_axis_variable) + ///@} + + /** @anchor y_axis_variable + * @name y_axis_variable + * set the variable to use for the coordinate axes. the defaults is: lat + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, y_axis_variable) + ///@} + + /** @anchor z_axis_variable + * @name z_axis_variable + * set the variable to use for the coordinate axes. the default is: plev + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, z_axis_variable) + ///@} + + /** @anchor t_axis_variable + * @name t_axis_variable + * set the variable to use for the coordinate axes. 
* the default is: time + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, t_axis_variable) + ///@} - // set the units of spatial axes. The defaults are: - // degrees_east, degrees_north, and pressure_level + /** @anchor x_axis_units + * @name x_axis_units + * set the units of spatial axes. The defaults is: degrees_east + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, x_axis_units) + ///@} + + /** @anchor y_axis_units + * @name y_axis_units + * set the units of spatial axes. The defaults is: degrees_north + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, y_axis_units) + ///@} + + /** @anchor z_axis_units + * @name z_axis_units + * set the units of spatial axes. The defaults is: Pa + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, z_axis_units) + ///@} - // set the calendar and time units + /** @anchor calendar + * @name calendar + * Set/get the calendar. The default is "Gregorian". + */ + ///@{ TECA_ALGORITHM_PROPERTY(std::string, calendar) - TECA_ALGORITHM_PROPERTY(std::string, time_units) - - // set the named callbacks to generate fields on the mesh. A callback - // function must have the signature f(x,y,z,t). - TECA_ALGORITHM_VECTOR_PROPERTY(field_generator_t, field_generator); /** Set the time units and calendar from a metadata object following the * conventions defined by the teca_cf_reader. This provides an easy way to @@ -111,13 +195,36 @@ class teca_cartesian_mesh_source : public teca_algorithm * requisite information. */ int set_calendar(const teca_metadata &md); + ///@} + /** @anchor time_units + * @name time_units + * Set/get the calendar. The default is "seconds since 1970-01-01 00:00:00". + */ + ///@{ + TECA_ALGORITHM_PROPERTY(std::string, time_units) + ///@} - // set a callback function f(x,y,z,t) that generates a field named name - // x,y,z are coordinate axes in variant arrays, t is the double precision - // time value. 
+ /** @anchor append_field_generator + * @name append_field_generator + * set a callback function f(x,y,z,t) that generates a field named name + * x,y,z are coordinate axes in variant arrays, t is the double precision + * time value. + */ + ///@{ void append_field_generator(const std::string &name, const teca_metadata &atts, field_generator_callback &callback); + ///@} + + /** @anchor field_generator + * @name field_generator + * Set/get the named callbacks that generate fields on the mesh. These + * should be packaged in the field_generator struct so that field name + * and attributes for I/O are provided together with the callback. + */ + ///@{ + TECA_ALGORITHM_VECTOR_PROPERTY(field_generator_t, field_generator) + ///@} protected: teca_cartesian_mesh_source(); From 3b8ade8b70d6cca38c0337f1099f50858be1e186 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 11 Mar 2021 08:01:27 -0800 Subject: [PATCH 053/180] coordinate_util linear interpolation fix adds a check to avoid divide by 0 at domain boundry --- data/teca_coordinate_util.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/data/teca_coordinate_util.h b/data/teca_coordinate_util.h index 04a7642e9..1f0f2b907 100644 --- a/data/teca_coordinate_util.h +++ b/data/teca_coordinate_util.h @@ -433,15 +433,15 @@ int interpolate_linear(CT cx, CT cy, CT cz, return -1; } - // get i,j of node greater than cx,cy + // get i,j of node greater than cx,cy,cz unsigned long ii = std::min(i + 1, ihi); unsigned long jj = std::min(j + 1, jhi); unsigned long kk = std::min(k + 1, khi); // compute weights - CT wx = (cx - p_x[i])/(p_x[ii] - p_x[i]); - CT wy = (cy - p_y[i])/(p_y[ii] - p_y[i]); - CT wz = (cz - p_z[i])/(p_z[ii] - p_z[i]); + CT wx = ii == i ? 0 : (cx - p_x[i])/(p_x[ii] - p_x[i]); + CT wy = jj == j ? 0 : (cy - p_y[j])/(p_y[jj] - p_y[j]); + CT wz = kk == k ? 
0 : (cz - p_z[k])/(p_z[kk] - p_z[k]); CT vx = CT(1) - wx; CT vy = CT(1) - wy; From bd9b24fae0bb798b9aac0f3fbd1f22811e72ab73 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 11 Mar 2021 08:49:29 -0800 Subject: [PATCH 054/180] coordinate_util add 2D x-y interpolation overloads --- data/teca_coordinate_util.h | 138 ++++++++++++++++++++++++++++++++++-- 1 file changed, 133 insertions(+), 5 deletions(-) diff --git a/data/teca_coordinate_util.h b/data/teca_coordinate_util.h index 1f0f2b907..4ab9fcfca 100644 --- a/data/teca_coordinate_util.h +++ b/data/teca_coordinate_util.h @@ -374,8 +374,16 @@ void get_table_offsets(const int_t *index, unsigned long n_rows, offsets[i] = offsets[i-1] + counts[i-1]; } -// 0 order (nearest neighbor) interpolation -// for nodal data on stretched cartesian mesh. +/** 0th order (nearest neighbor) interpolation for nodal data on a stretched + * cartesian mesh. This overload implements the general 3D case. + * cx, cy, cz is the location to interpolate to + * p_x, p_y, p_z array arrays containing the source cooridnates with extents + * [0, ihi, 0, jhi, 0, khi] + * p_data is the field to interpolate from + * val is the result + * returns 0 if successful, an error occurs if cx, cy, cz is outside of the + * source coordinate system + */ template int interpolate_nearest(CT cx, CT cy, CT cz, const CT *p_x, const CT *p_y, const CT *p_z, @@ -411,8 +419,58 @@ int interpolate_nearest(CT cx, CT cy, CT cz, return 0; } -// 1 order (linear) interpolation -// for nodal data on stretched cartesian mesh. +/** 0th order (nearest neighbor) interpolation for nodal data on a stretched + * cartesian mesh. This overload implements the special case where both source + * and targent mesh data are in a 2D x-y plane using fewer operations than the + * general 3D implementation. 
+ * cx, cy, cz is the location to interpolate to + * p_x, p_y, p_z array arrays containing the source cooridnates with extents + * [0, ihi, 0, jhi, 0, khi] + * p_data is the field to interpolate from + * val is the result + * returns 0 if successful, an error occurs if cx, cy, cz is outside of the + * source coordinate system + */ +template +int interpolate_nearest(coord_t cx, coord_t cy, const coord_t *p_x, + const coord_t *p_y, const data_t *p_data, unsigned long ihi, + unsigned long jhi, unsigned long nx, data_t &val) +{ + // get i,j of node less than cx,cy + unsigned long i = 0; + unsigned long j = 0; + + if ((ihi && teca_coordinate_util::index_of(p_x, 0, ihi, cx, true, i)) + || (jhi && teca_coordinate_util::index_of(p_y, 0, jhi, cy, true, j))) + { + // cx,cy is outside the coordinate axes + return -1; + } + + // get i,j of node greater than cx,cy + unsigned long ii = std::min(i + 1, ihi); + unsigned long jj = std::min(j + 1, jhi); + + // get index of nearest node + unsigned long p = (cx - p_x[i]) <= (p_x[ii] - cx) ? i : ii; + unsigned long q = (cy - p_y[j]) <= (p_y[jj] - cy) ? j : jj; + + // assign value from nearest node + val = p_data[p + nx*q]; + + return 0; +} + +/** 1st order (linear) interpolation for nodal data on stretched cartesian + * mesh. This overload implements the general 3D case. + * cx, cy, cz is the location to interpolate to + * p_x, p_y, p_z array arrays containing the source cooridnates with extents + * [0, ihi, 0, jhi, 0, khi] + * p_data is the field to interpolate from + * val is the result + * returns 0 if successful, an error occurs if cx, cy, cz is outside of the + * source coordinate system + */ template int interpolate_linear(CT cx, CT cy, CT cz, const CT *p_x, const CT *p_y, const CT *p_z, @@ -460,11 +518,60 @@ int interpolate_linear(CT cx, CT cy, CT cz, return 0; } -// functor templated on order of accuracy for above Cartesian mesh interpolants +/** 1st order (linear) interpolation for nodal data on stretched cartesian mesh. 
+ * This overload implements the special case where both source and target data + * are in a 2D x-y plane using fewer operations than the general 3D + * implementation. + * cx, cy, cz is the location to interpolate to + * p_x, p_y, p_z array arrays containing the source cooridnates with extents + * [0, ihi, 0, jhi, 0, khi] + * p_data is the field to interpolate from + * val is the result + * returns 0 if successful, an error occurs if cx, cy, cz is outside of the + * source coordinate system + */ +template +int interpolate_linear(CT cx, CT cy, const CT *p_x, const CT *p_y, + const DT *p_data, unsigned long ihi, unsigned long jhi, + unsigned long nx, DT &val) +{ + // get i,j of node less than cx,cy + unsigned long i = 0; + unsigned long j = 0; + + if ((ihi && teca_coordinate_util::index_of(p_x, 0, ihi, cx, true, i)) + || (jhi && teca_coordinate_util::index_of(p_y, 0, jhi, cy, true, j))) + { + // cx,cy is outside the coordinate axes + return -1; + } + + // get i,j of node greater than cx,cy + unsigned long ii = std::min(i + 1, ihi); + unsigned long jj = std::min(j + 1, jhi); + + // compute weights + CT wx = ii == i ? 0 : (cx - p_x[i])/(p_x[ii] - p_x[i]); + CT wy = jj == j ? 
0 : (cy - p_y[j])/(p_y[jj] - p_y[j]); + + CT vx = CT(1) - wx; + CT vy = CT(1) - wy; + + // interpolate + val = vx*vy*p_data[ i + j*nx] + + wx*vy*p_data[ii + j*nx] + + wx*wy*p_data[ii + jj*nx] + + vx*wy*p_data[ i + jj*nx]; + + return 0; +} + +// A functor templated on order of accuracy for above Cartesian mesh interpolants template struct interpolate_t; template<> struct interpolate_t<0> { + // 3D template int operator()(CT tx, CT ty, CT tz, const CT *sx, const CT *sy, const CT *sz, const DT *sa, unsigned long ihi, unsigned long jhi, @@ -473,10 +580,21 @@ template<> struct interpolate_t<0> return teca_coordinate_util::interpolate_nearest(tx,ty,tz, sx,sy,sz,sa, ihi,jhi,khi, nx,nxy, ta); } + + // 2D x-y plane + template + int operator()(CT tx, CT ty, const CT *sx, const CT *sy, + const DT *sa, unsigned long ihi, unsigned long jhi, + unsigned long nx, DT &ta) + { + return teca_coordinate_util::interpolate_nearest(tx,ty, + sx,sy,sa, ihi,jhi, nx, ta); + } }; template<> struct interpolate_t<1> { + // 3D template int operator()(CT tx, CT ty, CT tz, const CT *sx, const CT *sy, const CT *sz, const DT *sa, unsigned long ihi, unsigned long jhi, @@ -485,6 +603,16 @@ template<> struct interpolate_t<1> return teca_coordinate_util::interpolate_linear(tx,ty,tz, sx,sy,sz,sa, ihi,jhi,khi, nx,nxy, ta); } + + // 2D x-y plane + template + int operator()(CT tx, CT ty, const CT *sx, const CT *sy, + const DT *sa, unsigned long ihi, unsigned long jhi, + unsigned long nx, DT &ta) + { + return teca_coordinate_util::interpolate_linear(tx,ty, + sx,sy,sa, ihi,jhi, nx, ta); + } }; // return 0 if the centering is one of the values defined From 8eaf0098dff3ab52eea0a01ca648bd27b5d16369 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 11 Mar 2021 08:59:10 -0800 Subject: [PATCH 055/180] cartesian_mesh_regrid interpolate all requested arrays --- alg/teca_cartesian_mesh_regrid.cxx | 9 ++++++++- alg/teca_cartesian_mesh_regrid.h | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git 
a/alg/teca_cartesian_mesh_regrid.cxx b/alg/teca_cartesian_mesh_regrid.cxx index c8bd5d6ab..4148cd338 100644 --- a/alg/teca_cartesian_mesh_regrid.cxx +++ b/alg/teca_cartesian_mesh_regrid.cxx @@ -351,7 +351,7 @@ const_p_teca_dataset teca_cartesian_mesh_regrid::execute( // get the list of arrays to move std::vector req_arrays; - request.get("regrid_arrays", req_arrays); + request.get("arrays", req_arrays); // add any explicitly named std::copy(this->arrays.begin(), this->arrays.end(), @@ -369,6 +369,13 @@ const_p_teca_dataset teca_cartesian_mesh_regrid::execute( source_arrays.push_back(*it); } + // catch a user error + if (!source_arrays.size() && + teca_mpi_util::mpi_rank_0(this->get_communicator())) + { + TECA_WARNING("No arrays will be interpolated") + } + // move the arrays const_p_teca_variant_array target_xc = target->get_x_coordinates(); const_p_teca_variant_array target_yc = target->get_y_coordinates(); diff --git a/alg/teca_cartesian_mesh_regrid.h b/alg/teca_cartesian_mesh_regrid.h index f9ae85da2..6e09126e1 100644 --- a/alg/teca_cartesian_mesh_regrid.h +++ b/alg/teca_cartesian_mesh_regrid.h @@ -21,7 +21,7 @@ By default the first input is the target mesh. the second input is the source mesh. This can be changed by setting the target_input property. the arrays to move from source to target can be selected using add_array api or -in the request key regrid_source_arrays. this is a spatial regriding operation +in the request key "arrays". this is a spatial regriding operation for temporal regriding see teca_mesh_temporal_regrid. 
*/ class teca_cartesian_mesh_regrid : public teca_algorithm From 7fb68cd22c0f9e37b324be1b019270223cf95c2a Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 11 Mar 2021 14:39:48 -0800 Subject: [PATCH 056/180] cartesian_mesh_regrid use nearest neighbor for integer data --- alg/teca_cartesian_mesh_regrid.cxx | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/alg/teca_cartesian_mesh_regrid.cxx b/alg/teca_cartesian_mesh_regrid.cxx index 4148cd338..92f7cf210 100644 --- a/alg/teca_cartesian_mesh_regrid.cxx +++ b/alg/teca_cartesian_mesh_regrid.cxx @@ -18,6 +18,27 @@ using std::endl; //#define TECA_DEBUG +// always use nearest neighbor interpolation for integers +// to avoid truncation errors. an alternative would be to +// implement rounding in the interpolator for integer types +template +int get_interpolation_mode(int desired_mode, + typename std::enable_if::value>::type* = 0) +{ + (void)desired_mode; + return teca_cartesian_mesh_regrid::nearest; +} + +// use the requested interpolation mode for floating point +// data +template +int get_interpolation_mode(int desired_mode, + typename std::enable_if::value>::type* = 0) +{ + return desired_mode; +} + + template int interpolate(unsigned long target_nx, unsigned long target_ny, unsigned long target_nz, const NT1 *p_target_xc, const NT1 *p_target_yc, @@ -64,7 +85,7 @@ int interpolate(int mode, unsigned long target_nx, unsigned long target_ny, using nearest_interp_t = teca_coordinate_util::interpolate_t<0>; using linear_interp_t = teca_coordinate_util::interpolate_t<1>; - switch (mode) + switch (get_interpolation_mode(mode)) { case teca_cartesian_mesh_regrid::nearest: return interpolate( From f8eacb2c37e9a807a7104bba650de93baa753093 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 11 Mar 2021 14:40:30 -0800 Subject: [PATCH 057/180] cartesian_mesh_regrid optimization for 2D x-y planar data --- alg/teca_cartesian_mesh_regrid.cxx | 141 +++++++++++++++++++++++------ 1 file 
changed, 111 insertions(+), 30 deletions(-) diff --git a/alg/teca_cartesian_mesh_regrid.cxx b/alg/teca_cartesian_mesh_regrid.cxx index 92f7cf210..e248ef8dd 100644 --- a/alg/teca_cartesian_mesh_regrid.cxx +++ b/alg/teca_cartesian_mesh_regrid.cxx @@ -39,6 +39,7 @@ int get_interpolation_mode(int desired_mode, } +// 3D template int interpolate(unsigned long target_nx, unsigned long target_ny, unsigned long target_nz, const NT1 *p_target_xc, const NT1 *p_target_yc, @@ -74,31 +75,100 @@ int interpolate(unsigned long target_nx, unsigned long target_ny, return 0; } +// 2D - x-y +template +int interpolate(unsigned long target_nx, unsigned long target_ny, + const NT1 *p_target_xc, const NT1 *p_target_yc, + NT3 *p_target_a, const NT2 *p_source_xc, + const NT2 *p_source_yc, const NT3 *p_source_a, + unsigned long source_ihi, unsigned long source_jhi, + unsigned long source_nx) +{ + interp_t f; + unsigned long q = 0; + for (unsigned long j = 0; j < target_ny; ++j) + { + NT2 ty = static_cast(p_target_yc[j]); + for (unsigned long i = 0; i < target_nx; ++i, ++q) + { + NT2 tx = static_cast(p_target_xc[i]); + if (f(tx,ty, + p_source_xc, p_source_yc, + p_source_a, source_ihi, source_jhi, + source_nx, p_target_a[q])) + { + TECA_ERROR("failed to interpolate i=(" << i << ", " << j + << ") x=(" << tx << ", " << ty << ", " << ")") + return -1; + } + } + } + return 0; +} + template int interpolate(int mode, unsigned long target_nx, unsigned long target_ny, - unsigned long target_nz, const taget_coord_t *p_target_xc, const taget_coord_t *p_target_yc, - const taget_coord_t *p_target_zc, array_t *p_target_a, const source_coord_t *p_source_xc, - const source_coord_t *p_source_yc, const source_coord_t *p_source_zc, const array_t *p_source_a, - unsigned long source_ihi, unsigned long source_jhi, unsigned long source_khi, - unsigned long source_nx, unsigned long source_nxy) + unsigned long target_nz, const taget_coord_t *p_target_xc, + const taget_coord_t *p_target_yc, const taget_coord_t 
*p_target_zc, + array_t *p_target_a, const source_coord_t *p_source_xc, + const source_coord_t *p_source_yc, const source_coord_t *p_source_zc, + const array_t *p_source_a, unsigned long source_ihi, unsigned long source_jhi, + unsigned long source_khi, unsigned long source_nx, unsigned long source_ny, + unsigned long source_nz) { using nearest_interp_t = teca_coordinate_util::interpolate_t<0>; using linear_interp_t = teca_coordinate_util::interpolate_t<1>; + unsigned long source_nxy = source_nx*source_ny; + switch (get_interpolation_mode(mode)) { case teca_cartesian_mesh_regrid::nearest: - return interpolate( - target_nx, target_ny, target_nz, p_target_xc, p_target_yc, p_target_zc, - p_target_a, p_source_xc, p_source_yc, p_source_zc, p_source_a, - source_ihi, source_jhi, source_khi, source_nx, source_nxy); + { + if ((target_nz == 1) && (source_nz == 1)) + { + // 2D in the x-y plane + return interpolate( + target_nx, target_ny, p_target_xc, p_target_yc, + p_target_a, p_source_xc, p_source_yc, p_source_a, + source_ihi, source_jhi, source_nx); + } + else + { + // 3D + return interpolate( + target_nx, target_ny, target_nz, p_target_xc, + p_target_yc, p_target_zc, p_target_a, p_source_xc, + p_source_yc, p_source_zc, p_source_a, source_ihi, + source_jhi, source_khi, source_nx, source_nxy); + } break; + } case teca_cartesian_mesh_regrid::linear: - return interpolate( - target_nx, target_ny, target_nz, p_target_xc, p_target_yc, p_target_zc, - p_target_a, p_source_xc, p_source_yc, p_source_zc, p_source_a, - source_ihi, source_jhi, source_khi, source_nx, source_nxy); + { + if ((target_nz == 1) && (source_nz == 1)) + { + // 2D in the x-y plane + return interpolate( + target_nx, target_ny, p_target_xc, p_target_yc, + p_target_a, p_source_xc, p_source_yc, p_source_a, + source_ihi, source_jhi, source_nx); + } + else + { + // 3D + return interpolate( + target_nx, target_ny, target_nz, p_target_xc, + p_target_yc, p_target_zc, p_target_a, p_source_xc, + p_source_yc, p_source_zc, 
p_source_a, source_ihi, + source_jhi, source_khi, source_nx, source_nxy); + } break; + } } TECA_ERROR("invalid interpolation mode \"" << mode << "\"") @@ -107,6 +177,7 @@ int interpolate(int mode, unsigned long target_nx, unsigned long target_ny, + // -------------------------------------------------------------------------- teca_cartesian_mesh_regrid::teca_cartesian_mesh_regrid() : target_input(0), interpolation_mode(nearest) @@ -329,7 +400,6 @@ std::vector teca_cartesian_mesh_regrid::get_upstream_request( return up_reqs; } - // -------------------------------------------------------------------------- const_p_teca_dataset teca_cartesian_mesh_regrid::execute( unsigned int port, const std::vector &input_data, @@ -416,7 +486,6 @@ const_p_teca_dataset teca_cartesian_mesh_regrid::execute( unsigned long source_nx = source_xc->size(); unsigned long source_ny = source_yc->size(); unsigned long source_nz = source_zc->size(); - unsigned long source_nxy = source_nx*source_ny; unsigned long source_ihi = source_nx - 1; unsigned long source_jhi = source_ny - 1; unsigned long source_khi = source_nz - 1; @@ -424,20 +493,20 @@ const_p_teca_dataset teca_cartesian_mesh_regrid::execute( NESTED_TEMPLATE_DISPATCH_FP( const teca_variant_array_impl, target_xc.get(), - 1, + _TGT, - const NT1 *p_target_xc = std::dynamic_pointer_cast(target_xc)->get(); - const NT1 *p_target_yc = std::dynamic_pointer_cast(target_yc)->get(); - const NT1 *p_target_zc = std::dynamic_pointer_cast(target_zc)->get(); + const NT_TGT *p_target_xc = std::dynamic_pointer_cast(target_xc)->get(); + const NT_TGT *p_target_yc = std::dynamic_pointer_cast(target_yc)->get(); + const NT_TGT *p_target_zc = std::dynamic_pointer_cast(target_zc)->get(); NESTED_TEMPLATE_DISPATCH_FP( const teca_variant_array_impl, source_xc.get(), - 2, + _SRC, - const NT2 *p_source_xc = std::dynamic_pointer_cast(source_xc)->get(); - const NT2 *p_source_yc = std::dynamic_pointer_cast(source_yc)->get(); - const NT2 *p_source_zc = 
std::dynamic_pointer_cast(source_zc)->get(); + const NT_SRC *p_source_xc = std::dynamic_pointer_cast(source_xc)->get(); + const NT_SRC *p_source_yc = std::dynamic_pointer_cast(source_yc)->get(); + const NT_SRC *p_source_zc = std::dynamic_pointer_cast(source_zc)->get(); size_t n_arrays = source_arrays.size(); for (size_t i = 0; i < n_arrays; ++i) @@ -449,25 +518,37 @@ const_p_teca_dataset teca_cartesian_mesh_regrid::execute( NESTED_TEMPLATE_DISPATCH( teca_variant_array_impl, target_a.get(), - 3, + _DATA, - const NT3 *p_source_a = std::static_pointer_cast(source_a)->get(); - NT3 *p_target_a = std::static_pointer_cast(target_a)->get(); + const NT_DATA *p_source_a = std::static_pointer_cast(source_a)->get(); + NT_DATA *p_target_a = std::static_pointer_cast(target_a)->get(); if (interpolate(this->interpolation_mode, target_nx, target_ny, target_nz, p_target_xc, p_target_yc, p_target_zc, p_target_a, p_source_xc, p_source_yc, p_source_zc, p_source_a, source_ihi, source_jhi, - source_khi, source_nx, source_nxy)) + source_khi, source_nx, source_ny, source_nz)) { TECA_ERROR("Failed to move \"" << source_arrays[i] << "\"") return nullptr; } - - target_ac->set(source_arrays[i], target_a); ) + else + { + TECA_ERROR("Unsupported array type " << source_a->get_class_name()) } - ) + + target_ac->set(source_arrays[i], target_a); + } ) + else + { + TECA_ERROR("Unsupported coordinate type " << source_xc->get_class_name()) + } + ) + else + { + TECA_ERROR("Unsupported coordinate type " << target_xc->get_class_name()) + } return target; } From d91e0059ad04e7f022d4226f8caa820d567299b2 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Thu, 11 Mar 2021 14:41:21 -0800 Subject: [PATCH 058/180] cf_restripe app coordinate normalization and regriding --- apps/teca_cf_restripe.cpp | 147 ++++++++++++++++++++++++++++++++++---- 1 file changed, 135 insertions(+), 12 deletions(-) diff --git 
a/apps/teca_cf_restripe.cpp +++ b/apps/teca_cf_restripe.cpp @@ -4,6 +4,9 @@ #include "teca_variant_array.h" #include "teca_cf_reader.h" #include "teca_multi_cf_reader.h" +#include "teca_normalize_coordinates.h" +#include "teca_cartesian_mesh_regrid.h" +#include "teca_cartesian_mesh_source.h" #include "teca_cf_writer.h" #include "teca_dataset_diff.h" #include "teca_index_executive.h" @@ -67,8 +70,18 @@ int main(int argc, char **argv) ("steps_per_file", value(), "\nnumber of time steps per output file\n") + ("normalize_coordinates", "\nEnable coordinate normalization pipeline stage\n") + + ("regrid", "\nEnable mesh regridding pipeline stage. When enabled requires --dims" + " to be provided\n") + + ("dims", value>()->multitoken(), + "\nA 3-tuple of values specifying the mesh size of the output dataset in the x, y," + " and z dimensions. The accepted format for dimensions is: nx ny nz\n") + ("bounds", value>()->multitoken(), - "\nlat lon lev bounding box to subset with\n") + "\nA hex-tuple of low and high values specifying lon lat lev bounding box to subset" + " the input dataset with. The accepted format for bounds is: x0 x1 y0 y1 z0 z1\n") ("first_step", value(), "\nfirst time step to process\n") ("last_step", value(), "\nlast time step to process\n") @@ -97,13 +110,23 @@ int main(int argc, char **argv) // documentation and parse command line. // objects report all of their properties directly // set default options here so that command line options override - // them. while we are at it connect the pipeline + // them. 
p_teca_cf_reader cf_reader = teca_cf_reader::New(); cf_reader->get_properties_description("cf_reader", advanced_opt_defs); p_teca_multi_cf_reader mcf_reader = teca_multi_cf_reader::New(); mcf_reader->get_properties_description("mcf_reader", advanced_opt_defs); + p_teca_normalize_coordinates norm_coords = teca_normalize_coordinates::New(); + norm_coords->get_properties_description("norm_coords", advanced_opt_defs); + + p_teca_cartesian_mesh_regrid regrid = teca_cartesian_mesh_regrid::New(); + regrid->set_interpolation_mode_linear(); + regrid->get_properties_description("regrid", advanced_opt_defs); + + p_teca_cartesian_mesh_source regrid_src = teca_cartesian_mesh_source::New(); + regrid_src->get_properties_description("regrid_source", advanced_opt_defs); + p_teca_cf_writer cf_writer = teca_cf_writer::New(); cf_writer->get_properties_description("cf_writer", advanced_opt_defs); @@ -131,6 +154,9 @@ int main(int argc, char **argv) // options will override them cf_reader->set_properties("cf_reader", opt_vals); mcf_reader->set_properties("mcf_reader", opt_vals); + norm_coords->set_properties("norm_coords", opt_vals); + regrid->set_properties("regrid", opt_vals); + regrid_src->set_properties("regrid_source", opt_vals); cf_writer->set_properties("cf_writer", opt_vals); // now pass in the basic options, these are processed @@ -189,9 +215,50 @@ int main(int argc, char **argv) if (opt_vals.count("last_step")) cf_writer->set_last_step(opt_vals["last_step"].as()); - if (opt_vals.count("bounds")) - exec->set_bounds( - opt_vals["bounds"].as>()); + std::vector bounds; + bool have_bounds = opt_vals.count("bounds"); + if (have_bounds) + { + bounds = + opt_vals["bounds"].as>(); + + if (bounds.size() != 6) + { + TECA_ERROR("An invalid bounds specification was provided in" + " --bounds, size != 6. Use: --bounds x0 x1 y0 y1 z0 z1") + return -1; + } + } + + bool do_regrid = opt_vals.count("regrid"); + + // when not regriding let the executive subset. 
when regriding + // the regrid algorithm handles subsetting and the executive should + // request the entire domain. + if (have_bounds && !do_regrid) + exec->set_bounds(bounds); + + // when regriding target mesh dimensions must be provided + std::vector dims; + if (do_regrid) + { + if (opt_vals.count("dims")) + { + dims = opt_vals["dims"].as>(); + if (dims.size() != 3) + { + TECA_ERROR("An invalid dimension specification was provided in" + " --dims, size != 3. Use: --dims nx ny nz") + return -1; + } + } + else + { + TECA_ERROR("The --regrid option requires that --dims" + " also be specified") + return -1; + } + } if (opt_vals.count("verbose")) { @@ -226,10 +293,21 @@ int main(int argc, char **argv) return -1; } + // add the normalize coordinates stage before accessing metadata + p_teca_algorithm head = reader; + if (opt_vals.count("normalize_coordinates")) + { + norm_coords->set_input_connection(reader->get_output_port()); + head = norm_coords; + } + // if no point arrays were specified on the command line by default // write all point arrays teca_metadata md; teca_metadata atts; + teca_metadata time_atts; + std::string calendar; + std::string units; // TODO -- this will need some more work in the reader as currently // all arrays are marked as being point centered, but here we need + // to identify only the arrays on the mesh. 
@@ -237,7 +315,7 @@ int main(int argc, char **argv) { // run the reporting phase of the pipeline if (md.empty()) - md = cf_reader->update_metadata(); + md = head->update_metadata(); // if array attributes are present, use them to locate the set of // point centered arrrays @@ -275,7 +353,7 @@ int main(int argc, char **argv) { // run the reporting phase of the pipeline if (md.empty()) - md = cf_reader->update_metadata(); + md = head->update_metadata(); if (atts.empty() && md.get("attributes", atts)) { @@ -283,9 +361,6 @@ int main(int argc, char **argv) return -1; } - teca_metadata time_atts; - std::string calendar; - std::string units; if (atts.get("time", time_atts) || time_atts.get("calendar", calendar) || time_atts.get("units", units)) @@ -335,8 +410,56 @@ int main(int argc, char **argv) } } - // connect the pipeline - cf_writer->set_input_connection(reader->get_output_port()); + // set up regridding + if (do_regrid) + { + // run the reporting phase of the pipeline, the resulting metadata + // can be used to automatically determine the calendaring parameters + // and spatial bounds + if (md.empty()) + md = head->update_metadata(); + + // if possible use the calendar of the input dataset + if (regrid_src->set_calendar(md)) + { + // fallback to the standard calendar and an arbitary + // reference date + regrid_src->set_calendar("standard"); + regrid_src->set_time_units("days since 1800-01-01 00:00:00"); + } + + // to construct the target mesh we need bounds. if no bounds are + // specified on the command line use those of the input dataset and + // error out if that fails + if (have_bounds) + { + // extend to include time + bounds.resize(8, 0.0); + regrid_src->set_bounds(bounds); + } + else + { + // try to determine the bounds from the input mesh metadata + if (regrid_src->set_spatial_bounds(md)) + { + TECA_ERROR("Failed to determine target mesh bounds from the" + " input metadata. 
Use --bounds to specify them manually.") + return -1; + } + } + + // set the target mesh dimensions + regrid_src->set_whole_extents({0lu, dims[0] - 1lu, + 0lu, dims[1] - 1lu, 0lu, dims[2] - 1lu, 0lu, 0lu}); + + // connect to the pipeline + regrid->set_input_connection(0, regrid_src->get_output_port()); + regrid->set_input_connection(1, head->get_output_port()); + head = regrid; + } + + // add the writer last + cf_writer->set_input_connection(head->get_output_port()); // run the pipeline cf_writer->set_executive(exec); From ba0dd7b34f6ca36ee874800725f2921a657aaf72 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Fri, 12 Mar 2021 11:47:36 -0800 Subject: [PATCH 059/180] travis-ci test switch from mpich to openmpi on macos cmake is failing to locate the latest brew'd mpich. I can reproduce this on my laptop. --- test/travis_ci/install_osx.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/travis_ci/install_osx.sh b/test/travis_ci/install_osx.sh index afd64e660..ec2910982 100755 --- a/test/travis_ci/install_osx.sh +++ b/test/travis_ci/install_osx.sh @@ -7,8 +7,9 @@ export PATH=/usr/local/bin:$PATH # install deps. note that many are included as a part of brew-core # these days. hence this list isn't comprehensive brew update +brew upgrade brew unlink python@2 -brew install mpich swig svn udunits openssl python@3.8 curl +brew install openmpi swig svn udunits openssl python@3.8 curl cmake brew unlink python brew link --force python@3.8 brew link curl --force From 7c0ee5b804f2703074fc349296c986e7d4ab67a9 Mon Sep 17 00:00:00 2001 From: Burlen Loring Date: Sun, 14 Mar 2021 16:33:39 -0700 Subject: [PATCH 060/180] travis-ci disable the mpi trajectory scalars app test in this test python is failing to find mpi4py when teca is imported. many other tests do the same and run successfully. only occurs with openmpi. 
--- test/apps/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/apps/CMakeLists.txt b/test/apps/CMakeLists.txt index 0cc400b38..8abb36140 100644 --- a/test/apps/CMakeLists.txt +++ b/test/apps/CMakeLists.txt @@ -191,7 +191,7 @@ teca_add_app_test(test_tc_trajectory_scalars_app_mpi teca_tc_trajectory_scalars COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test_tc_trajectory_scalars_app.sh ${CMAKE_BINARY_DIR}/${BIN_PREFIX} ${TECA_DATA_ROOT} ${MPIEXEC} ${TEST_CORES} - FEATURES ${TECA_HAS_MPI} ${MPI4Py_FOUND} + FEATURES ${TECA_HAS_MPI} ${MPI4Py_FOUND} ${TEST_MPI_THREADS} REQ_TECA_DATA) teca_add_app_test(test_tc_wind_radii_stats_app teca_tc_wind_radii_stats From 685f850ef4665b3b79b5cc1ae2d0941f5de616be Mon Sep 17 00:00:00 2001 From: Abdelrahman Elbashandy Date: Thu, 11 Mar 2021 09:32:15 -0800 Subject: [PATCH 061/180] Supporting Doxygen as part of the RTD build --- doc/rtd/Doxyfile | 2538 ++++++++++++++++++++++++++++++++++++++++++++++ doc/rtd/conf.py | 10 + 2 files changed, 2548 insertions(+) create mode 100644 doc/rtd/Doxyfile diff --git a/doc/rtd/Doxyfile b/doc/rtd/Doxyfile new file mode 100644 index 000000000..560db113f --- /dev/null +++ b/doc/rtd/Doxyfile @@ -0,0 +1,2538 @@ +# Doxyfile 1.8.20 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). 
+ +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "TECA" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. 
If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = _build + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. 
+# Possible values are: None, LTR, RTL and Context. +# The default value is: None. + +OUTPUT_TEXT_DIRECTION = None + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. 
Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = ../../.. + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. 
If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. 
+ +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +ALIASES = "rst=\verbatim embed:rst" +ALIASES += "endrst=\endverbatim" + +ALIASES += "rststar=\verbatim embed:rst:leading-asterisk" +ALIASES += "endrststar=\endverbatim" + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. 
Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
+# sources only. Doxygen will then generate output that is more tailored for that
+# language. For instance, namespaces will be presented as modules, types will be
+# separated into more groups, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_SLICE = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
+# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
+# tries to guess whether the code is fixed or free formatted code, this is the
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See https://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 5
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+ +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. 
Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. 
If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will base this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS = 1
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+ +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = NO + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. 
If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# (including Cygwin) and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. 
If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. 
+ +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. 
+# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). 
+# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. 
See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. 
+ +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = ../../alg \ + ../../core \ + ../../data \ + ../../io + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: https://www.gnu.org/software/libiconv/) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. 
+# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), +# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen +# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.h \ + *.py + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) 
that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. 
+# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. 
+ +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. 
+ +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. 
+ +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html/api + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. 
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. 
+# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: https://developer.apple.com/xcode/), introduced with OSX +# 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. 
A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. 
Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the main .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. 
For more information please see Qt Help Project / Custom +# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. 
+ +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. 
+# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# https://www.mathjax.org) which uses client side JavaScript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = YES + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. 
However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment.
+# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/