diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 4284765e..564fe979 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -5,8 +5,9 @@ on: branches: [ master ] pull_request: branches: [ master ] - schedule: - - cron: "0 0 * * *" +# don't run the CRON part in my fork +# schedule: +# - cron: "0 0 * * *" workflow_dispatch: diff --git a/.github/workflows/run_cmake.yaml b/.github/workflows/run_cmake.yaml new file mode 100644 index 00000000..76d7d94f --- /dev/null +++ b/.github/workflows/run_cmake.yaml @@ -0,0 +1,83 @@ +# This workflow carries out the build of the package with CMake + +name: CMake build + +on: + push: + workflow_dispatch: + +jobs: + build-GNU: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install gfortran libopenmpi-dev cmake python3-dev pip + pip install scipy matplotlib getdist anesthetic ipython mpi4py + + - name: Run CMake + run: cmake -B build + + - name: Build + run: make -C build install + + - name: Test pypolychord + run: python run_pypolychord.py + + - name: Test pypolychord (MPI) + run: mpirun -np 2 python run_pypolychord.py + + build-Intel: + runs-on: ubuntu-latest + container: + image: intel/oneapi-hpckit + + steps: + - uses: actions/checkout@v2 + + - name: Run CMake + run: cmake -B build -DCMAKE_Fortran_COMPILER=ifort -DCMAKE_CXX_COMPILER=icpc + + - name: Build + run: make -C build install + + - name: Test pypolychord + run: python run_pypolychord.py + + - name: Test pypolychord (MPI) + run: mpirun -np 2 python run_pypolychord.py + + build-MacOS: + runs-on: macos-latest + strategy: + matrix: + cc: [ gcc, clang ] + + steps: + - uses: actions/checkout@v2 + + - name: Install dependencies + run: | + brew install cmake gcc openmpi + pip3 install numpy scipy matplotlib getdist anesthetic ipython mpi4py + + - name: Run CMake - GCC + if: ${{ matrix.cc == 'gcc' }} + run: cmake -B build 
-DCMAKE_Fortran_COMPILER=gfortran-11 -DCMAKE_CXX_COMPILER=gcc-11 + + - name: Run CMake - AppleClang + if: ${{ matrix.cc == 'clang' }} + run: cmake -B build -DCMAKE_Fortran_COMPILER=gfortran-11 + + - name: Build + run: make -C build install + + - name: Test pypolychord + run: python run_pypolychord.py + + - name: Test pypolychord (MPI) + run: mpirun -np 2 python run_pypolychord.py \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 00000000..9ef355e7 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,145 @@ +cmake_minimum_required(VERSION 3.4) + +project(PolyChordLite Fortran) + +# languages +enable_language(CXX) +enable_language(Fortran) + +# options +option(MPI "Build with MPI" ON) +option(python "Build PyPolyChord package" ON) +option(python_user_install "Install Python package for user (--user)" ON) + +# enforce matching of the CXX and Fortran compilers +if (NOT "${CMAKE_Fortran_COMPILER_ID}" MATCHES "${CMAKE_CXX_COMPILER_ID}" + AND NOT ("${CMAKE_Fortran_COMPILER_ID}" MATCHES "GNU" AND + "${CMAKE_CXX_COMPILER_ID}" MATCHES "AppleClang")) + message(FATAL_ERROR "You need to use the same vendor for your C++ and Fortran compiler") +endif () + +# flags for all three compiler types +# todo: inspect the flags and implement in a nicer way +if ("${CMAKE_Fortran_COMPILER_ID}" MATCHES "Intel") + message("Using INTEL compilers") + + set(CMAKE_Fortran_FLAGS "-fpp -fpic -assume noold_maxminloc -heap-arrays -ipo -O3 -no-prec-div -xHost -w -vec-report0 -qopt-report0") + set(CMAKE_CXX_FLAGS "-std=c++11 -fpic -ipo -O3 -no-prec-div -xHost -w -vec-report0 -qopt-report0") + + add_compile_options("-nofor-main") + link_libraries("-nofor-main") + +elseif ("${CMAKE_Fortran_COMPILER_ID}" MATCHES "GNU") + message("Using GNU compilers") + + set(CMAKE_Fortran_FLAGS "-ffree-line-length-none -cpp -fPIC -fno-stack-arrays -Ofast") + set(CMAKE_CXX_FLAGS "-std=c++11 -fPIC -Ofast") + + # for GitHub actions MacOS -- debug only + add_compile_options("-lstdc++") 
+ link_libraries("-lstdc++") + + if (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER 10.0) + # GCC 10 is complaining about MPI interfaces, turn this to warnings + SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fallow-argument-mismatch") + endif () + +elseif ("${CMAKE_Fortran_COMPILER_ID}" MATCHES "Cray") + message("Using Cray compilers") + + set(CMAKE_Fortran_FLAGS "-fpp -fpic -qopenmp -dynamic") + set(CMAKE_CXX_FLAGS "-fpic -qopenmp -dynamic") + +endif () + +# find MPI & link to everything from now on +if (MPI) + find_package(MPI REQUIRED) + message(STATUS "MPI_Fortran_COMPILER: ${MPI_Fortran_COMPILER}") + link_libraries(MPI::MPI_Fortran) + link_libraries(MPI::MPI_CXX) + + # definitions + add_compile_definitions(USE_MPI) +endif (MPI) + +# this will be module building directory, allows linking +set(CMAKE_Fortran_MODULE_DIRECTORY ${PROJECT_BINARY_DIR}/generated/mods) + +# on installation we get this prefix as well +set(CMAKE_INSTALL_PREFIX ${PROJECT_BINARY_DIR}) + +# target directories -> stuff installed in these directories +set(target_lib_dir ${PROJECT_SOURCE_DIR}/lib) +set(target_bin_dir ${PROJECT_SOURCE_DIR}/bin) + +# directories where we want to make things +add_subdirectory(src) +add_subdirectory(likelihoods) + +if (python) + ################################################################# + # The Compiled .so file + ################################################################# + + # PyPolyChord stuff + message(STATUS "Enabling Python package build") + find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) + + # find out the suffix for the current python version + EXECUTE_PROCESS( + COMMAND ${Python3_EXECUTABLE} "-c" "import sysconfig;print(sysconfig.get_config_var('EXT_SUFFIX'))" + OUTPUT_VARIABLE python_so_suffix + ) + STRING(REPLACE "\n" "" python_so_suffix ${python_so_suffix}) + message(STATUS "Python shared object suffix is: ${python_so_suffix}") + + # the extension module + add_library(_pypolychord SHARED 
pypolychord/_pypolychord.cpp $<TARGET_OBJECTS:objlib_chord>) + set_target_properties(_pypolychord + PROPERTIES + OUTPUT_NAME _pypolychord + PREFIX "" + SUFFIX "${python_so_suffix}" + ) + # dependencies and includes + target_link_libraries(_pypolychord Python3::Python Python3::NumPy) + target_include_directories(_pypolychord PUBLIC src/polychord) # PolyChord headers for python extensions + + # remember the place of this + set(target_python_so ${CMAKE_CURRENT_BINARY_DIR}/_pypolychord${python_so_suffix}) + + # place all outputs in one place + install(TARGETS _pypolychord + LIBRARY DESTINATION ${target_lib_dir}) + + ################################################################# + # The simple python module + ################################################################# + + # hack for version + set(PACKAGE_VERSION "1.21.0") + + set(SETUP_PY_IN "${CMAKE_CURRENT_SOURCE_DIR}/setup_cmake_template.py") + set(SETUP_PY "${CMAKE_CURRENT_BINARY_DIR}/setup.py") + set(DEPS "${CMAKE_CURRENT_SOURCE_DIR}/pypolychord/__init__.py") + set(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/build/timestamp") # dummy target for dependence + + # fill in the variables of the setup.py file + configure_file(${SETUP_PY_IN} ${SETUP_PY}) + + # custom target + add_custom_command(OUTPUT ${OUTPUT} + COMMAND ${Python3_EXECUTABLE} ${SETUP_PY} build + COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT} + DEPENDS ${DEPS} _pypolychord) + add_custom_target(target ALL DEPENDS ${OUTPUT}) + + # installation with setup.py + if (python_user_install) + install(CODE "execute_process(COMMAND ${Python3_EXECUTABLE} ${SETUP_PY} install --user)") + else (python_user_install) + install(CODE "execute_process(COMMAND ${Python3_EXECUTABLE} ${SETUP_PY} install)") + endif (python_user_install) + +endif (python) diff --git a/Makefile b/Makefile index 8e871ce0..107448bf 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ endif ifeq ($(MPI),1) -FFLAGS += -DMPI +FFLAGS += -DUSE_MPI CXXFLAGS += -DUSE_MPI endif diff --git a/README.rst b/README.rst index 
363f72da..d88147f8 100644 --- a/README.rst +++ b/README.rst @@ -5,8 +5,7 @@ :target: https://arxiv.org/abs/1506.00171 :alt: Open-access paper -PolyChord v 1.20.1 - +PolyChord v 1.21.0 Will Handley, Mike Hobson & Anthony Lasenby wh260@mrao.cam.ac.uk diff --git a/devtools/docker_gnu/Dockerfile b/devtools/docker_gnu/Dockerfile new file mode 100644 index 00000000..3f6881b1 --- /dev/null +++ b/devtools/docker_gnu/Dockerfile @@ -0,0 +1,40 @@ +FROM ubuntu:latest +MAINTAINER "Tamas K Stenczel " + +# Time Zone data +ARG DEBIAN_FRONTEND=noninteractive +ENV TZ=Europe/London + +RUN apt-get -q update && \ + apt-get -qy install --no-install-recommends \ + # general + make \ + gfortran \ + libopenmpi-dev \ + libblas-dev \ + liblapack-dev \ + libfftw3-3 \ + # cmake and tools + cmake vim git wget ca-certificates \ + g++ \ + python3-dev pip \ + && rm -rf /var/lib/apt/lists/* + +RUN pip3 install scipy matplotlib getdist anesthetic ipython mpi4py + +# reinstall newer cmake, same as in the Intel image +ARG cmake_url=https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.sh +ADD $cmake_url / +RUN file=$(basename "$cmake_url") && \ + bash $file --prefix=/usr --skip-license && \ + rm $file + +# DEV use: oh-my-zsh instead of bash +# Uses "robbyrussell" theme (original Oh My Zsh theme), with no plugins +RUN sh -c "$(wget -O- https://github.com/deluan/zsh-in-docker/releases/download/v1.1.1/zsh-in-docker.sh)" -- \ + -t robbyrussell +ENTRYPOINT "/usr/bin/zsh" +WORKDIR /work + +# python executable +RUN echo "alias python=python3" >> /root/.zshrc diff --git a/devtools/docker_intel/Dockerfile b/devtools/docker_intel/Dockerfile new file mode 100644 index 00000000..8c9ee817 --- /dev/null +++ b/devtools/docker_intel/Dockerfile @@ -0,0 +1,107 @@ +FROM intel/oneapi:os-tools-ubuntu18.04 +MAINTAINER "Tamas K Stenczel " + +# Time Zone data +ARG DEBIAN_FRONTEND=noninteractive +ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 +ENV TZ=Europe/London + 
+############################################################# +# Assembled from Intel Base-Kit and HPC-Kit Dockerfiles +# includes my additions of apt packages +############################################################# +# install Intel(R) oneAPI Base Toolkit +RUN apt-get update -y && \ +apt-get install -y --no-install-recommends -o=Dpkg::Use-Pty=0 \ + # Intel compilers and MPI library + intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic \ + intel-oneapi-mpi-devel \ + intel-oneapi-compiler-fortran \ + intel-oneapi-python python3-pip \ + # other tools and libraries + libblas-dev \ + liblapack-dev \ + cmake \ + make \ + vim git wget ca-certificates \ +-- + +RUN pip3 install scipy matplotlib getdist anesthetic ipython mpi4py + +# reinstall newer cmake +ARG cmake_url=https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.sh +ADD $cmake_url / +RUN file=$(basename "$cmake_url") && \ + bash $file --prefix=/usr --skip-license && \ + rm $file + +# from Intel HPC toolkit +# setvars.sh environment variables +ENV ACL_BOARD_VENDOR_PATH='/opt/Intel/OpenCLFPGA/oneAPI/Boards' +ENV ADVISOR_2021_DIR='/opt/intel/oneapi/advisor/2021.3.0' +ENV APM='/opt/intel/oneapi/advisor/2021.3.0/perfmodels' +ENV CCL_CONFIGURATION='cpu_gpu_dpcpp' +ENV CCL_ROOT='/opt/intel/oneapi/ccl/2021.3.0' +ENV CLASSPATH='/opt/intel/oneapi/mpi/2021.3.0//lib/mpi.jar:/opt/intel/oneapi/dal/2021.3.0/lib/onedal.jar' +ENV CLCK_ROOT='/opt/intel/oneapi/clck/2021.3.0' +ENV CMAKE_PREFIX_PATH='/opt/intel/oneapi/vpl/2021.4.0:/opt/intel/oneapi/tbb/2021.3.0/env/..:/opt/intel/oneapi/dal/2021.3.0' +ENV CMPLR_ROOT='/opt/intel/oneapi/compiler/2021.3.0' +ENV CONDA_DEFAULT_ENV='intelpython-python3.7' +ENV CONDA_EXE='/opt/intel/oneapi/intelpython/latest/bin/conda' +ENV CONDA_PREFIX='/opt/intel/oneapi/intelpython/latest' +ENV CONDA_PROMPT_MODIFIER='(intelpython-python3.7) ' +ENV CONDA_PYTHON_EXE='/opt/intel/oneapi/intelpython/latest/bin/python' +ENV CONDA_SHLVL='1' +ENV 
CPATH='/opt/intel/oneapi/vpl/2021.4.0/include:/opt/intel/oneapi/tbb/2021.3.0/env/../include:/opt/intel/oneapi/mpi/2021.3.0//include:/opt/intel/oneapi/mkl/2021.3.0/include:/opt/intel/oneapi/ippcp/2021.3.0/include:/opt/intel/oneapi/ipp/2021.3.0/include:/opt/intel/oneapi/dpl/2021.4.0/linux/include:/opt/intel/oneapi/dnnl/2021.3.0/cpu_dpcpp_gpu_dpcpp/lib:/opt/intel/oneapi/dev-utilities/2021.3.0/include:/opt/intel/oneapi/dal/2021.3.0/include:/opt/intel/oneapi/compiler/2021.3.0/linux/include:/opt/intel/oneapi/ccl/2021.3.0/include/cpu_gpu_dpcpp' +ENV CPLUS_INCLUDE_PATH='/opt/intel/oneapi/clck/2021.3.0/include' +ENV DAALROOT='/opt/intel/oneapi/dal/2021.3.0' +ENV DALROOT='/opt/intel/oneapi/dal/2021.3.0' +ENV DAL_MAJOR_BINARY='1' +ENV DAL_MINOR_BINARY='1' +ENV DNNLROOT='/opt/intel/oneapi/dnnl/2021.3.0/cpu_dpcpp_gpu_dpcpp' +ENV DPL_ROOT='/opt/intel/oneapi/dpl/2021.4.0' +ENV FI_PROVIDER_PATH='/opt/intel/oneapi/mpi/2021.3.0//libfabric/lib/prov:/usr/lib64/libfabric' +ENV FPGA_VARS_ARGS='' +ENV FPGA_VARS_DIR='/opt/intel/oneapi/compiler/2021.3.0/linux/lib/oclfpga' +ENV INFOPATH='/opt/intel/oneapi/debugger/10.1.2/gdb/intel64/lib' +ENV INSPECTOR_2021_DIR='/opt/intel/oneapi/inspector/2021.3.0' +ENV INTELFPGAOCLSDKROOT='/opt/intel/oneapi/compiler/2021.3.0/linux/lib/oclfpga' +ENV INTEL_LICENSE_FILE='/opt/intel/licenses:/root/intel/licenses:/opt/intel/oneapi/clck/2021.3.0/licensing:/opt/intel/licenses:/root/intel/licenses:/Users/Shared/Library/Application Support/Intel/Licenses' +ENV INTEL_PYTHONHOME='/opt/intel/oneapi/debugger/10.1.2/dep' +ENV IPPCP_TARGET_ARCH='intel64' +ENV IPPCRYPTOROOT='/opt/intel/oneapi/ippcp/2021.3.0' +ENV IPPROOT='/opt/intel/oneapi/ipp/2021.3.0' +ENV IPP_TARGET_ARCH='intel64' +ENV I_MPI_ROOT='/opt/intel/oneapi/mpi/2021.3.0' +ENV 
LD_LIBRARY_PATH='/opt/intel/oneapi/vpl/2021.4.0/lib:/opt/intel/oneapi/tbb/2021.3.0/env/../lib/intel64/gcc4.8:/opt/intel/oneapi/mpi/2021.3.0//libfabric/lib:/opt/intel/oneapi/mpi/2021.3.0//lib/release:/opt/intel/oneapi/mpi/2021.3.0//lib:/opt/intel/oneapi/mkl/2021.3.0/lib/intel64:/opt/intel/oneapi/itac/2021.3.0/slib:/opt/intel/oneapi/ippcp/2021.3.0/lib/intel64:/opt/intel/oneapi/ipp/2021.3.0/lib/intel64:/opt/intel/oneapi/dnnl/2021.3.0/cpu_dpcpp_gpu_dpcpp/lib:/opt/intel/oneapi/debugger/10.1.2/gdb/intel64/lib:/opt/intel/oneapi/debugger/10.1.2/libipt/intel64/lib:/opt/intel/oneapi/debugger/10.1.2/dep/lib:/opt/intel/oneapi/dal/2021.3.0/lib/intel64:/opt/intel/oneapi/compiler/2021.3.0/linux/lib:/opt/intel/oneapi/compiler/2021.3.0/linux/lib/x64:/opt/intel/oneapi/compiler/2021.3.0/linux/lib/emu:/opt/intel/oneapi/compiler/2021.3.0/linux/lib/oclfpga/host/linux64/lib:/opt/intel/oneapi/compiler/2021.3.0/linux/lib/oclfpga/linux64/lib:/opt/intel/oneapi/compiler/2021.3.0/linux/compiler/lib/intel64_lin:/opt/intel/oneapi/ccl/2021.3.0/lib/cpu_gpu_dpcpp' +ENV LIBRARY_PATH='/opt/intel/oneapi/vpl/2021.4.0/lib:/opt/intel/oneapi/tbb/2021.3.0/env/../lib/intel64/gcc4.8:/opt/intel/oneapi/mpi/2021.3.0//libfabric/lib:/opt/intel/oneapi/mpi/2021.3.0//lib/release:/opt/intel/oneapi/mpi/2021.3.0//lib:/opt/intel/oneapi/mkl/2021.3.0/lib/intel64:/opt/intel/oneapi/ippcp/2021.3.0/lib/intel64:/opt/intel/oneapi/ipp/2021.3.0/lib/intel64:/opt/intel/oneapi/dnnl/2021.3.0/cpu_dpcpp_gpu_dpcpp/lib:/opt/intel/oneapi/dal/2021.3.0/lib/intel64:/opt/intel/oneapi/compiler/2021.3.0/linux/compiler/lib/intel64_lin:/opt/intel/oneapi/compiler/2021.3.0/linux/lib:/opt/intel/oneapi/clck/2021.3.0/lib/intel64:/opt/intel/oneapi/ccl/2021.3.0/lib/cpu_gpu_dpcpp' +ENV MANPATH='/opt/intel/oneapi/mpi/2021.3.0/man:/opt/intel/oneapi/itac/2021.3.0/man:/opt/intel/oneapi/debugger/10.1.2/documentation/man:/opt/intel/oneapi/compiler/2021.3.0/documentation/en/man/common:/opt/intel/oneapi/clck/2021.3.0/man::' +ENV 
MKLROOT='/opt/intel/oneapi/mkl/2021.3.0' +ENV NLSPATH='/opt/intel/oneapi/mkl/2021.3.0/lib/intel64/locale/%l_%t/%N' +ENV OCL_ICD_FILENAMES='libintelocl_emu.so:libalteracl.so:/opt/intel/oneapi/compiler/2021.3.0/linux/lib/x64/libintelocl.so' +ENV ONEAPI_ROOT='/opt/intel/oneapi' +ENV PATH='/opt/intel/oneapi/vtune/2021.5.0/bin64:/opt/intel/oneapi/vpl/2021.4.0/bin:/opt/intel/oneapi/mpi/2021.3.0//libfabric/bin:/opt/intel/oneapi/mpi/2021.3.0//bin:/opt/intel/oneapi/mkl/2021.3.0/bin/intel64:/opt/intel/oneapi/itac/2021.3.0/bin:/opt/intel/oneapi/intelpython/latest/bin:/opt/intel/oneapi/intelpython/latest/condabin:/opt/intel/oneapi/inspector/2021.3.0/bin64:/opt/intel/oneapi/dev-utilities/2021.3.0/bin:/opt/intel/oneapi/debugger/10.1.2/gdb/intel64/bin:/opt/intel/oneapi/compiler/2021.3.0/linux/lib/oclfpga/llvm/aocl-bin:/opt/intel/oneapi/compiler/2021.3.0/linux/lib/oclfpga/bin:/opt/intel/oneapi/compiler/2021.3.0/linux/bin/intel64:/opt/intel/oneapi/compiler/2021.3.0/linux/bin:/opt/intel/oneapi/clck/2021.3.0/bin/intel64:/opt/intel/oneapi/advisor/2021.3.0/bin64:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' +ENV PKG_CONFIG_PATH='/opt/intel/oneapi/vtune/2021.5.0/include/pkgconfig/lib64:/opt/intel/oneapi/vpl/2021.4.0/lib/pkgconfig:/opt/intel/oneapi/mkl/2021.3.0/tools/pkgconfig:/opt/intel/oneapi/inspector/2021.3.0/include/pkgconfig/lib64:/opt/intel/oneapi/advisor/2021.3.0/include/pkgconfig/lib64:' +ENV PYTHONPATH='/opt/intel/oneapi/advisor/2021.3.0/pythonapi' +ENV SETVARS_COMPLETED='1' +ENV SETVARS_VARS_PATH='/opt/intel/oneapi/vtune/latest/env/vars.sh' +ENV TBBROOT='/opt/intel/oneapi/tbb/2021.3.0/env/..' 
+ENV VTUNE_PROFILER_2021_DIR='/opt/intel/oneapi/vtune/2021.5.0' +ENV VT_ADD_LIBS='-ldwarf -lelf -lvtunwind -lm -lpthread' +ENV VT_LIB_DIR='/opt/intel/oneapi/itac/2021.3.0/lib' +ENV VT_MPI='impi4' +ENV VT_ROOT='/opt/intel/oneapi/itac/2021.3.0' +ENV VT_SLIB_DIR='/opt/intel/oneapi/itac/2021.3.0/slib' +ENV _CE_CONDA='' +ENV _CE_M='' + + +############################################################# +# Entrypoint & ZSH +############################################################# +# DEV use: oh-my-zsh instead of bash +RUN sh -c "$(wget -O- https://github.com/deluan/zsh-in-docker/releases/download/v1.1.1/zsh-in-docker.sh)" -- \ + -t dallas +ENTRYPOINT "/usr/bin/zsh" +WORKDIR /work + diff --git a/likelihoods/CMakeLists.txt b/likelihoods/CMakeLists.txt new file mode 100644 index 00000000..0de4fee7 --- /dev/null +++ b/likelihoods/CMakeLists.txt @@ -0,0 +1,4 @@ +project(likelihoods) + +# only include the tree +add_subdirectory(examples) \ No newline at end of file diff --git a/likelihoods/examples/CMakeLists.txt b/likelihoods/examples/CMakeLists.txt new file mode 100644 index 00000000..cdbac61d --- /dev/null +++ b/likelihoods/examples/CMakeLists.txt @@ -0,0 +1,41 @@ +project(examples Fortran) + +# parse each example separately +file(GLOB ${PROJECT_NAME}_sources "*.f90" "*.F90") +set(${PROJECT_NAME}_library_list "") # hold all the targets we make +foreach (filename ${${PROJECT_NAME}_sources}) + # strip the file name + get_filename_component(archive_name ${filename} NAME_WE) + + if (${archive_name} STREQUAL "polychord_examples") + # make sure this is not turned into an object & executable alone + continue() + endif () + + # compile once into object -> use in lib and exec as well + add_library(objlib_${archive_name} OBJECT ${filename}) + set_target_properties(objlib_${archive_name} PROPERTIES POSITION_INDEPENDENT_CODE 1 LINKER_LANGUAGE Fortran) + target_link_libraries(objlib_${archive_name} chord) + + # static library + add_library(${archive_name} STATIC $<TARGET_OBJECTS:objlib_${archive_name}>) + + # executable + 
linking, uses the program and one of the likelihood implementations + add_executable(${archive_name}_exec $<TARGET_OBJECTS:objlib_${archive_name}> polychord_examples.f90) + target_link_libraries(${archive_name}_exec chord) # link libchord.a to it + set_target_properties(${archive_name}_exec PROPERTIES OUTPUT_NAME ${archive_name} CLEAN_DIRECT_OUTPUT 1 LINKER_LANGUAGE Fortran) + + # add to target list + list(APPEND ${PROJECT_NAME}_library_list ${archive_name}) + list(APPEND ${PROJECT_NAME}_library_list ${archive_name}_exec) + + # tell the user + message("Adding example: ${archive_name} (from ${filename})") +endforeach () + + +# standard: where to put the targets +install(TARGETS ${${PROJECT_NAME}_library_list} + RUNTIME DESTINATION ${target_bin_dir} + LIBRARY DESTINATION ${target_lib_dir} + ARCHIVE DESTINATION ${target_lib_dir}) diff --git a/likelihoods/examples/polychord_examples.f90 b/likelihoods/examples/polychord_examples.f90 new file mode 100644 index 00000000..49ad93e1 --- /dev/null +++ b/likelihoods/examples/polychord_examples.f90 @@ -0,0 +1,23 @@ +!> This is the main driving routine of the nested sampling algorithm +! TKS: copied from src/drivers for the CMake build system, only kept there to not break the old build system +program PolyChord + + ! ~~~~~~~ Loaded Modules ~~~~~~~ + use interfaces_module, only: run_polychord + use loglikelihood_module, only: loglikelihood, setup_loglikelihood + use utils_module, only: STR_LENGTH + use abort_module, only: halt_program + + ! ~~~~~~~ Local Variable Declaration ~~~~~~~ + implicit none + character(len=STR_LENGTH) :: input_file ! 
input file + + if(iargc()==1) then + call getarg(1,input_file) + else + call halt_program('PolyChord should be called with at most one argument, the input file') + end if + + call run_polychord(loglikelihood, setup_loglikelihood, input_file) + +end program PolyChord diff --git a/likelihoods/examples/random_gaussian.f90 b/likelihoods/examples/random_gaussian.f90 index c54584ac..6efc0fd5 100644 --- a/likelihoods/examples/random_gaussian.f90 +++ b/likelihoods/examples/random_gaussian.f90 @@ -32,7 +32,7 @@ end function loglikelihood subroutine setup_loglikelihood(settings) -#ifdef MPI +#ifdef USE_MPI use mpi_module #endif use settings_module, only: program_settings @@ -54,7 +54,7 @@ subroutine setup_loglikelihood(settings) ! Generate a random covariance matrix, its inverse and logdet on the root node call random_inverse_covmat(invcovmat,logdetcovmat,sigma,nDims) -#ifdef MPI +#ifdef USE_MPI call initialise_mpi(settings%feedback) ! Broadcast the covariance matrix and normalisation data to the ! 
rest of the nodes diff --git a/pypolychord/__init__.py b/pypolychord/__init__.py index 3e4047d7..7c6e3ddb 100644 --- a/pypolychord/__init__.py +++ b/pypolychord/__init__.py @@ -1,3 +1,3 @@ -__version__ = "1.20.1" +__version__ = "1.21.0" from pypolychord.settings import PolyChordSettings from pypolychord.polychord import run_polychord diff --git a/setup_cmake_template.py b/setup_cmake_template.py new file mode 100644 index 00000000..6380f289 --- /dev/null +++ b/setup_cmake_template.py @@ -0,0 +1,79 @@ +""" +Template for setup.py that should be populated by CMake with: +configure_file(${SETUP_PY_IN} ${SETUP_PY}) + +Used template variables: + - "target_python_so" : place where _pypolychord.ARCH.so is built (NOT install path) + - "PACKAGE_VERSION" : version of the package + - "CMAKE_CURRENT_SOURCE_DIR": root directory, where a standard setup.py would be placed +""" + +import os +import shutil +import sys +from distutils.core import setup + +from setuptools import Extension +from setuptools.command.build_ext import build_ext + + +def readme(): + with open('${CMAKE_CURRENT_SOURCE_DIR}/README.rst') as f: + return f.read() + + +class MyBuildExtension(build_ext): + """This class 'builds' the extension module, by + copying it from the place where CMake placed it. 
+ """ + + def build_extension(self, ext): + + # _pypolychord.ARCH.so + if os.path.exists("${target_python_so}"): + shutil.copyfile("${target_python_so}", self.get_ext_fullpath(ext.name)) + elif sys.argv[2] == "install": + # let's warn here, though this should not happen with the current CMake setup + print("NOT FOUND: ${target_python_so}\nYour installation may be incomplete.") + + +setup(name='pypolychord', + description='Python interface to PolyChord ${PACKAGE_VERSION}', + long_description=readme(), + long_description_content_type='text/x-rst', + url='https://ccpforge.cse.rl.ac.uk/gf/project/polychord/', + author='Will Handley', + author_email='wh260@cam.ac.uk', + license='PolyChord', + install_requires=['numpy', 'scipy'], + extras_require={'plotting': 'getdist'}, + + classifiers=[ + "Development Status :: 5 - Production/Stable", + + "Intended Audience :: Education", + + "Operating System :: MacOS :: MacOS X", + "Operating System :: Unix", + + "Topic :: Scientific/Engineering :: Astronomy", + "Topic :: Scientific/Engineering :: Physics", + + "Programming Language :: C++", + "Programming Language :: Fortran", + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + "Programming Language :: Python :: 3.10", + + ], + + # Don't touch anything below here - CMake fills this in + version='${PACKAGE_VERSION}', + package_dir={'pypolychord': '${CMAKE_CURRENT_SOURCE_DIR}/pypolychord'}, + packages=['pypolychord'], + cmdclass={'build_ext': MyBuildExtension}, + ext_modules=[Extension('_pypolychord', [])], + ) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 00000000..bdcecba5 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,5 @@ +project(src) + +# only include the tree +add_subdirectory(polychord) +add_subdirectory(drivers) \ No newline at end of file diff --git a/src/drivers/CMakeLists.txt 
b/src/drivers/CMakeLists.txt new file mode 100644 index 00000000..21b618a8 --- /dev/null +++ b/src/drivers/CMakeLists.txt @@ -0,0 +1 @@ +project(polychord_examples) diff --git a/src/drivers/polychord_examples.f90 b/src/drivers/polychord_examples.f90 index 40c6149d..bf88d7c8 100644 --- a/src/drivers/polychord_examples.f90 +++ b/src/drivers/polychord_examples.f90 @@ -1,4 +1,6 @@ !> This is the main driving routine of the nested sampling algorithm +! TKS: this was relocated to likelihoods/examples for the CMake build system. +! delete here when the Makefiles are updated or outdated program PolyChord ! ~~~~~~~ Loaded Modules ~~~~~~~ diff --git a/src/polychord/CMakeLists.txt b/src/polychord/CMakeLists.txt new file mode 100644 index 00000000..fc26af5e --- /dev/null +++ b/src/polychord/CMakeLists.txt @@ -0,0 +1,27 @@ +project(chord Fortran) + +# file glob -> sources -> object -> static & shared lib as well +file(GLOB ${PROJECT_NAME}_sources "*.f90" "*.F90" "*.cpp") +message(STATUS "Sources for project ${PROJECT_NAME}: ${${PROJECT_NAME}_sources}") + +# this is the "object library" target: compiles the sources only once +add_library(objlib_${PROJECT_NAME} OBJECT ${${PROJECT_NAME}_sources}) + +# shared libraries need PIC +set_property(TARGET objlib_${PROJECT_NAME} PROPERTY POSITION_INDEPENDENT_CODE 1) + +# libchord.a +add_library(${PROJECT_NAME} STATIC $<TARGET_OBJECTS:objlib_${PROJECT_NAME}>) +set_property(TARGET ${PROJECT_NAME} PROPERTY LINKER_LANGUAGE Fortran) + +# libchord.so as well -> needs a different target name, but the same lib name +add_library(${PROJECT_NAME}_shared SHARED $<TARGET_OBJECTS:objlib_${PROJECT_NAME}>) +set_target_properties(${PROJECT_NAME}_shared + PROPERTIES + OUTPUT_NAME ${PROJECT_NAME} + CLEAN_DIRECT_OUTPUT 1 + LINKER_LANGUAGE Fortran) + +# standard: where to put the targets +install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_shared + LIBRARY DESTINATION ${target_lib_dir}) diff --git a/src/polychord/abort.F90 b/src/polychord/abort.F90 index fda69985..7cd49946 100644 --- a/src/polychord/abort.F90 +++ b/src/polychord/abort.F90 
@@ -5,13 +5,13 @@ module abort_module subroutine halt_program(message) use utils_module, only: stderr_unit implicit none -#ifdef MPI +#ifdef USE_MPI include 'mpif.h' #endif character(LEN=*), intent(in), optional :: message -#ifdef MPI +#ifdef USE_MPI integer :: errorcode=1 integer :: mpierror #endif @@ -22,7 +22,7 @@ subroutine halt_program(message) write(stderr_unit,'( 20("=") )') end if -#ifdef MPI +#ifdef USE_MPI call MPI_ABORT(MPI_COMM_WORLD,errorcode,mpierror) #else stop 1 diff --git a/src/polychord/feedback.f90 b/src/polychord/feedback.f90 index 090d4639..8c8e4596 100644 --- a/src/polychord/feedback.f90 +++ b/src/polychord/feedback.f90 @@ -28,7 +28,7 @@ subroutine write_opening_statement(settings) write(stdout_unit,'("")') write(stdout_unit,'("PolyChord: Next Generation Nested Sampling")') write(stdout_unit,'("copyright: Will Handley, Mike Hobson & Anthony Lasenby")') - write(stdout_unit,'(" version: 1.20.1")') + write(stdout_unit,'(" version: 1.21.0")') write(stdout_unit,'(" release: 1st June 2021")') write(stdout_unit,'(" email: wh260@mrao.cam.ac.uk")') write(stdout_unit,'("")') diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 8adf0fb3..23d37f7f 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -68,7 +68,7 @@ subroutine GenerateLivePoints(loglikelihood,prior,settings,RTI,mpi_information) use run_time_module, only: run_time_info,initialise_run_time_info, find_min_loglikelihoods use array_module, only: add_point use abort_module -#ifdef MPI +#ifdef USE_MPI use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points #else use mpi_module, only: mpi_bundle,is_root,linear_mode @@ -100,7 +100,7 @@ function prior(cube) result(theta) type(run_time_info) :: RTI type(mpi_bundle),intent(in) :: mpi_information -#ifdef MPI +#ifdef USE_MPI integer :: active_workers ! Number of currently working workers integer :: worker_id ! 
Worker identifier to signal who to throw back to #endif @@ -181,7 +181,7 @@ function prior(cube) result(theta) end do -#ifdef MPI +#ifdef USE_MPI else !===================== PARALLEL MODE ======================= @@ -246,7 +246,7 @@ function prior(cube) result(theta) #endif end if !(nprocs case) -#ifdef MPI +#ifdef USE_MPI nlike = sum_integers(nlike,mpi_information) ! Gather the likelihood calls onto one node ndiscarded = sum_integers(ndiscarded,mpi_information) ! Gather the likelihood calls onto one node total_time = sum_doubles(total_time,mpi_information) ! Sum up the total time taken @@ -319,7 +319,7 @@ subroutine time_speeds(loglikelihood,prior,settings,RTI,speed,mpi_information) use utils_module, only: normal_fb,stdout_unit,fancy_fb,time use calculate_module, only: calculate_point use abort_module -#ifdef MPI +#ifdef USE_MPI use mpi_module, only: mpi_bundle,is_root,sum_doubles,sum_integers #else use mpi_module, only: mpi_bundle,is_root @@ -419,7 +419,7 @@ function prior(cube) result(theta) end if end do -#ifdef MPI +#ifdef USE_MPI total_time=sum_doubles(total_time,mpi_information) i_live = sum_integers(i_live,mpi_information) nlike = sum_integers(nlike,mpi_information) @@ -450,7 +450,7 @@ subroutine GenerateLivePointsFromSeed(loglikelihood,prior,settings,RTI,mpi_infor use array_module, only: add_point use abort_module use chordal_module, only: slice_sample -#ifdef MPI +#ifdef USE_MPI use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points #else use mpi_module, only: mpi_bundle,is_root,linear_mode @@ -482,7 +482,7 @@ function prior(cube) result(theta) type(run_time_info) :: RTI type(mpi_bundle),intent(in) :: mpi_information -#ifdef MPI +#ifdef USE_MPI integer :: active_workers ! Number of currently working workers integer :: worker_id ! 
Worker identifier to signal who to throw back to #endif @@ -559,7 +559,7 @@ function prior(cube) result(theta) end do -#ifdef MPI +#ifdef USE_MPI else !===================== PARALLEL MODE ======================= @@ -629,7 +629,7 @@ function prior(cube) result(theta) #endif end if !(nprocs case) -#ifdef MPI +#ifdef USE_MPI do i_grade=1,size(settings%grade_dims) nlikes(i_grade) = sum_integers(nlikes(i_grade),mpi_information) ! Gather the likelihood calls onto one node times(i_grade) = sum_doubles(times(i_grade),mpi_information) ! Sum up the total time taken diff --git a/src/polychord/interfaces.F90 b/src/polychord/interfaces.F90 index ac3fe110..1f654d92 100644 --- a/src/polychord/interfaces.F90 +++ b/src/polychord/interfaces.F90 @@ -2,7 +2,7 @@ module interfaces_module use utils_module, only: dp implicit none -#ifdef MPI +#ifdef USE_MPI include 'mpif.h' #endif interface run_polychord @@ -16,11 +16,11 @@ subroutine run_polychord_full(loglikelihood, prior_transform, dumper, settings_i use settings_module, only: program_settings,initialise_settings use random_module, only: initialise_random use nested_sampling_module, only: NestedSampling -#ifdef MPI +#ifdef USE_MPI use mpi_module, only: initialise_mpi, finalise_mpi #endif implicit none -#ifdef MPI +#ifdef USE_MPI include 'mpif.h' #endif @@ -56,7 +56,7 @@ end subroutine dumper real(dp), dimension(4) :: output_info -#ifdef MPI +#ifdef USE_MPI if (present(mpi_communicator)) then comm = mpi_communicator else @@ -73,7 +73,7 @@ end subroutine dumper end if settings = settings_in call initialise_settings(settings) -#ifdef MPI +#ifdef USE_MPI output_info = NestedSampling(loglikelihood,prior_transform,dumper,settings,comm) call finalise_mpi #else @@ -105,7 +105,7 @@ end subroutine dumper type(program_settings),intent(in) :: settings ! 
The program settings integer, intent(in), optional :: mpi_communicator integer :: comm -#ifdef MPI +#ifdef USE_MPI if (present(mpi_communicator)) then comm = mpi_communicator else @@ -145,7 +145,7 @@ end function prior_transform type(program_settings),intent(in) :: settings ! The program settings integer, intent(in), optional :: mpi_communicator integer :: comm -#ifdef MPI +#ifdef USE_MPI if (present(mpi_communicator)) then comm = mpi_communicator else @@ -177,7 +177,7 @@ end function loglikelihood !> MPI handle integer, intent(in), optional :: mpi_communicator integer :: comm -#ifdef MPI +#ifdef USE_MPI if (present(mpi_communicator)) then comm = mpi_communicator else @@ -231,7 +231,7 @@ end subroutine setup_loglikelihood end interface integer, intent(in), optional :: mpi_communicator integer :: comm -#ifdef MPI +#ifdef USE_MPI if (present(mpi_communicator)) then comm = mpi_communicator else diff --git a/src/polychord/maximiser.F90 b/src/polychord/maximiser.F90 index bf949e7b..f2990220 100644 --- a/src/polychord/maximiser.F90 +++ b/src/polychord/maximiser.F90 @@ -11,7 +11,7 @@ subroutine maximise(loglikelihood,prior,settings,RTI) use run_time_module, only: run_time_info use read_write_module, only: write_max_file, mean use chordal_module, only: generate_nhats, slice_sample -#ifdef MPI +#ifdef USE_MPI use mpi_module, only: mpi_bundle,is_root, throw_point, catch_point, mpi_synchronise, throw_seed, catch_seed #else use mpi_module, only: mpi_bundle,is_root @@ -70,7 +70,7 @@ function do_maximisation(loglikelihood,prior,settings,RTI, posterior) result(max use read_write_module, only: write_max_file, mean use chordal_module, only: generate_nhats, slice_sample use nelder_mead_module, only: nelder_mead -#ifdef MPI +#ifdef USE_MPI use mpi_module, only: mpi_bundle,is_root, throw_point, catch_point, mpi_synchronise, throw_seed, catch_seed #else use mpi_module, only: mpi_bundle,is_root diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 
9a891b2a..813cf89d 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -2,7 +2,7 @@ module mpi_module use utils_module, only: dp, normal_fb implicit none -#ifdef MPI +#ifdef USE_MPI include 'mpif.h' #endif @@ -83,7 +83,7 @@ function get_nprocs(mpi_communicator) result(nprocs) integer, intent(in) :: mpi_communicator integer :: nprocs -#ifdef MPI +#ifdef USE_MPI call MPI_COMM_SIZE( & mpi_communicator,&!handle nprocs, &!return number of processors @@ -103,7 +103,7 @@ function get_rank(mpi_communicator) result(myrank) integer, intent(in) :: mpi_communicator integer :: myrank -#ifdef MPI +#ifdef USE_MPI call MPI_COMM_RANK( & mpi_communicator,&!handle myrank, &!return rank of calling processor @@ -132,7 +132,7 @@ function get_root(mpi_communicator) result(root) ! Get the rank of the process myrank = get_rank(mpi_communicator) -#ifdef MPI +#ifdef USE_MPI call MPI_ALLREDUCE( & myrank, &!send buffer root, &!recieve buffer @@ -149,7 +149,7 @@ function get_root(mpi_communicator) result(root) end function get_root -#ifdef MPI +#ifdef USE_MPI !> Procedure to initialise mpi subroutine initialise_mpi(feedback) implicit none @@ -217,7 +217,7 @@ subroutine mpi_synchronise(mpi_information) implicit none type(mpi_bundle), intent(in) :: mpi_information -#ifdef MPI +#ifdef USE_MPI call MPI_BARRIER(mpi_information%communicator,mpierror) #endif diff --git a/src/polychord/nested_sampling.F90 b/src/polychord/nested_sampling.F90 index 0db480ae..b2e6600d 100644 --- a/src/polychord/nested_sampling.F90 +++ b/src/polychord/nested_sampling.F90 @@ -1,7 +1,7 @@ module nested_sampling_module use utils_module, only: dp -#ifdef MPI +#ifdef USE_MPI use mpi_module, only: get_mpi_information,mpi_bundle,is_root,linear_mode,catch_babies,throw_babies,throw_seed,catch_seed,broadcast_integers,mpi_synchronise #else use mpi_module, only: get_mpi_information,mpi_bundle,is_root,linear_mode @@ -23,7 +23,7 @@ function NestedSampling(loglikelihood,prior, dumper, settings, mpi_communicator) 
use cluster_module, only: do_clustering use generate_module, only: GenerateSeed,GenerateLivePoints,GenerateLivePointsFromSeed use maximise_module, only: maximise -#ifdef MPI +#ifdef USE_MPI use utils_module, only: normal_fb,stdout_unit #else use utils_module, only: stdout_unit @@ -114,7 +114,7 @@ end subroutine dumper type(mpi_bundle) :: mpi_information -#ifdef MPI +#ifdef USE_MPI ! MPI specific variables ! ---------------------- integer :: i_worker ! Worker iterator @@ -142,7 +142,7 @@ end subroutine dumper ! MPI initialisation mpi_information = get_mpi_information(mpi_communicator) -#ifdef MPI +#ifdef USE_MPI allocate(worker_cluster(mpi_information%nprocs-1)) ! Allocate the worker arrays worker_cluster = 1 ! initialise with 1 @@ -222,7 +222,7 @@ end subroutine dumper num_repeats = RTI%num_repeats call write_num_repeats(num_repeats,settings%feedback) end if -#ifdef MPI +#ifdef USE_MPI call broadcast_integers(num_repeats,mpi_information) allocate(nursary(settings%nTotal,sum(num_repeats), mpi_information%nprocs-1)) allocate(worker_epochs(mpi_information%nprocs-1), nlikes(size(settings%grade_dims),mpi_information%nprocs-1)) @@ -262,7 +262,7 @@ end subroutine dumper ! Generate a new set of points within the likelihood bound of the late point baby_points = SliceSampling(loglikelihood,prior,settings,logL,seed_point,cholesky,nlike,num_repeats) baby_points(settings%b0,:) = logL ! Note the moment it is born at -#ifdef MPI +#ifdef USE_MPI else if(settings%synchronous) then ! Parallel synchronous mode ! ------------------------- @@ -313,7 +313,7 @@ end subroutine dumper ! See if this point is suitable to be added to the arrays -#ifdef MPI +#ifdef USE_MPI if( linear_mode(mpi_information) .or. administrator_epoch==worker_epoch ) then #endif if(replace_point(settings,RTI,baby_points,cluster_id)) then @@ -341,7 +341,7 @@ end subroutine dumper end if if(delete_cluster(settings,RTI)) then -#ifdef MPI +#ifdef USE_MPI administrator_epoch = administrator_epoch+1 #endif end if! 
Delete any clusters as necessary @@ -357,21 +357,21 @@ end subroutine dumper ! If we want to cluster on sub dimensions, then do this first if(allocated(settings%sub_clustering_dimensions)) then if( do_clustering(settings,RTI,settings%sub_clustering_dimensions) ) then -#ifdef MPI +#ifdef USE_MPI administrator_epoch = administrator_epoch+1 #endif end if end if if( do_clustering(settings,RTI) ) then -#ifdef MPI +#ifdef USE_MPI administrator_epoch = administrator_epoch+1 #endif end if end if call calculate_covmats(settings,RTI) end if -#ifdef MPI +#ifdef USE_MPI end if #endif @@ -421,7 +421,7 @@ end subroutine dumper ! C) Clean up ! ======== -#ifdef MPI +#ifdef USE_MPI ! MPI cleanup ! ----------- ! Kill off the final workers. @@ -429,7 +429,7 @@ end subroutine dumper ! data from each node (and throw it away) and then send a kill signal back to it if (settings%synchronous) then do worker_id=mpi_information%nprocs-1,1,-1 - call throw_seed(seed_point,cholesky,logL,mpi_information,worker_id,administrator_epoch,.false.) + call throw_seed(seed_point,cholesky,logL,mpi_information,worker_id,administrator_epoch,.false.) end do else do i_worker=mpi_information%nprocs-1,1,-1 @@ -441,7 +441,7 @@ end subroutine dumper RTI%nlike = RTI%nlike + nlike ! Send kill signal to worker worker_id (note that we no longer care about seed_point, so we'll just use the last one - call throw_seed(seed_point,cholesky,logL,mpi_information,worker_id,administrator_epoch,.false.) + call throw_seed(seed_point,cholesky,logL,mpi_information,worker_id,administrator_epoch,.false.) 
end do end if @@ -504,7 +504,7 @@ end subroutine dumper #endif end if !(myrank==root / myrank/=root) -#ifdef MPI +#ifdef USE_MPI call mpi_synchronise(mpi_information) #endif diff --git a/src/polychord/random_utils.F90 b/src/polychord/random_utils.F90 index 1192606e..d27238eb 100644 --- a/src/polychord/random_utils.F90 +++ b/src/polychord/random_utils.F90 @@ -4,7 +4,7 @@ module random_module use utils_module, only: dp -#ifdef MPI +#ifdef USE_MPI use mpi_module #endif @@ -32,7 +32,7 @@ subroutine initialise_random(mpi_communicator,seed_input) integer, optional, intent(in) :: seed_input integer,allocatable,dimension(:) :: seed ! vector to be passed to random_seed -#ifdef MPI +#ifdef USE_MPI integer :: mpierror #endif @@ -46,7 +46,7 @@ subroutine initialise_random(mpi_communicator,seed_input) ! Get the global ranking -#ifdef MPI +#ifdef USE_MPI call MPI_COMM_RANK(mpi_communicator, myrank, mpierror) #else myrank = 0 @@ -76,7 +76,7 @@ subroutine initialise_random(mpi_communicator,seed_input) end if -#ifdef MPI +#ifdef USE_MPI ! Broadcast the system time to all nodes call MPI_BCAST(t,1,MPI_INTEGER,0,mpi_communicator,mpierror) diff --git a/src/polychord/utils.F90 b/src/polychord/utils.F90 index 105e09a4..e5f406a3 100644 --- a/src/polychord/utils.F90 +++ b/src/polychord/utils.F90 @@ -1,7 +1,7 @@ module utils_module implicit none -#ifdef MPI +#ifdef USE_MPI include 'mpif.h' #endif integer, parameter :: dp = kind(1.d0) @@ -1050,12 +1050,12 @@ end function log_gauss !> This gets the wallclock timer from the mpi library function time() implicit none -#ifdef MPI +#ifdef USE_MPI include 'mpif.h' #endif real(dp) :: time -#ifdef MPI +#ifdef USE_MPI time = MPI_Wtime() #else call cpu_time(time)