Commit

Merge remote-tracking branch 'origin/development' into nonlocal-compatibility-in-dependent-state

MarDiehl committed Dec 31, 2023
2 parents 0d2ff12 + 14bafc6 commit c6dae71
Showing 18 changed files with 104 additions and 136 deletions.
13 changes: 5 additions & 8 deletions CMakeLists.txt
@@ -17,17 +17,14 @@ pkg_get_variable(CMAKE_Fortran_COMPILER PETSc fcompiler)
pkg_get_variable(CMAKE_C_COMPILER PETSc ccompiler)

# Solver determines name of project
string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER)
if(DAMASK_SOLVER STREQUAL "GRID")
project(damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
elseif(DAMASK_SOLVER STREQUAL "MESH")
project(damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
elseif(DAMASK_SOLVER STREQUAL "TEST")
project(damask-test HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER_UPPER)
string(TOLOWER "${DAMASK_SOLVER}" DAMASK_SOLVER_LOWER)
if("${DAMASK_SOLVER_UPPER}" MATCHES "^(GRID|MESH|TEST)$")
project("damask-${DAMASK_SOLVER_LOWER}" HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
else()
message(FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}")
endif()
add_definitions("-D${DAMASK_SOLVER}")
add_definitions("-D${DAMASK_SOLVER_UPPER}")

set(CMAKE_Fortran_PREPROCESS "ON") # works only for CMake >= 3.18
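The rewritten solver selection normalizes DAMASK_SOLVER once and derives the project name and the preprocessor macro from the normalized strings, instead of repeating one project() call per solver. A minimal, self-contained sketch of the same pattern (hypothetical stand-alone CMakeLists, not part of the repository; the variable names mirror the diff):

cmake_minimum_required(VERSION 3.18)
set(DAMASK_SOLVER "Grid" CACHE STRING "grid, mesh, or test")

# Normalize case once: the upper-case form feeds the -D define,
# the lower-case form feeds project/executable/source-directory names.
string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER_UPPER)
string(TOLOWER "${DAMASK_SOLVER}" DAMASK_SOLVER_LOWER)

if("${DAMASK_SOLVER_UPPER}" MATCHES "^(GRID|MESH|TEST)$")
  project("damask-${DAMASK_SOLVER_LOWER}" LANGUAGES NONE)
else()
  message(FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}")
endif()

add_definitions("-D${DAMASK_SOLVER_UPPER}")
message(STATUS "configured ${PROJECT_NAME} with -D${DAMASK_SOLVER_UPPER}")

Configuring such a sketch with -DDAMASK_SOLVER=mesh (any capitalization) would produce the project damask-mesh and the define -DMESH, while any other value aborts configuration.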

2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
3.0.0-alpha8-211-gccf4867e0
3.0.0-alpha8-228-g10a2b58f8
13 changes: 2 additions & 11 deletions src/CMakeLists.txt
@@ -1,16 +1,7 @@
file(GLOB damask-sources CONFIGURE_DEPENDS *.f90 *.c)

if(PROJECT_NAME STREQUAL "damask-grid")
set(executable-name "DAMASK_grid")
file(GLOB solver-sources CONFIGURE_DEPENDS grid/*.f90)
elseif(PROJECT_NAME STREQUAL "damask-mesh")
set(executable-name "DAMASK_mesh")
file(GLOB solver-sources CONFIGURE_DEPENDS mesh/*.f90)
elseif(PROJECT_NAME STREQUAL "damask-test")
set(executable-name "DAMASK_test")
file(GLOB solver-sources CONFIGURE_DEPENDS test/*.f90)
endif()

set(executable-name "DAMASK_${DAMASK_SOLVER_LOWER}")
file(GLOB solver-sources CONFIGURE_DEPENDS ${DAMASK_SOLVER_LOWER}/*.f90)

if(NOT CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
add_executable(${executable-name} ${damask-sources} ${solver-sources})
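With the per-solver branches gone, the DAMASK_SOLVER_LOWER value set in the top-level CMakeLists now drives both pieces here: for DAMASK_SOLVER=mesh, for example, the executable becomes DAMASK_mesh and the solver sources are globbed from mesh/*.f90.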
4 changes: 2 additions & 2 deletions src/HDF5_utilities.f90
@@ -1560,7 +1560,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
call HDF5_chkerr(hdferr)
call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
readSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
end if
#endif
myStart = int(0,HSIZE_T)
@@ -1667,7 +1667,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
if (parallel) then
call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
writeSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
end if
#endif
myStart = int(0,HSIZE_T)
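Throughout this commit, inline checks of the form if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' are replaced by calls to parallelization_chkerr. The helper itself is not part of this diff; a plausible minimal sketch of such a wrapper (an assumption about its shape, not the actual DAMASK routine, with the integer kind handling simplified) is:

module parallelization_chkerr_sketch
  use mpi_f08, only: MPI_SUCCESS
  implicit none

contains

  subroutine parallelization_chkerr(err)
    ! Abort with a uniform message when an MPI routine signals failure.
    integer, intent(in) :: err          ! error code returned by an MPI call

    if (err /= MPI_SUCCESS) error stop 'MPI error'

  end subroutine parallelization_chkerr

end module parallelization_chkerr_sketch

Centralizing the check keeps every call site to a single line and leaves one place to later report the failing rank or translate the error code.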
3 changes: 0 additions & 3 deletions src/Marc/materialpoint_Marc.f90
@@ -158,9 +158,6 @@ subroutine materialpoint_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip,
else validCalculation
call homogenization_mechanical_response(dt,(elCP-1)*discretization_nIPs + ip, &
(elCP-1)*discretization_nIPs + ip)
if (.not. terminallyIll) &
call homogenization_mechanical_response2(dt,(elCP-1)*discretization_nIPs + ip, &
(elCP-1)*discretization_nIPs + ip)

terminalIllness: if (terminallyIll) then

6 changes: 3 additions & 3 deletions src/grid/DAMASK_grid.f90
@@ -363,15 +363,15 @@ program DAMASK_grid
end if; flush(IO_STDOUT)

call MPI_Allreduce(signal_SIGUSR1,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
if (mod(inc,loadCases(l)%f_out) == 0 .or. sig) then
print'(/,1x,a)', '... saving results ........................................................'
flush(IO_STDOUT)
call materialpoint_result(totalIncsCounter,t)
end if
if (sig) call signal_setSIGUSR1(.false.)
call MPI_Allreduce(signal_SIGUSR2,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
if (mod(inc,loadCases(l)%f_restart) == 0 .or. sig) then
do field = 1, nActiveFields
select case (ID(field))
@@ -387,7 +387,7 @@ program DAMASK_grid
end if
if (sig) call signal_setSIGUSR2(.false.)
call MPI_Allreduce(signal_SIGINT,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
if (sig) exit loadCaseLooping
end if skipping
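The signal flags handled above (SIGUSR1, SIGUSR2, SIGINT) are local to each rank, so they are OR-reduced across MPI_COMM_WORLD before any rank decides to write results, write restart data, or leave the load-case loop; this keeps all ranks taking the same branch. A reduced sketch of that pattern (hypothetical stand-alone program, not DAMASK code):

program allreduce_flag_sketch
  use mpi_f08
  implicit none

  logical :: flag_local, flag_global
  integer :: ierr

  call MPI_Init(ierr)
  flag_local = .false.                ! would be set by a signal handler on some rank

  ! every rank must agree, so combine the local flags with a logical OR
  call MPI_Allreduce(flag_local, flag_global, 1, MPI_LOGICAL, MPI_LOR, MPI_COMM_WORLD, ierr)

  if (flag_global) print *, 'at least one rank requested action'

  call MPI_Finalize(ierr)

end program allreduce_flag_sketch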

20 changes: 10 additions & 10 deletions src/grid/discretization_grid.f90
@@ -97,12 +97,12 @@ subroutine discretization_grid_init(restart)


call MPI_Bcast(cells,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
if (cells(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

print'(/,1x,a,i0,a,i0,a,i0)', 'cells: ', cells(1), ' × ', cells(2), ' × ', cells(3)
print '(1x,a,es8.2,a,es8.2,a,es8.2,a)', 'size: ', geomSize(1), ' × ', geomSize(2), ' × ', geomSize(3), ''
@@ -126,15 +126,15 @@ subroutine discretization_grid_init(restart)

call MPI_Gather(product(cells(1:2))*cells3Offset,1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

allocate(materialAt(product(myGrid)))
call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),&
MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

call discretization_init(materialAt, &
IPcoordinates0(myGrid,mySize,cells3Offset), &
@@ -318,10 +318,10 @@ function discretization_grid_getInitialCondition(label) result(ic)

real(pREAL), dimension(:), allocatable :: ic_global, ic_local
integer(MPI_INTEGER_KIND) :: err_MPI

integer, dimension(worldsize) :: &
displs, sendcounts


if (worldrank == 0) then
ic_global = VTI_readDataset_real(IO_read(CLI_geomFile),label)
else
@@ -330,15 +330,15 @@ function discretization_grid_getInitialCondition(label) result(ic)

call MPI_Gather(product(cells(1:2))*cells3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call MPI_Gather(product(cells(1:2))*cells3, 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

allocate(ic_local(product(cells(1:2))*cells3))
call MPI_Scatterv(ic_global,sendcounts,displs,MPI_DOUBLE,ic_local,size(ic_local),&
MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

ic = reshape(ic_local,[cells(1),cells(2),cells3])
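In both hunks above, rank 0 holds the global field (materialAt_global or the dataset read from the geometry file), the per-rank slab sizes and offsets are gathered into sendcounts and displs, and MPI_Scatterv then hands each rank the entries for its cells3 layers. A condensed sketch of that distribution pattern (hypothetical stand-alone program with made-up sizes, not DAMASK code):

program scatterv_slab_sketch
  use mpi_f08
  implicit none

  integer :: worldrank, worldsize, ierr
  integer :: n_local, offset_local
  integer,          allocatable :: sendcounts(:), displs(:)
  double precision, allocatable :: field_global(:), field_local(:)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, worldrank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, worldsize, ierr)

  n_local = 4                                   ! stand-in for product(cells(1:2))*cells3
  offset_local = n_local*worldrank              ! stand-in for product(cells(1:2))*cells3Offset
  allocate(sendcounts(worldsize), displs(worldsize), field_local(n_local))
  if (worldrank == 0) then
    allocate(field_global(n_local*worldsize))   ! stand-in for the globally read dataset
    field_global = 1.0d0
  else
    allocate(field_global(0))
  end if

  ! root learns how much every rank receives and where that block starts
  call MPI_Gather(offset_local, 1, MPI_INTEGER, displs,     1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
  call MPI_Gather(n_local,      1, MPI_INTEGER, sendcounts, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)

  ! hand each rank its contiguous slab of the global field
  call MPI_Scatterv(field_global, sendcounts, displs, MPI_DOUBLE_PRECISION, &
                    field_local, n_local, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)

  call MPI_Finalize(ierr)

end program scatterv_slab_sketch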

10 changes: 5 additions & 5 deletions src/grid/grid_damage_spectral.f90
@@ -129,7 +129,7 @@ subroutine grid_damage_spectral_init(num_grid)
CHKERRQ(err_PETSc)
call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call DMDACreate3D(PETSC_COMM_WORLD, &
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -239,10 +239,10 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
phi_max = maxval(phi)
stagNorm = maxval(abs(phi - phi_stagInc))
call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*phi_max)
call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
phi_stagInc = phi

call homogenization_set_phi(reshape(phi,[product(cells(1:2))*cells3]))
@@ -379,10 +379,10 @@ subroutine updateReference()

K_ref = K_ref*wgt
call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
mu_ref = mu_ref*wgt
call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

end subroutine updateReference
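Two of the replaced checks above sit in the staggered-convergence logic of grid_damage_spectral_solution: the staggered norm is MAX-reduced so that the largest local change decides, and the resulting flag is AND-reduced so that a single unconverged rank keeps everyone iterating; updateReference similarly SUM-reduces the weighted local contributions to K_ref and mu_ref into global averages. A compact sketch of the convergence part (hypothetical stand-alone program with made-up numbers, not DAMASK code):

program stag_convergence_sketch
  use mpi_f08
  implicit none

  double precision :: stagNorm, phi_max, atol, rtol
  logical :: stagConverged
  integer :: ierr

  call MPI_Init(ierr)
  stagNorm = 1.0d-7                   ! stand-in for maxval(abs(phi - phi_stagInc))
  phi_max  = 1.0d0                    ! stand-in for maxval(phi)
  atol     = 1.0d-8
  rtol     = 1.0d-6

  ! the largest local change anywhere sets the staggered norm ...
  call MPI_Allreduce(MPI_IN_PLACE, stagNorm, 1, MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_WORLD, ierr)
  stagConverged = stagNorm < max(atol, rtol*phi_max)
  ! ... and all ranks must agree before the staggered loop may stop
  call MPI_Allreduce(MPI_IN_PLACE, stagConverged, 1, MPI_LOGICAL, MPI_LAND, MPI_COMM_WORLD, ierr)

  if (stagConverged) print *, 'staggered iteration converged on all ranks'

  call MPI_Finalize(ierr)

end program stag_convergence_sketch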

16 changes: 8 additions & 8 deletions src/grid/grid_mech_FEM.f90
@@ -173,7 +173,7 @@ subroutine grid_mechanical_FEM_init(num_grid)
CHKERRQ(err_PETSc)
call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call DMDACreate3d(PETSC_COMM_WORLD, &
DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
DMDA_STENCIL_BOX, &
@@ -246,16 +246,16 @@ subroutine grid_mechanical_FEM_init(num_grid)

call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(temp33n,groupHandle,'F')
F = reshape(temp33n,[3,3,cells(1),cells(2),cells3])
call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -283,10 +283,10 @@ subroutine grid_mechanical_FEM_init(num_grid)
print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
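In the restart branch here (and likewise in grid_mech_spectral_basic below), rank-independent quantities such as P_aim, F_aim, and the averaged stiffnesses are read without parallel I/O and then broadcast from rank 0, so every rank resumes the increment from identical values. A stripped-down sketch of that read-then-broadcast idea (hypothetical stand-alone program; a hard-coded value stands in for the HDF5_read call, not DAMASK code):

program restart_bcast_sketch
  use mpi_f08
  implicit none

  double precision :: F_aim(3,3)
  integer :: worldrank, ierr

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, worldrank, ierr)

  if (worldrank == 0) then            ! stand-in for the serial restart read on rank 0
    F_aim = reshape([1.0d0,0.0d0,0.0d0, 0.0d0,1.0d0,0.0d0, 0.0d0,0.0d0,1.0d0], [3,3])
  end if

  ! rank 0 owns the value after the read; all other ranks receive a copy
  call MPI_Bcast(F_aim, 9, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)

  if (worldrank /= 0) print *, 'received F_aim(1,1) =', F_aim(1,1)

  call MPI_Finalize(ierr)

end program restart_bcast_sketch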
@@ -575,7 +575,7 @@ subroutine formResidual(da_local,x_local, &
P_av,C_volAvg,devNull, &
F,params%Delta_t,params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

!--------------------------------------------------------------------------------------------------
! stress BC handling
18 changes: 9 additions & 9 deletions src/grid/grid_mech_spectral_basic.f90
@@ -169,7 +169,7 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
CHKERRQ(err_PETSc)
call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call DMDACreate3d(PETSC_COMM_WORLD, &
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -207,16 +207,16 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)

call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(temp33n,groupHandle,'F')
F = reshape(temp33n,[9,cells(1),cells(2),cells3])
call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -238,13 +238,13 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)

call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
@@ -519,7 +519,7 @@ subroutine formResidual(residual_subdomain, F, &
P_av,C_volAvg,C_minMaxAvg, &
F,params%Delta_t,params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call parallelization_chkerr(err_MPI)
err_div = utilities_divergenceRMS(P)
end associate

(Diffs for the remaining changed files are not shown.)
