Fix hard coded SF mass and add params for GRTresna
AreefW committed Dec 11, 2024
1 parent 12bcc0b commit 6ba4c57
Showing 17 changed files with 655 additions and 85 deletions.
116 changes: 116 additions & 0 deletions Examples/ScalarFieldCosmo/ConstraintsExtraction.hpp
@@ -0,0 +1,116 @@
/* GRChombo
* Copyright 2012 The GRChombo collaboration.
* Please refer to LICENSE in GRChombo's root directory.
*/

#ifndef CONSTRAINTSEXTRACTION_HPP_
#define CONSTRAINTSEXTRACTION_HPP_

#include "AMRInterpolator.hpp"
#include "InterpolationQuery.hpp"
#include "Lagrange.hpp"
#include "SimulationParametersBase.hpp"
#include "SmallDataIO.hpp"
#include "SphericalHarmonics.hpp"
#include "UserVariables.hpp"
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

//! This class allows extraction of the constraints (Ham and Mom) along the x-axis

class ConstraintsExtraction
{
private:
//! Params for extraction
const int m_comp1;
const int m_comp2;
const int m_num_points;
const double m_L;
const std::array<double, CH_SPACEDIM>
m_origin; // Origin of an extraction line
const double m_dt;
const double m_time;

public:
//! The constructor
ConstraintsExtraction(int a_comp1, int a_comp2, int a_num_points,
double a_L, std::array<double, CH_SPACEDIM> a_origin,
double a_dt, double a_time)
: m_comp1(a_comp1), m_comp2(a_comp2), m_num_points(a_num_points),
m_L(a_L), m_origin(a_origin), m_dt(a_dt), m_time(a_time)
{
}

//! Destructor
~ConstraintsExtraction() {}

//! Execute the query
void execute_query(AMRInterpolator<Lagrange<2>> *a_interpolator,
std::string a_file_prefix) const
{
CH_TIME("ConstraintsExtraction::execute_query");
if (a_interpolator == nullptr)
{
MayDay::Error("Interpolator has not been initialised.");
}
std::vector<double> interp_ham_data(m_num_points);
std::vector<double> interp_mom_data(m_num_points);
std::vector<double> interp_x(m_num_points);
std::vector<double> interp_y(m_num_points);
std::vector<double> interp_z(m_num_points);

// Work out the coordinates
// go out along the x-axis from 0 up to (but not including) L
for (int idx = 0; idx < m_num_points; ++idx)
{
interp_x[idx] = (double(idx) / double(m_num_points) * m_L);
interp_y[idx] = m_origin[1];
interp_z[idx] = m_origin[2];
}

// set up the query
InterpolationQuery query_ham(m_num_points);
query_ham.setCoords(0, interp_x.data())
.setCoords(1, interp_y.data())
.setCoords(2, interp_z.data())
.addComp(m_comp1, interp_ham_data.data(), Derivative::LOCAL,
VariableType::diagnostic); // evolution or diagnostic
InterpolationQuery query_mom(m_num_points);
query_mom.setCoords(0, interp_x.data())
.setCoords(1, interp_y.data())
.setCoords(2, interp_z.data())
.addComp(m_comp2, interp_mom_data.data(), Derivative::LOCAL,
VariableType::diagnostic); // evolution or diagnostic

// submit the query
a_interpolator->interp(query_ham);
a_interpolator->interp(query_mom);

// now write out
bool first_step = (m_time == 0.0);
double restart_time = 0.0;
SmallDataIO output_file(a_file_prefix, m_dt, m_time, restart_time,
SmallDataIO::APPEND, first_step);

std::vector<std::string> header_line(2);
if (first_step)
{
header_line[0] = "Ham";
header_line[1] = "Mom";
output_file.write_header_line(header_line, "x");
}

for (int idx = 0; idx < m_num_points; ++idx)
{
std::vector<double> data(2);
data[0] = interp_ham_data[idx];
data[1] = interp_mom_data[idx];
output_file.write_data_line(data, interp_x[idx]);
}
}
};

#endif /* CONSTRAINTSEXTRACTION_HPP_ */
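
For illustration only, a minimal sketch of how a class like this might be driven once an AMRInterpolator<Lagrange<2>> has been set up; it is not code from this commit, and the diagnostic indices c_Ham and c_Mom, the helper name and the file prefix are assumptions. In GRChombo examples a call like this would typically sit in the level class's specificPostTimeStep on the coarsest level.

#include "ConstraintsExtraction.hpp"

// Write a lineout of the Hamiltonian and momentum constraints along the x-axis
void write_constraint_lineout(AMRInterpolator<Lagrange<2>> *a_interpolator,
                              int a_num_points, double a_L,
                              std::array<double, CH_SPACEDIM> a_center,
                              double a_dt, double a_time)
{
    // c_Ham and c_Mom are assumed diagnostic-variable indices from
    // DiagnosticVariables.hpp
    ConstraintsExtraction extraction(c_Ham, c_Mom, a_num_points, a_L, a_center,
                                     a_dt, a_time);
    extraction.execute_query(a_interpolator, "data/constraints_lineout");
}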
10 changes: 6 additions & 4 deletions Examples/ScalarFieldCosmo/CustomExtraction.hpp
@@ -104,11 +104,13 @@ class CustomExtraction
{
for (int i = 0; i < m_num_points; ++i)
{
+ // header_line[i] =
+ //     "p" + std::to_string(i + 1) + "(" +
+ //     to_string_with_precision(interp_x[i], 2) + "," +
+ //     to_string_with_precision(m_origin[1], 2) + "," +
+ //     to_string_with_precision(m_origin[2], 2) + ")";
header_line[i] =
-     "p" + std::to_string(i + 1) + "(" +
-     to_string_with_precision(interp_x[i], 2) + "," +
-     to_string_with_precision(m_origin[1], 2) + "," +
-     to_string_with_precision(m_origin[2], 2) + ")";
+     "x = " + to_string_with_precision(interp_x[i], 2);
}
output_file.write_header_line(header_line);
}
5 changes: 5 additions & 0 deletions Examples/ScalarFieldCosmo/GRTresna_restart_setup/README.md
@@ -0,0 +1,5 @@
## Restarting from GRTresna and convergence testing

GRTresna is the GRTL Collaboration code for solving the initial data constraints of GR. See [the GRTresna repository](https://github.com/GRTLCollaboration/GRTresna).

These parameters relate to the GRTresna [ScalarFieldCosmo example](https://github.com/GRTLCollaboration/GRTresna/tree/main/Examples/ScalarFieldCosmo). One should run the solver there and use its output as the restart file for this evolution example. We also provide an example of how to perform a convergence test on the outputs, which is essential for verifying that everything is working correctly. For more details, see the [GRTresna wiki page](https://github.com/GRTLCollaboration/GRTresna/wiki/Convergence-testing).
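
As a hedged illustration of the convergence test mentioned above (not part of the commit): with the same diagnostic f extracted at low, medium and high resolution, with the grid spacing halved each time, the measured convergence order is p = log2(|f_LR - f_MR| / |f_MR - f_HR|). A standalone C++ sketch with placeholder numbers:

#include <cmath>
#include <iostream>

int main()
{
    // placeholder values (not data from a run), chosen so the differences
    // shrink by roughly a factor of 16 per halving, i.e. p close to 4
    double f_LR = 1.7e-2, f_MR = 1.7e-3, f_HR = 7.0e-4;
    double p = std::log2(std::fabs(f_LR - f_MR) / std::fabs(f_MR - f_HR));
    std::cout << "measured convergence order ~ " << p << std::endl;
    return 0;
}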
190 changes: 190 additions & 0 deletions Examples/ScalarFieldCosmo/GRTresna_restart_setup/params_HR.txt
@@ -0,0 +1,190 @@
# See the wiki page for an explanation of the params!
# https://github.com/GRChombo/GRChombo/wiki/Guide-to-parameters

#################################################
# Filesystem parameters

verbosity = 0

# location / naming of output files
# output_path = "" # Main path for all files. Must exist!
chk_prefix = Cosmo_
plot_prefix = Cosmop_

# If restarting from the IC Solver or checkpoint, uncomment this and put the
# file into the hdf5 folder (need to create this if not there)
restart_file = hdf5_HR/InitialDataFinal.3d.hdf5

# HDF5 files are written every dt = (L/N)*dt_multiplier*checkpoint_interval
checkpoint_interval = 1000
# set to 0 to turn off plot files (except at t=0 and t=stop_time)
# set to -1 to never ever print plotfiles
plot_interval = 500
num_plot_vars = 7
plot_vars = chi phi rho_scaled S_scaled K_scaled Ham Mom
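# A worked example of the output cadence implied by the formula above, using
# the grid values set below: dx_coarsest = L_full/N_full = 128/128 = 1 and the
# coarse-level dt = dx_coarsest * dt_multiplier = 0.25, so checkpoints are
# written every 0.25 * 1000 = 250 units of code time and plot files every
# 0.25 * 500 = 125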

# subpaths - specific directories for hdf5, pout, extraction data
# (these are created at runtime)
hdf5_subpath = "hdf5"
pout_subpath = "pout"
data_subpath = "data"

# change the name of output files
# pout_prefix = "pout"
print_progress_only_to_rank_0 = 1

# ignore_checkpoint_name_mismatch = 0
# write_plot_ghosts = 0

#################################################
# Initial Data parameters

# Change the gravitational constant of the Universe!
# Default is 1.0, for standard geometric units
# Here we decouple the evolution so that the scalar field evolves on the
# metric background without backreaction (this avoids the need
# to solve the constraints)
G_Newton = 1.

# Scalar field initial data
# These scalar field params make the Hubble length 10 times the wavelength of the scalar field
scalar_amplitude = 1e-1 # same as dphi in solver
scalar_mass = 1e-1 # same as used in solver
# scalar_field_mode = 1 # must be an integer

# Lineout params
lineout_num_points = 10

# Tagging params
# tagging_center = 0 0 0
# tagging_radius = 7

#################################################
# Grid parameters

# 'N' is the number of subdivisions in each direction of a cubic box
# 'L' is the length of the longest side of the box, dx_coarsest = L/N
# NB - If you use reflective BC and want to specify the subdivisions and side
# of the box where there are no symmetries, specify 'N_full' and 'L_full' instead
# NB - if you have a non-cubic grid, you can specify 'N1' or 'N1_full',
# 'N2' or 'N2_full' and 'N3' or 'N3_full' ( then dx_coarsest = L/N(max) )
# NB - the N values need to be multiples of the block_factor
N_full = 128
L_full = 128

# Maximum number of times you can regrid above coarsest level
max_level = 0 # There are (max_level+1) grids, so min is zero

# Frequency of regridding at each level and thresholds on the tagging
# Need one for each level except the top one, ie max_level items
# Generally you do not need to regrid frequently on every level
# in this example we turn off regridding on all levels
# Level Regridding: 0 1 2 3 4 5
regrid_interval = 10 0 0 0 0 0
regrid_threshold = 1e6

# Max and min box sizes
max_box_size = 16
min_box_size = 16

# tag_buffer_size = 0

# grid_buffer_size = 8
# fill_ratio = 0.7
# num_ghosts = 3
# center = 256.0 256.0 256.0 # defaults to center of the grid

#################################################
# Boundary Conditions parameters

#Periodic directions - 0 = false, 1 = true
isPeriodic = 1 1 1
# if not periodic, then specify the boundary type
# 0 = static, 1 = sommerfeld, 2 = reflective
# 3 = extrapolating, 4 = mixed
# (see BoundaryConditions.hpp for details)
# hi_boundary = 4 4 4
# lo_boundary = 2 2 2

# if reflective boundaries selected, must set
# parity of all vars (in order given by UserVariables.hpp)
# 0 = even
# 1,2,3 = odd x, y, z
# 4,5,6 = odd xy, yz, xz
# 7 = odd xyz
vars_parity = 0 0 4 6 0 5 0 #chi and hij
0 0 4 6 0 5 0 #K and Aij
0 1 2 3 #Theta and Gamma
0 1 2 3 1 2 3 #lapse shift and B
0 0 #phi and Pi
vars_parity_diagnostic = 0 1 2 3 #Ham and Mom

# if sommerfeld boundaries selected, must select
# non zero asymptotic values
num_nonzero_asymptotic_vars = 5
nonzero_asymptotic_vars = chi h11 h22 h33 lapse
nonzero_asymptotic_values = 1.0 1.0 1.0 1.0 1.0

# if you are using extrapolating BC:
extrapolation_order = 1
num_extrapolating_vars = 2
extrapolating_vars = phi Pi

#################################################
# Evolution parameters

# dt will be dx*dt_multiplier on each grid level
dt_multiplier = 0.25
stop_time = 100.
# max_steps = 4

# Spatial derivative order (only affects CCZ4 RHS)
# In cosmology sixth-order stencils are rarely used, so this example only has fourth order
max_spatial_derivative_order = 4 # can be 6, but you need to add the stencils in CosmoLevel.cpp (see other examples)

nan_check = 1

# Lapse evolution
lapse_advec_coeff = 1.0
lapse_coeff = 2.0
lapse_power = 1.0

# Shift evolution
shift_advec_coeff = 0.0 # Usually no advection for beta
shift_Gamma_coeff = 0.75
eta = 1.0 # eta of gamma driver, should be of order ~1/M_ADM of spacetime

# CCZ4 parameters
formulation = 1 # 1 for BSSN, 0 for CCZ4
kappa1 = 0.
kappa2 = 0.
kappa3 = 0.
covariantZ4 = 1 # 0: keep kappa1; 1 [default]: replace kappa1 -> kappa1/lapse

# coefficient for KO numerical dissipation
sigma = 0.3

min_chi = 1.e-50
min_lapse = 1.e-50

#################################################
# Apparent Horizon Finder parameters

AH_activate = 0
AH_num_ranks = 20
AH_num_points_u = 30
AH_num_points_v = 50
AH_solve_interval = 1
AH_print_interval = 1
AH_track_center = false
# AH_predict_origin = true
# AH_level_to_run = 0
# AH_allow_re_attempt = 0
# AH_start_time = 0.0
# AH_give_up_time = -1. # -1 to never
# AH_max_fails_after_lost = 0 # -1 to never
# AH_verbose = 1

# AH_initial_guess = 1.0

#################################################
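
Since the commit message mentions fixing a hard-coded scalar field mass, here is a rough sketch (not code from this commit) of how parameters such as scalar_mass and lineout_num_points might be read in the example's SimulationParameters.hpp using GRChombo's GRParmParse; the member names initial_params.amplitude, potential_params.scalar_mass and lineout_num_points are assumptions.

// Sketch of a read_params fragment - defaults mirror the values in params_HR.txt
void read_params(GRParmParse &pp)
{
    // amplitude of the initial scalar field profile
    pp.load("scalar_amplitude", initial_params.amplitude, 0.1);
    // potential mass, previously hard coded
    pp.load("scalar_mass", potential_params.scalar_mass, 0.1);
    // number of points for the lineout extraction along the x-axis
    pp.load("lineout_num_points", lineout_num_points, 10);
}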