diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd94a2063..e8f8084d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ Code freeze date: YYYY-MM-DD
### Added
+- `climada.hazard.tc_tracks.TCTracks.from_FAST` function, add Australia basin (AU) [#993](https://github.com/CLIMADA-project/climada_python/pull/993)
- Add `osm-flex` package to CLIMADA core [#981](https://github.com/CLIMADA-project/climada_python/pull/981)
- `doc.tutorial.climada_entity_Exposures_osm.ipynb` tutorial explaining how to use `osm-flex`with CLIMADA
- `climada.util.coordinates.bounding_box_global` function [#980](https://github.com/CLIMADA-project/climada_python/pull/980)
diff --git a/climada/hazard/storm_europe.py b/climada/hazard/storm_europe.py
index c4b49e7fc..a527b6165 100644
--- a/climada/hazard/storm_europe.py
+++ b/climada/hazard/storm_europe.py
@@ -551,9 +551,15 @@ def from_icon_grib(
+ run_datetime.strftime("%Y%m%d%H")
)
+ # Starting with eccodes 2.28 the name of the data variable in `stacked` is
+ # [i10fg](https://codes.ecmwf.int/grib/param-db/228029).
+ # Before, it used to be the less precise
+ # [gust](https://codes.ecmwf.int/grib/param-db/260065)
+ [data_variable] = list(stacked)
+
# Create Hazard
haz = cls(
- intensity=sparse.csr_matrix(stacked["gust"].T),
+ intensity=sparse.csr_matrix(stacked[data_variable].T),
centroids=cls._centroids_from_nc(nc_centroids_file),
event_id=event_id,
date=date,
@@ -1069,7 +1075,7 @@ def generate_WS_forecast_hazard(
if haz_model == "cosmo1e_file":
haz_model = "C1E"
full_model_name_temp = "COSMO-1E"
- if haz_model == "cosmo2e_file":
+ else: # if haz_model == "cosmo2e_file":
haz_model = "C2E"
full_model_name_temp = "COSMO-2E"
haz_file_name = (
diff --git a/climada/hazard/tc_tracks.py b/climada/hazard/tc_tracks.py
index fce41053a..ec63d0d90 100644
--- a/climada/hazard/tc_tracks.py
+++ b/climada/hazard/tc_tracks.py
@@ -162,6 +162,7 @@
"SI": 1005,
"WP": 1005,
"SP": 1004,
+ "AU": 1004,
}
"""Basin-specific default environmental pressure"""
@@ -1619,6 +1620,118 @@ def from_netcdf(cls, folder_name):
data.append(track)
return cls(data)
+ @classmethod
+ def from_FAST(cls, folder_name: str):
+ """Create a new TCTracks object from NetCDF files generated by the FAST model, modifying
+ the xr.array structure to ensure compatibility with CLIMADA, and calculating the central
+ pressure and radius of maximum wind.
+
+        Model GitHub Repository:
+        https://github.com/linjonathan/tropical_cyclone_risk
+ Model Publication: https://agupubs.onlinelibrary.wiley.com/doi/epdf/10.1029/2023MS003686
+
+        Parameters
+        ----------
+        folder_name : str
+            Folder name from where to read files. All files with a ``.nc``
+            suffix in this folder are read; files with any other suffix
+            are skipped.
+
+        Returns
+        -------
+        tracks : TCTracks
+            TCTracks object with tracks data from the given directory of NetCDF files.
+        """
+
+ LOGGER.info("Reading %s files.", len(get_file_names(folder_name)))
+ data = []
+ for file in get_file_names(folder_name):
+ if Path(file).suffix != ".nc":
+ continue
+ with xr.open_dataset(file) as dataset:
+ for year in dataset.year:
+ for i in dataset.n_trk:
+
+ # Select track
+ track = dataset.sel(n_trk=i, year=year)
+ # chunk dataset at first NaN value
+ lon = track.lon_trks.data
+ last_valid_index = np.where(np.isfinite(lon))[0][-1]
+ track = track.isel(time=slice(0, last_valid_index + 1))
+ # Select lat, lon
+ lat = track.lat_trks.data
+ lon = track.lon_trks.data
+ # Convert lon from 0-360 to -180 - 180
+ lon = ((lon + 180) % 360) - 180
+ # Convert time to pandas Datetime "yyyy.mm.dd"
+ reference_time = (
+ f"{track.tc_years.item()}-{int(track.tc_month.item())}-01"
+ )
+ time = pd.to_datetime(
+ track.time.data, unit="s", origin=reference_time
+ ).astype("datetime64[s]")
+ # Define variables
+ ms_to_kn = 1.943844
+ max_wind_kn = track.vmax_trks.data * ms_to_kn
+ env_pressure = BASIN_ENV_PRESSURE[track.tc_basins.data.item()]
+ cen_pres = _estimate_pressure(
+ np.full(lat.shape, np.nan),
+ lat,
+ lon,
+ max_wind_kn,
+ )
+
+ data.append(
+ xr.Dataset(
+ {
+ "time_step": (
+ "time",
+ np.full(time.shape[0], track.time.data[1]),
+ ),
+ "max_sustained_wind": (
+ "time",
+ track.vmax_trks.data,
+ ),
+ "central_pressure": ("time", cen_pres),
+ "radius_max_wind": (
+ "time",
+ estimate_rmw(
+ np.full(lat.shape, np.nan), cen_pres
+ ),
+ ),
+ "environmental_pressure": (
+ "time",
+ np.full(time.shape[0], env_pressure),
+ ),
+ "basin": (
+ "time",
+ np.full(
+ time.shape[0], track.tc_basins.data.item()
+ ),
+ ),
+ },
+ coords={
+ "time": ("time", time),
+ "lat": ("time", lat),
+ "lon": ("time", lon),
+ },
+ attrs={
+ "max_sustained_wind_unit": "m/s",
+ "central_pressure_unit": "hPa",
+ "name": f"storm_{track.n_trk.item()}",
+ "sid": track.n_trk.item(),
+ "orig_event_flag": True,
+ "data_provider": "FAST",
+ "id_no": track.n_trk.item(),
+ "category": set_category(
+ max_wind_kn, wind_unit="kn", saffir_scale=None
+ ),
+ },
+ )
+ )
+
+ return cls(data)
+
def write_hdf5(self, file_name, complevel=5):
"""Write TC tracks in NetCDF4-compliant HDF5 format.
@@ -2665,12 +2778,12 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1):
return sm_results
-def ibtracs_track_agency(ds_sel):
+def ibtracs_track_agency(track):
"""Get preferred IBTrACS agency for each entry in the dataset.
Parameters
----------
- ds_sel : xarray.Dataset
+ track : xarray.Dataset
Subselection of original IBTrACS NetCDF dataset.
Returns
@@ -2678,7 +2791,7 @@ def ibtracs_track_agency(ds_sel):
agency_pref : list of str
Names of IBTrACS agencies in order of preference.
track_agency_ix : xarray.DataArray of ints
- For each entry in `ds_sel`, the agency to use, given as an index into `agency_pref`.
+ For each entry in `track`, the agency to use, given as an index into `agency_pref`.
"""
agency_pref = ["wmo"] + IBTRACS_AGENCIES
agency_map = {a.encode("utf-8"): i for i, a in enumerate(agency_pref)}
@@ -2687,11 +2800,11 @@ def ibtracs_track_agency(ds_sel):
)
agency_map[b""] = agency_map[b"wmo"]
agency_fun = lambda x: agency_map[x]
- if "track_agency" not in ds_sel.data_vars.keys():
- ds_sel["track_agency"] = ds_sel["wmo_agency"].where(
- ds_sel["wmo_agency"] != b"", ds_sel["usa_agency"]
+ if "track_agency" not in track.data_vars.keys():
+ track["track_agency"] = track["wmo_agency"].where(
+ track["wmo_agency"] != b"", track["usa_agency"]
)
- track_agency_ix = xr.apply_ufunc(agency_fun, ds_sel["track_agency"], vectorize=True)
+ track_agency_ix = xr.apply_ufunc(agency_fun, track["track_agency"], vectorize=True)
return agency_pref, track_agency_ix
diff --git a/climada/hazard/test/data/FAST_test_tracks.nc b/climada/hazard/test/data/FAST_test_tracks.nc
new file mode 100644
index 000000000..dfe9d8b71
Binary files /dev/null and b/climada/hazard/test/data/FAST_test_tracks.nc differ
diff --git a/climada/hazard/test/test_tc_tracks.py b/climada/hazard/test/test_tc_tracks.py
index 56005b51a..2828fbfe3 100644
--- a/climada/hazard/test/test_tc_tracks.py
+++ b/climada/hazard/test/test_tc_tracks.py
@@ -44,6 +44,7 @@
TEST_TRACK_EMANUEL = DATA_DIR.joinpath("emanuel_test_tracks.mat")
TEST_TRACK_EMANUEL_CORR = DATA_DIR.joinpath("temp_mpircp85cal_full.mat")
TEST_TRACK_CHAZ = DATA_DIR.joinpath("chaz_test_tracks.nc")
+TEST_TRACK_FAST = DATA_DIR.joinpath("FAST_test_tracks.nc")
TEST_TRACK_STORM = DATA_DIR.joinpath("storm_test_tracks.txt")
TEST_TRACKS_ANTIMERIDIAN = DATA_DIR.joinpath("tracks-antimeridian")
TEST_TRACKS_LEGACY_HDF5 = DATA_DIR.joinpath("tctracks_hdf5_legacy.nc")
@@ -631,6 +632,51 @@ def test_from_simulations_storm(self):
tc_track = tc.TCTracks.from_simulations_storm(TEST_TRACK_STORM, years=[7])
self.assertEqual(len(tc_track.data), 0)
+ def test_from_FAST(self):
+ """test the correct import of netcdf files from FAST model and the conversion to a
+ different xr.array structure compatible with CLIMADA."""
+
+ tc_track = tc.TCTracks.from_FAST(TEST_TRACK_FAST)
+
+ expected_attributes = {
+ "max_sustained_wind_unit": "m/s",
+ "central_pressure_unit": "hPa",
+ "name": "storm_0",
+ "sid": 0,
+ "orig_event_flag": True,
+ "data_provider": "FAST",
+ "id_no": 0,
+ "category": 1,
+ }
+
+ self.assertIsInstance(
+ tc_track, tc.TCTracks, "tc_track is not an instance of TCTracks"
+ )
+ self.assertIsInstance(
+ tc_track.data, list, "tc_track.data is not an instance of list"
+ )
+ self.assertIsInstance(
+ tc_track.data[0],
+ xr.Dataset,
+ "tc_track.data[0] not an instance of xarray.Dataset",
+ )
+ self.assertEqual(len(tc_track.data), 5)
+ self.assertEqual(tc_track.data[0].attrs, expected_attributes)
+ self.assertEqual(list(tc_track.data[0].coords.keys()), ["time", "lat", "lon"])
+ self.assertEqual(
+ tc_track.data[0].time.values[0],
+ np.datetime64("2025-09-01T00:00:00.000000000"),
+ )
+ self.assertEqual(tc_track.data[0].lat.values[0], 17.863591350508266)
+ self.assertEqual(tc_track.data[0].lon.values[0], -71.76441758319629)
+ self.assertEqual(len(tc_track.data[0].time), 35)
+ self.assertEqual(tc_track.data[0].time_step[0], 10800)
+ self.assertEqual(
+ tc_track.data[0].max_sustained_wind.values[10], 24.71636959089841
+ )
+ self.assertEqual(tc_track.data[0].environmental_pressure.data[0], 1010)
+ self.assertEqual(tc_track.data[0].basin[0], "NA")
+
def test_to_geodataframe_points(self):
"""Conversion of TCTracks to GeoDataFrame using Points."""
tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK)
diff --git a/climada/util/calibrate/test/test_bayesian_optimizer.py b/climada/util/calibrate/test/test_bayesian_optimizer.py
index e80960fda..ef01d1c8b 100644
--- a/climada/util/calibrate/test/test_bayesian_optimizer.py
+++ b/climada/util/calibrate/test/test_bayesian_optimizer.py
@@ -94,13 +94,9 @@ def test_optimizer_params(self):
)
result = contr.optimizer_params()
- self.assertDictContainsSubset(
- {
- "init_points": 1,
- "n_iter": 2,
- },
- result,
- )
+ self.assertEqual(result.get("init_points"), 1)
+ self.assertEqual(result.get("n_iter"), 2)
+
util_func = result["acquisition_function"]
self.assertEqual(util_func.kappa, 3)
self.assertEqual(util_func._kappa_decay, contr._calc_kappa_decay())
diff --git a/doc/guide/Guide_Euler.ipynb b/doc/guide/Guide_Euler.ipynb
index 2f2cfa7f4..ccfc8a445 100644
--- a/doc/guide/Guide_Euler.ipynb
+++ b/doc/guide/Guide_Euler.ipynb
@@ -26,7 +26,7 @@
},
"source": [
"\n",
- "## Installation and working directories\n",
+ "## Installation directory and working directory\n",
"\n",
"Please, get familiar with the various Euler storage options: https://scicomp.ethz.ch/wiki/Storage_systems.
\n",
"As a general rule: use `/cluster/project` for installation and `/cluster/work` for data processing.\n",
@@ -44,10 +44,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "\n",
- "## Pre-installed version of Climada\n",
- "\n",
- "Climada is pre-installed and available in the default pip environment of Euler."
+ "## Climada installation in a virtual environment"
]
},
{
@@ -57,12 +54,24 @@
"\n",
"### 1. Load dependencies\n",
"\n",
- "Use the new software stack. Unless you have already done so, run `set_software_stack.sh new`.\n",
- "\n",
"```bash\n",
- "module load gcc/8.2.0 python/3.11.2 hdf5/1.10.1 gdal/3.4.3 geos/3.9.1 proj/8.2.1 libspatialindex/1.9.3 netcdf/4.6.3 eccodes/2.31.0 zlib/1.2.9 libtiff/4.2.0 sqlite/3.35.5\n",
+ "module load \\\n",
+ " gcc/12.2.0 \\\n",
+ " stack/2024-06 \\\n",
+ " python/3.11.6 \\\n",
+ " hdf5/1.14.3 \\\n",
+ " geos/3.9.1 \\\n",
+ " sqlite/3.43.2 \\\n",
+ " eccodes/2.25.0 \\\n",
+ " gdal/3.6.3 \\\n",
+ " eth_proxy\n",
+ "\n",
+ "module load proj\n",
+ "module unload proj\n",
"```\n",
"\n",
+    "(The last two lines may seem odd but they are working around a conflicting dependency version situation.)\n",
+ "\n",
"You need to execute this every time you login to Euler before Climada can be used. \n",
"To safe yourself from doing it manually, append these lines to the ~/.bashrc script, which is automatically executed upon logging in to Euler."
]
@@ -72,25 +81,73 @@
"metadata": {},
"source": [
"\n",
- "### 2. Check installation \n",
+ "### 2. Create and prepare virtual environment\n",
"\n",
"```bash\n",
- "python -c 'import climada; print(climada.__file__)'\n",
- "```\n",
+ "envname=climada_env\n",
+ "\n",
+ "# create environment\n",
+ "python -m venv --system-site-packages /cluster/project/climate/$USER/venv/$envname\n",
+ "\n",
+    "# activate it\n",
+ ". /cluster/project/climate/$USER/venv/$envname/bin/activate\n",
"\n",
- "should output something like this:\n",
+ "# install python kernel (to be used in JupyterHub, s.b.)\n",
+ "python -m ipykernel install --user --name $envname\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 3. Install dependencies\n",
"\n",
"```bash\n",
- "/cluster/apps/nss/gcc-8.2.0/python/3.10.4/x86_64/lib64/python3.10/site-packages/climada/__init__.py\n",
+ "pip install \\\n",
+ " dask[dataframe] \\\n",
+ " fiona==1.9 \\\n",
+ " gdal==3.6 \\\n",
+ " netcdf4==1.6.2 \\\n",
+ " rasterio==1.4 \\\n",
+ " pyproj==3.7 \\\n",
+ " geopandas==1.0 \\\n",
+ " xarray==2024.9 \\\n",
+ " sparse==0.15\n",
"```"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 4. Install climada\n",
+ "\n",
+ "There are two options. Either install from the downloaded repository (option A), or use a particular released version (option B).\n",
+ "\n",
+ "#### option A\n",
+ "```bash\n",
+ "cd /cluster/project/climate/$USER # or wherever you plan to download the repository\n",
+ "git clone https://github.com/CLIMADA-project/climada_python.git # unless this has been done before\n",
+ "cd climada_python\n",
+ "pip install -e .\n",
+ "```\n",
+ "\n",
+ "#### option B\n",
+ "\n",
+ "```bash\n",
+    "pip install climada==5.0\n",
+ "```\n",
+ "\n",
+ "or whatever version you prefer"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
- "### 3. Adjust the Climada configuration\n",
+ "### 5. Adjust the Climada configuration\n",
"\n",
"Edit a configuration file according to your needs (see [Guide_Configuration](../guide/Guide_Configuration.ipynb)).\n",
"Create a climada.conf file e.g., in /cluster/home/$USER/.config with the following content:\n",
@@ -112,35 +169,25 @@
"metadata": {},
"source": [
"\n",
- "### 4. Run a job\n",
+ "### 6. Test the installation\n",
"\n",
- "Please see the docs at https://slurm.schedmd.com/ on how to use the `slurm` batch system \n",
- "and the Wiki https://scicomp.ethz.ch/wiki/Transition_from_LSF_to_Slurm for a mapping of `lsf` commands to their `slurm` equivalents.\n",
+ "Check installation in login node:\n",
"\n",
"```bash\n",
- "cd /cluster/work/climate/$USER # change to the working directory\n",
- "sbatch [slurm options*] --wrap 'python climada_job_script.py' # submit the job\n",
- "```"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
+ "python -m unittest climada.engine.test.test_impact_calc\n",
+ "```\n",
"\n",
- "## Working with Git branches\n",
+    "This should print the usual \"`OK`\" at the end.\n",
+ "Once that succeeded you may want to test the installation also in a compute node, just for the sake of it:\n",
"\n",
- "If the Climada version of the default installation is not according to your needs, you can install Climada from a local Git repository."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
+ "```bash\n",
+ "sbatch --wrap=\"python -m unittest climada.engine.test.test_impact_calc\"\n",
+ "```\n",
"\n",
- "### 1. Load dependencies \n",
+ "Look for the \"`OK`\" in the hereby created `slurm-[XXXXXXX].out` file\n",
"\n",
- "See [Load dependencies](#1.-load-dependencies) above."
+ "Please see the docs at https://slurm.schedmd.com/ on how to use the `slurm` batch system \n",
+ "and the Wiki https://scicomp.ethz.ch/wiki/Transition_from_LSF_to_Slurm for a mapping of `lsf` commands to their `slurm` equivalents."
]
},
{
@@ -148,67 +195,52 @@
"metadata": {},
"source": [
"\n",
- "### 2. Create installation environment\n",
- "\n",
- "```bash\n",
- "python -m venv --system-site-packages /cluster/project/climate/$USER/climada_venv\n",
- "```"
+ "## Run a Jupyter Notebook on Euler"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
+ "It is possible to run a Jupyter Notebook on Euler within a JupytherHub instance running as an interactive slurm job.\n",
+ "See the documentation https://scicomp.ethz.ch/wiki/JupyterHub.\n",
"\n",
- "### 3. Checkout sources\n",
+ "For using climada inside the jupyter notebook, you need to create a customized `jupyterlabrc` file by running the following lines:\n",
"\n",
"```bash\n",
- "cd /cluster/project/climate/$USER\n",
- "git clone https://github.com/CLIMADA-project/climada_python.git\n",
- "cd climada_python\n",
- "git checkout develop # i.e., your branch of interest\n",
- "```"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
+ "mkdir -p ~/.config/euler/jupyterhub\n",
+ "cat > ~/.config/euler/jupyterhub/jupyterlabrc < **WARNING:** This approach is highly discouraged, as it imposes a heavy and mostly unnecessary burden on the file system of the cluster."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Installation\n",
+ "- Make sure your python environment is activated.\n",
+ "- Run `pip install --upgrade MISSING_MODULE`.\n",
"\n",
- "#### 1. Conda\n",
+ "### 2. Upgrading from Python 3.9 or 3.10\n",
"\n",
- "Download or update to the latest version of [Miniconda](https://conda.io/miniconda.html).
\n",
- "Installation is done by execution of the following steps:\n",
+ "Virtual environments created are i.g. only working for the Python version they were created with.\n",
+ "In particular Python kernels from 3.9 environments will fail to connect in a Jupyter notebook on https://jupyter.euler.hpc.ethz.ch/.\n",
"\n",
- "```bash\n",
- "cd /cluster/project/climate/USERNAME\n",
- "wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh\n",
- "bash Miniconda3-latest-Linux-x86_64.sh\n",
- "miniconda3/bin/conda init\n",
- "rm Miniconda3-latest-Linux-x86_64.sh\n",
- "```\n",
+ "- It's suggested to create new environments and remove the old kernels from `~/.local/share/jupyter/kernels/`.\n",
"\n",
- "During the installation process of Miniconda, you are prompted to set the working directory according to your choice.\n",
- "Set it to `/cluster/project/climate/USERNAME/miniconda3`.
\n",
- "Once the installation has finished, log out of Euler and in again. The command prompt should be preceded by `(base)`, \n",
- "indicating that the installation was a success and that you login in into conda's base environment by default."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### 2. Checkout sources \n",
+ "### 3. Incompatible GEOS version\n",
"\n",
- "See [Checkout sources](#3.-Checkout-sources) above."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### 3. Climada environment\n",
+ "If you get a warning `UserWarning: The Shapely GEOS version (3.9.1-CAPI-1.14.2) is incompatible with the GEOS version PyGEOS was compiled with (3.9.1-CAPI-1.14.2). Conversions between both will be slow.` or similar (version numbers may vary), updating geopandas can help:\n",
"\n",
- "Create the conda environment:\n",
+ "- Create and activate a virtual environment with `venv` (s.a.)\n",
+ "- Run `pip install --upgrade geopandas`\n",
"\n",
- "```bash\n",
- "cd /cluster/project/climate/USERNAME/climada_python\n",
- "conda env create -f requirements/env_climada.yml --name climada_env\n",
- "conda env update -n climada_env -f requirements/env_developer.yml\n",
+ "### 4. Installation doesn't work\n",
"\n",
- "conda activate climada_env\n",
- "conda install conda-build\n",
- "conda develop .\n",
- "```"
+ "If you have additional requirements it may be that the installation process described above is failing. In this case you can run climada from a customized singularity container."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "#### 4. Adjust the Climada configuration\n",
+ "## Fall back: Singularity Container\n",
+ "\n",
+    "In case the installation in a virtual environment does not work, e.g., because some module on Euler is incompatible with additional requirements for Python packages, the last resort is an installation of CLIMADA into a Singularity container. \\\n",
+ "In general, this is more difficult and time-consuming and easier to get wrong. It also requires a lot of diskspace and produces a high number of files, but it provides more flexibility, as one can install basically anything you want.\n",
"\n",
- "See [Adjust the Climada configuration](#3.-Adjust-the-Climada-configuration) above."
+ "To install CLIMADA into a Singularity container, follow these steps:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "#### 5. Climada scripts \n",
+ "### 1. create a recipe\n",
+ "\n",
+ "Create a file `recipe.txt` with the following content:\n",
"\n",
- "Create a bash script for executing python scripts in the climada environment, `climadajob.sh`:\n",
- "```bash\n",
- "#!/bin/bash\n",
- "PYTHON_SCRIPT=$1\n",
- "shift\n",
- ". ~/.bashrc\n",
- "conda activate climada_env\n",
- "python $PYTHON_SCRIPT $@\n",
- "echo $PYTHON_SCRIPT completed\n",
"```\n",
+ "Bootstrap: docker\n",
+ "From: nvidia/cuda:12.0.0-devel-ubuntu22.04\n",
"\n",
- "Make it executable:\n",
"\n",
- "```bash\n",
- "chmod +x climadajob.sh\n",
- "```\n",
+ "%labels\n",
+ " version=\"1.0.0\"\n",
+ " description=\"climada\"\n",
"\n",
- "Create a python script that executes climada code, e.g., `climada_smoke_test.py`:\n",
"\n",
- "```python\n",
- "import sys\n",
- "from climada import CONFIG, SYSTEM_DIR\n",
- "from climada.util.test.test_finance import TestNetpresValue\n",
- "TestNetpresValue().test_net_pres_val_pass()\n",
- "print(SYSTEM_DIR)\n",
- "print(CONFIG.local_data.save_dir.str())\n",
- "print(\"the script ran with arguments\", sys.argv)\n",
- "```"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### 6. Run a job \n",
+ "%post\n",
"\n",
- "With the scripts from above you can submit the python script as a job like this:\n",
+ " # Install requirements\n",
+ " apt-get -y update\n",
+ " DEBIAN_FRONTEND=\"noninteractive\" TZ=\"Europe/Rome\" apt-get -y install tzdata\n",
+ " apt-get install -y `apt-cache depends openssh-client | awk '/Depends:/{print$2}'`\n",
+ " apt-get download openssh-client\n",
+ " dpkg --unpack openssh-client*.deb\n",
+ " rm /var/lib/dpkg/info/openssh-client.postinst -f\n",
+ " dpkg --configure openssh-client\n",
+ " apt-get -y install tk tcl rsync wget curl git patch\n",
"\n",
- "```bash\n",
- "sbatch [slurm options] --wrap \"/path/to/climadajob.sh /path/to/climada_smoke_test.py arg1 arg2\"\n",
- "```\n",
+ " mkdir -p /opt/software\n",
"\n",
- "After the job has finished the slurm output file should look something like this:\n",
+ " # Install conda and mamba\n",
+ " cd /opt/software\n",
+ " curl -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh\n",
+ " sh ./Miniconda3-latest-Linux-x86_64.sh -p /opt/software/conda -b\n",
+ " /opt/software/conda/bin/conda install -y -c conda-forge mamba\n",
"\n",
- "```\n",
- "/cluster/work/climate/USERNAME/climada/data\n",
- "/cluster/work/climate/USERNAME/climada/results\n",
- "the script ran with arguments ['/path/to/climada_smoke_test.py', 'arg1' 'arg2']\n",
- "python_script.sh completed\n",
- "```\n",
+ " # Create and activate environment\n",
+ " /opt/software/conda/bin/mamba create -n climada_env python=3.11 --yes\n",
+ " . /opt/software/conda/etc/profile.d/conda.sh && conda activate climada_env\n",
"\n",
- "Please see the docs at https://slurm.schedmd.com/ on how to use the `slurm` batch system \n",
- "and the Wiki https://scicomp.ethz.ch/wiki/Transition_from_LSF_to_Slurm for a mapping of `lsf` commands to their `slurm` equivalents."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Deinstallation"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### 1. Conda \n",
+ " # Install jupyter\n",
+ " python -m pip install jupyterhub jupyterlab\n",
"\n",
- "Remove the miniconda3 directory from the installation directory:\n",
"\n",
- "```bash\n",
- "rm -rf /cluster/project/climate/USERNAME/miniconda3/\n",
- "```\n",
+ " # Install climada from source\n",
+ " mkdir -p /opt/climada_workspace\n",
+ " cd /opt/climada_workspace\n",
"\n",
- "Delete the conda related parts from `/cluster/home/USERNAME/.bashrc`, i.e., everything between\n",
+ " git clone https://github.com/CLIMADA-project/climada_python.git\n",
+ " cd climada_python\n",
+ " git checkout develop\n",
"\n",
- "`# >>> conda initialize >>>`\\\n",
- "and\\\n",
- "`# <<< conda initialize <<<`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### 2. Climada\n",
+ " mamba env update -n climada_env -f requirements/env_climada.yml\n",
+ " python -m pip install -e ./\n",
"\n",
- "Remove the climada sources and config file:\n",
+ " # Install climada-petals from source\n",
+ " cd /opt/climada_workspace\n",
"\n",
- "```bash\n",
- "rm -rf /cluster/project/climate/USERNAME/climada_python\n",
- "rm -f /cluster/home/USERNAME/climada.conf /cluster/home/USERNAME/*/climada.conf\n",
- "```"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
+ " git clone https://github.com/CLIMADA-project/climada_petals.git\n",
+ " cd climada_petals\n",
+ " git checkout develop\n",
"\n",
- "## Run Jupyter Notebook on Euler"
+ " mamba env update -n climada_env -f requirements/env_climada.yml\n",
+ " python -m pip install -e ./\n",
+ "\n",
+ "\n",
+ "%environment\n",
+ " #export LC_ALL=C\n",
+ "\n",
+ "\n",
+ "%runscript\n",
+ " . /opt/software/conda/bin/activate && conda activate climada_env\n",
+ " $@\n",
+ "```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "It is possible to run a Jupyter Notebook on Euler within a JupytherHub instance running as an interactive slurm job.\n",
- "See the documentation https://scicomp.ethz.ch/wiki/JupyterHub.\n",
+ "### 2. build the container\n",
"\n",
- "For using climada inside the jupyter notebook, create a `.jupyterlabrc` file in your Euler home directory with the following content:\n",
+    "- as it is CPU and memory consuming, run a job\n",
+ "- the container will be created in the directory `climada.sif`, it is going to be huge, so it's best located within the `project` file system\n",
"\n",
- "```\n",
- "module purge\n",
- "module load StdEnv gcc/8.2.0 python_gpu/3.11.2 eth_proxy hdf5/1.10.1 gdal/3.4.3 geos/3.9.1 proj/8.2.1 libspatialindex/1.9.3 netcdf/4.6.3 eccodes/2.31.0 zlib/1.2.9 libtiff/4.2.0 sqlite/3.35.5\n",
- "```\n",
- "\n",
- "Then login to https://jupyter.euler.hpc.ethz.ch/ and start a JupyterLab server."
+ "```bash\n",
+ "sbatch \\\n",
+ " --ntasks=1\\\n",
+ " --cpus-per-task=1 \\\n",
+ " --time=1:00:00 \\\n",
+ " --job-name=\"build-climada-container\" \\\n",
+ " --mem-per-cpu=4096 \\\n",
+ " --wrap=\"singularity build --sandbox /cluster/project/[path/to]/climada.sif recipe.txt\"\n",
+ "```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
+ "### 3. Configure jupyterhub\n",
"\n",
- "### Using a virtual environment in a Jupyter notebook\n",
+ "create a file `~/.config/euler/jupyterhub/jupyterlabrc` with the following content:\n",
"\n",
- "By default the pre-installed climada version is running in your notebooks. If you want to use climada from source you can simply install a python kernel from the `climada_venv` environment, see [Working with Git branches](#working-with-git-branches)\n",
+ "```\n",
+ "#!/bin/bash\n",
"\n",
- "Install an IPyhton-kernel:\n",
+ "# Import required modules\n",
+ "module purge\n",
+ "module load stack/2024-05 gcc/13.2.0 python/3.11.6_cuda eth_proxy\n",
"\n",
- "```bash\n",
- "source /cluster/project/climate/$USER/climada_venv/bin/activate\n",
- "python -m ipykernel install --user --name climada_venv\n",
- "```\n",
"\n",
- "Start a new JupyterLab server, the `climada_venv` kernel should appear in the list of available kernels in JupyterHub."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
+ "# Setup the required env\n",
+ "export JUPYTER_CONFIG_PATH=$HOME/.jupyterlab:$PYTHON_EULER_ROOT/share/jupyter\n",
+ "export JUPYTER_CONFIG_DIR=$HOME/.jupyterlab\n",
+ "export JUPYTER_PATH=$PYTHON_EULER_ROOT/share/jupyter\n",
+ "export JUPYTERLAB_DIR=$PYTHON_EULER_ROOT/share/jupyter/lab\n",
+ "export JUPYTERLAB_ROOT=$PYTHON_EULER_ROOT\n",
"\n",
- "## Trouble shooting\n",
+ "export JUPYTER_HOME=${JUPYTER_HOME:-$HOME}\n",
+ "export JUPYTER_DIR=${JUPYTER_DIR:-/}\n",
+ "export JUPYTER_EXTRA_ARGS=${JUPYTER_EXTRA_ARGS:-}\n",
"\n",
- "### 1. Python Module not found or available\n",
+ "warn_jupyterhub\n",
"\n",
- "- Make sure your python environment is activated.\n",
- "- Run `pip install --upgrade MISSING_MODULE`.\n",
+ "sleep 1\n",
"\n",
- "### 2. Upgrading from Python 3.9 or 3.10\n",
+ "echo $PYTHON_EULER_ROOT\n",
+ "echo $JUPYTER_EXTRA_ARGS\n",
+ "echo $PROXY_PORT\n",
"\n",
- "Virtual environments created are i.g. only working for the Python version they were created with.\n",
- "In particular Python kernels from 3.9 environments will fail to connect in a Jupyter notebook on https://jupyter.euler.hpc.ethz.ch/.\n",
+ "export PYTHON_ROOT=/opt/software/conda/envs/climada_env\n",
+ "module purge\n",
"\n",
- "- It's suggested to create new environments and remove the old kernels from `~/.local/share/jupyter/kernels/`.\n",
+ "export APPTAINER_BIND=\"/cluster,$TMPDIR,$SCRATCH\"\n",
+ "singularity exec --nv \\\n",
+ " --env=\"NVIDIA_VISIBLE_DEVICES=all\" \\\n",
+ " --bind /cluster/project/[path/to]/climada_python:/opt/climada_workspace/climada_python \\\n",
+ " --bind /cluster/project/[path/to]/climada_petals:/opt/climada_workspace/climada_petals \\\n",
+ " /cluster/project/[path/to]/climada.sif \\\n",
+ " /bin/bash < \n",
"### d) Load TC tracks from other sources\n",
"\n",
- "In addition to the [historical records of TCs (IBTrACS)](#Part1.a), the [probabilistic extension](#Part1.b) of these tracks, and the [ECMWF Forecast tracks](#Part1.c), CLIMADA also features functions to read in synthetic TC tracks from other sources. These include synthetic storm tracks from Kerry Emanuel's coupled statistical-dynamical model (Emanuel et al., 2006 as used in Geiger et al., 2016), synthetic storm tracks from a second coupled statistical-dynamical model (CHAZ) (as described in Lee et al., 2018), and synthetic storm tracks from a fully statistical model (STORM) Bloemendaal et al., 2020). However, these functions are partly under development and/or targeted at advanced users of CLIMADA in the context of very specific use cases. They are thus not covered in this tutorial."
+    "In addition to the [historical records of TCs (IBTrACS)](#Part1.a), the [probabilistic extension](#Part1.b) of these tracks, and the [ECMWF Forecast tracks](#Part1.c), CLIMADA also features functions to read in synthetic TC tracks from other sources. These include synthetic storm tracks from Kerry Emanuel's coupled statistical-dynamical model (Emanuel et al., 2006 as used in Geiger et al., 2016), from an open source derivative of Kerry Emanuel's model [FAST](https://github.com/linjonathan/tropical_cyclone_risk?tab=readme-ov-file), synthetic storm tracks from a second coupled statistical-dynamical model (CHAZ) (as described in Lee et al., 2018), and synthetic storm tracks from a fully statistical model (STORM) (Bloemendaal et al., 2020). However, these functions are partly under development and/or targeted at advanced users of CLIMADA in the context of very specific use cases. They are thus not covered in this tutorial."
]
},
{
diff --git a/requirements/env_climada.yml b/requirements/env_climada.yml
index c7cf9b823..ea455124a 100644
--- a/requirements/env_climada.yml
+++ b/requirements/env_climada.yml
@@ -4,18 +4,18 @@ channels:
- nodefaults
dependencies:
- bottleneck>=1.4
- - cartopy>=0.23
- - cfgrib>=0.9.9,<0.9.10 # 0.9.10 cannot read the icon_grib files from https://opendata.dwd.de
+ - cartopy>=0.24
+ - cfgrib>=0.9
- contextily>=1.6
- - dask>=2024.5
- - eccodes>=2.27,<2.28 # 2.28 changed some labels, in particular: gust -> i20fg
- - gdal>=3.6
- - geopandas>=0.14
- - h5py>=3.8
- - haversine>=2.8
- - matplotlib-base>=3.9
- - netcdf4>=1.6
- - numba>=0.60
+ - dask>=2025.2
+ - eccodes>=2.40
+ - gdal>=3.10
+ - geopandas>=0.14,<1.0 # geopandas 1.0 does not depend on fiona anymore, hence fiona would need to be added as dependency
+ - h5py>=3.12
+ - haversine>=2.9
+ - matplotlib-base>=3.10
+ - netcdf4>=1.7
+ - numba>=0.61
- numexpr>=2.10
- openpyxl>=3.1
- osm-flex>=1.1
@@ -25,20 +25,20 @@ dependencies:
- pint>=0.24
- pip
- pycountry>=24.6
- - pyproj>=3.5
- - pytables>=3.7
+ - pyproj>=3.7
+ - pytables>=3.10
- pyxlsb>=1.0
- - rasterio>=1.3
+ - rasterio>=1.4
- requests>=2.32
- salib>=1.5
- seaborn>=0.13
- - scikit-learn>=1.5
- - scipy>=1.13,<1.15 # 1.15 is not compatible with climada_petals, climada_petals.engine.test.test_supplychain fails with "'Series' has no attribute 'nonzero'"
+ - scikit-learn>=1.6
+ - scipy>=1.14,<1.15 # 1.15 is not compatible with climada_petals, climada_petals.engine.test.test_supplychain fails with "'Series' has no attribute 'nonzero'"
- sparse>=0.15
- statsmodels>=0.14
- tabulate>=0.9
- - tqdm>=4.66
+ - tqdm>=4.67
- unittest-xml-reporting>=3.2
- - xarray>=2024.6
+ - xarray>=2025.1
- xlrd>=2.0
- - xlsxwriter>=3.1
+ - xlsxwriter>=3.2