Merge branch 'develop' into feature/improve_local_exceedance
ValentinGebhart committed Feb 14, 2025
2 parents 30bd733 + 10e7869 commit 5c219b1
Showing 9 changed files with 446 additions and 301 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -12,6 +12,7 @@ Code freeze date: YYYY-MM-DD

### Added

- `climada.hazard.tc_tracks.TCTracks.from_FAST` function; added the Australia basin (AU) [#993](https://github.com/CLIMADA-project/climada_python/pull/993)
- Add `osm-flex` package to CLIMADA core [#981](https://github.com/CLIMADA-project/climada_python/pull/981)
- `doc.tutorial.climada_entity_Exposures_osm.ipynb` tutorial explaining how to use `osm-flex` with CLIMADA
- `climada.util.coordinates.bounding_box_global` function [#980](https://github.com/CLIMADA-project/climada_python/pull/980)
10 changes: 8 additions & 2 deletions climada/hazard/storm_europe.py
@@ -551,9 +551,15 @@ def from_icon_grib(
+ run_datetime.strftime("%Y%m%d%H")
)

# Starting with eccodes 2.28, the name of the data variable in `stacked` is
# [i10fg](https://codes.ecmwf.int/grib/param-db/228029); previously it was
# the less precise [gust](https://codes.ecmwf.int/grib/param-db/260065).
[data_variable] = list(stacked)

# Create Hazard
haz = cls(
-            intensity=sparse.csr_matrix(stacked["gust"].T),
+            intensity=sparse.csr_matrix(stacked[data_variable].T),
centroids=cls._centroids_from_nc(nc_centroids_file),
event_id=event_id,
date=date,
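
Note on the `[data_variable]` unpacking above: iterating an `xarray.Dataset` yields its data-variable names, so single-element unpacking extracts the name regardless of the eccodes version while failing loudly if more than one variable is ever present. A minimal standalone sketch (not part of the diff):

```python
import numpy as np
import xarray as xr

# a stand-in for the GRIB-derived dataset; the variable name depends on
# the eccodes version ("i10fg" since 2.28, "gust" before)
stacked = xr.Dataset({"i10fg": ("time", np.zeros(3))})

# extracts the single data-variable name; raises ValueError if the
# dataset ever contains more (or fewer) than one data variable
[data_variable] = list(stacked)
print(stacked[data_variable].name)  # -> "i10fg"
```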
@@ -1069,7 +1075,7 @@ def generate_WS_forecast_hazard(
if haz_model == "cosmo1e_file":
haz_model = "C1E"
full_model_name_temp = "COSMO-1E"
-    if haz_model == "cosmo2e_file":
+    else:  # if haz_model == "cosmo2e_file":
haz_model = "C2E"
full_model_name_temp = "COSMO-2E"
haz_file_name = (
127 changes: 120 additions & 7 deletions climada/hazard/tc_tracks.py
@@ -162,6 +162,7 @@
"SI": 1005,
"WP": 1005,
"SP": 1004,
"AU": 1004,
}
"""Basin-specific default environmental pressure"""

@@ -1619,6 +1620,118 @@ def from_netcdf(cls, folder_name):
data.append(track)
return cls(data)

@classmethod
def from_FAST(cls, folder_name: str):
"""Create a new TCTracks object from NetCDF files generated by the FAST model, modifying
the xr.array structure to ensure compatibility with CLIMADA, and calculating the central
pressure and radius of maximum wind.
Model GitHub Repository: https://github.com/linjonathan/tropical_cyclone_risk?
tab=readme-ov-file
Model Publication: https://agupubs.onlinelibrary.wiley.com/doi/epdf/10.1029/2023MS003686
Parameters:
----------
folder_name : str
Folder name from where to read files.
storm_id : int
Number of the simulated storm
Returns:
-------
tracks : TCTracks
TCTracks object with tracks data from the given directory of NetCDF files.
"""

files = get_file_names(folder_name)
LOGGER.info("Reading %d files.", len(files))
data = []
for file in files:
if Path(file).suffix != ".nc":
continue
with xr.open_dataset(file) as dataset:
for year in dataset.year:
for i in dataset.n_trk:

# Select track
track = dataset.sel(n_trk=i, year=year)
# chunk dataset at first NaN value
lon = track.lon_trks.data
last_valid_index = np.where(np.isfinite(lon))[0][-1]
track = track.isel(time=slice(0, last_valid_index + 1))
# Select lat, lon
lat = track.lat_trks.data
lon = track.lon_trks.data
# Convert lon from [0, 360) to [-180, 180), e.g. 350.0 -> -10.0
lon = ((lon + 180) % 360) - 180
# Convert raw times (seconds since genesis) to datetime64[s],
# anchored at the first day of the storm's genesis month
reference_time = (
f"{track.tc_years.item()}-{int(track.tc_month.item())}-01"
)
time = pd.to_datetime(
track.time.data, unit="s", origin=reference_time
).astype("datetime64[s]")
# Define variables
ms_to_kn = 1.943844
max_wind_kn = track.vmax_trks.data * ms_to_kn
env_pressure = BASIN_ENV_PRESSURE[track.tc_basins.data.item()]
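# no observed central pressure is available from FAST, so NaNs are
# passed and the pressure is estimated from position and wind alone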
cen_pres = _estimate_pressure(
np.full(lat.shape, np.nan),
lat,
lon,
max_wind_kn,
)

data.append(
xr.Dataset(
{
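# time steps are assumed uniform: the second raw time value
# (seconds since genesis) equals the step length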
"time_step": (
"time",
np.full(time.shape[0], track.time.data[1]),
),
"max_sustained_wind": (
"time",
track.vmax_trks.data,
),
"central_pressure": ("time", cen_pres),
"radius_max_wind": (
"time",
estimate_rmw(
np.full(lat.shape, np.nan), cen_pres
),
),
"environmental_pressure": (
"time",
np.full(time.shape[0], env_pressure),
),
"basin": (
"time",
np.full(
time.shape[0], track.tc_basins.data.item()
),
),
},
coords={
"time": ("time", time),
"lat": ("time", lat),
"lon": ("time", lon),
},
attrs={
"max_sustained_wind_unit": "m/s",
"central_pressure_unit": "hPa",
"name": f"storm_{track.n_trk.item()}",
"sid": track.n_trk.item(),
"orig_event_flag": True,
"data_provider": "FAST",
"id_no": track.n_trk.item(),
"category": set_category(
max_wind_kn, wind_unit="kn", saffir_scale=None
),
},
)
)

return cls(data)
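
A hedged usage sketch (`fast_output/` is a hypothetical directory of FAST NetCDF files):

```python
from climada.hazard.tc_tracks import TCTracks

tracks = TCTracks.from_FAST("fast_output/")
print(len(tracks.data))                   # one xr.Dataset per simulated storm
print(tracks.data[0].max_sustained_wind)  # stored in m/s, per the attrs above
```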

def write_hdf5(self, file_name, complevel=5):
"""Write TC tracks in NetCDF4-compliant HDF5 format.
@@ -2665,20 +2778,20 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1):
return sm_results


-def ibtracs_track_agency(ds_sel):
+def ibtracs_track_agency(track):
"""Get preferred IBTrACS agency for each entry in the dataset.
Parameters
----------
-ds_sel : xarray.Dataset
+track : xarray.Dataset
Subselection of original IBTrACS NetCDF dataset.
Returns
-------
agency_pref : list of str
Names of IBTrACS agencies in order of preference.
track_agency_ix : xarray.DataArray of ints
-    For each entry in `ds_sel`, the agency to use, given as an index into `agency_pref`.
+    For each entry in `track`, the agency to use, given as an index into `agency_pref`.
"""
agency_pref = ["wmo"] + IBTRACS_AGENCIES
agency_map = {a.encode("utf-8"): i for i, a in enumerate(agency_pref)}
@@ -2687,11 +2800,11 @@ def ibtracs_track_agency(ds_sel):
)
agency_map[b""] = agency_map[b"wmo"]
agency_fun = lambda x: agency_map[x]
if "track_agency" not in ds_sel.data_vars.keys():
ds_sel["track_agency"] = ds_sel["wmo_agency"].where(
ds_sel["wmo_agency"] != b"", ds_sel["usa_agency"]
if "track_agency" not in track.data_vars.keys():
track["track_agency"] = track["wmo_agency"].where(
track["wmo_agency"] != b"", track["usa_agency"]
)
track_agency_ix = xr.apply_ufunc(agency_fun, ds_sel["track_agency"], vectorize=True)
track_agency_ix = xr.apply_ufunc(agency_fun, track["track_agency"], vectorize=True)
return agency_pref, track_agency_ix
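
The lookup pattern above (a plain dict from byte-string agency names to indices into `agency_pref`, applied element-wise via `xr.apply_ufunc` with `vectorize=True`) can be exercised in isolation. A minimal sketch with an illustrative preference order:

```python
import numpy as np
import xarray as xr

agency_pref = ["wmo", "usa", "tokyo"]  # illustrative order, not the real list
agency_map = {a.encode("utf-8"): i for i, a in enumerate(agency_pref)}

names = xr.DataArray(np.array([b"usa", b"wmo", b"tokyo"]))
# vectorize=True wraps the lambda with np.vectorize, applying it per element
idx = xr.apply_ufunc(lambda x: agency_map[x], names, vectorize=True)
print(idx.values)  # -> [1 0 2]
```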


Binary file added climada/hazard/test/data/FAST_test_tracks.nc
46 changes: 46 additions & 0 deletions climada/hazard/test/test_tc_tracks.py
@@ -44,6 +44,7 @@
TEST_TRACK_EMANUEL = DATA_DIR.joinpath("emanuel_test_tracks.mat")
TEST_TRACK_EMANUEL_CORR = DATA_DIR.joinpath("temp_mpircp85cal_full.mat")
TEST_TRACK_CHAZ = DATA_DIR.joinpath("chaz_test_tracks.nc")
TEST_TRACK_FAST = DATA_DIR.joinpath("FAST_test_tracks.nc")
TEST_TRACK_STORM = DATA_DIR.joinpath("storm_test_tracks.txt")
TEST_TRACKS_ANTIMERIDIAN = DATA_DIR.joinpath("tracks-antimeridian")
TEST_TRACKS_LEGACY_HDF5 = DATA_DIR.joinpath("tctracks_hdf5_legacy.nc")
@@ -631,6 +632,51 @@ def test_from_simulations_storm(self):
tc_track = tc.TCTracks.from_simulations_storm(TEST_TRACK_STORM, years=[7])
self.assertEqual(len(tc_track.data), 0)

def test_from_FAST(self):
"""test the correct import of netcdf files from FAST model and the conversion to a
different xr.array structure compatible with CLIMADA."""

tc_track = tc.TCTracks.from_FAST(TEST_TRACK_FAST)

expected_attributes = {
"max_sustained_wind_unit": "m/s",
"central_pressure_unit": "hPa",
"name": "storm_0",
"sid": 0,
"orig_event_flag": True,
"data_provider": "FAST",
"id_no": 0,
"category": 1,
}

self.assertIsInstance(
tc_track, tc.TCTracks, "tc_track is not an instance of TCTracks"
)
self.assertIsInstance(
tc_track.data, list, "tc_track.data is not an instance of list"
)
self.assertIsInstance(
tc_track.data[0],
xr.Dataset,
"tc_track.data[0] not an instance of xarray.Dataset",
)
self.assertEqual(len(tc_track.data), 5)
self.assertEqual(tc_track.data[0].attrs, expected_attributes)
self.assertEqual(list(tc_track.data[0].coords.keys()), ["time", "lat", "lon"])
self.assertEqual(
tc_track.data[0].time.values[0],
np.datetime64("2025-09-01T00:00:00.000000000"),
)
self.assertEqual(tc_track.data[0].lat.values[0], 17.863591350508266)
self.assertEqual(tc_track.data[0].lon.values[0], -71.76441758319629)
self.assertEqual(len(tc_track.data[0].time), 35)
self.assertEqual(tc_track.data[0].time_step[0], 10800)
self.assertEqual(
tc_track.data[0].max_sustained_wind.values[10], 24.71636959089841
)
self.assertEqual(tc_track.data[0].environmental_pressure.data[0], 1010)
self.assertEqual(tc_track.data[0].basin[0], "NA")

def test_to_geodataframe_points(self):
"""Conversion of TCTracks to GeoDataFrame using Points."""
tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK)
Expand Down
10 changes: 3 additions & 7 deletions climada/util/calibrate/test/test_bayesian_optimizer.py
@@ -94,13 +94,9 @@ def test_optimizer_params(self):
)
result = contr.optimizer_params()

-        self.assertDictContainsSubset(
-            {
-                "init_points": 1,
-                "n_iter": 2,
-            },
-            result,
-        )
+        self.assertEqual(result.get("init_points"), 1)
+        self.assertEqual(result.get("n_iter"), 2)

util_func = result["acquisition_function"]
self.assertEqual(util_func.kappa, 3)
self.assertEqual(util_func._kappa_decay, contr._calc_kappa_decay())
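
`assertDictContainsSubset` is deprecated (and removed in recent Python versions), hence the per-key assertions above. An equivalent one-line subset check, under the same assumptions:

```python
expected = {"init_points": 1, "n_iter": 2}
# merging `expected` into `result` changes nothing iff `result`
# already contains exactly those key/value pairs
self.assertEqual(result, {**result, **expected})
```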
