Skip to content

Commit

Permalink
Merge pull request UDST#105 from BayAreaMetro/hazards_strategies
Browse files Browse the repository at this point in the history
Hazards strategies
theocharides authored Aug 26, 2019
2 parents ba34046 + 537126e commit 6defd1e
Showing 8 changed files with 239,567 additions and 61,206 deletions.
16 changes: 15 additions & 1 deletion baus/datasources.py
Original file line number Diff line number Diff line change
@@ -788,14 +788,28 @@ def zones(store):
return store['zones'].sort_index()


# SLR inundation levels for parcels
# SLR inundation levels for parcels, with full, partial, or no mitigation
@orca.table(cache=True)
def slr_parcel_inundation():
    """Baseline sea-level-rise inundation level per parcel (no mitigation).

    Loads the parcel-level inundation table from the data directory,
    indexed by parcel_id, and registers it as a cached orca table.
    """
    csv_path = os.path.join(misc.data_dir(), "slr_parcel_inundation.csv")
    return pd.read_csv(csv_path, index_col='parcel_id')


@orca.table(cache=True)
def slr_parcel_inundation_mf():
    """Sea-level-rise inundation level per parcel under full mitigation.

    Loads the "mf" (full mitigation) variant of the parcel inundation
    table, indexed by parcel_id, and registers it as a cached orca table.
    """
    csv_path = os.path.join(misc.data_dir(), "slr_parcel_inundation_mf.csv")
    return pd.read_csv(csv_path, index_col='parcel_id')


@orca.table(cache=True)
def slr_parcel_inundation_mp():
    """Sea-level-rise inundation level per parcel under partial mitigation.

    Loads the "mp" (partial mitigation) variant of the parcel inundation
    table, indexed by parcel_id, and registers it as a cached orca table.
    """
    csv_path = os.path.join(misc.data_dir(), "slr_parcel_inundation_mp.csv")
    return pd.read_csv(csv_path, index_col='parcel_id')


# SLR progression by year, for "futures" C, B, R
@orca.table(cache=True)
def slr_progression_C():
28 changes: 26 additions & 2 deletions baus/earthquake.py
Original file line number Diff line number Diff line change
@@ -255,8 +255,8 @@ def earthquake_demolish(parcels, parcels_tract, tracts_earthquake, buildings,
return

if year == 2035:
# assign each parcel to a census tract
# using the lookup table created with "parcel_tract_assignment.ipynb"
# assign each parcel to a census tract using the lookup table
# created with scripts/parcel_tract_assignment.py
census_tract = pd.Series(parcels_tract['census_tract'],
parcels_tract.index)
print "Number of parcels with census tracts is: %d" % len(census_tract)
@@ -286,6 +286,7 @@ def earthquake_demolish(parcels, parcels_tract, tracts_earthquake, buildings,
existing_buildings = []
new_buildings = []
fire_buildings = []
retrofit_bldgs_tot = pd.DataFrame()

for i in range(len(tracts)):
grp = [x[1] for x in tract_parcels_grp[i]]
@@ -298,6 +299,29 @@ def earthquake_demolish(parcels, parcels_tract, tracts_earthquake, buildings,
build_frag = buildings_i['eq_destroy'].sort_values(ascending=False)
top_build_frag = build_frag[: int(round(
len(build_frag) * existing_pct))]
# in "strategies" scenarios, exclude some existing buildings
# from destruction due to retrofit
if scenario in settings["eq_scenarios"]["mitigation"]:
retrofit_codes = ['DU01G1N', 'DU01G2N', 'MF01G1N', 'MF01G2N',
'MF25G1N', 'MF25G2N', 'MF25G3N', 'MF25G4N',
'SF01G1N', 'SF2PG1N']
top_build_frag_bldgs = buildings[buildings.index.isin
(top_build_frag.index)]
retrofit_bldgs = top_build_frag_bldgs[top_build_frag_bldgs.
earthquake_code.isin
(retrofit_codes)]
retro_no = round(float(len(retrofit_bldgs))/2)
retrofit_set = np.random.choice(retrofit_bldgs.index,
retro_no, replace=False)
# update top_build_frag to remove retrofit buildings
top_build_frag = top_build_frag[~top_build_frag.index.isin
(retrofit_set)]
# add table of retrofit buildings that weren't destroyed
retrofit_bldgs_set = buildings[buildings.index.isin
(retrofit_set)]
retrofit_bldgs_tot = retrofit_bldgs_tot. \
append(retrofit_bldgs_set)
orca.add_table("retrofit_bldgs_tot", retrofit_bldgs_tot)
# add to a list of buildings to destroy
buildings_top = top_build_frag.index
existing_buildings.extend(buildings_top)
24 changes: 18 additions & 6 deletions baus/slr.py
Original file line number Diff line number Diff line change
@@ -12,22 +12,34 @@

@orca.step()
def slr_inundate(scenario, parcels, slr_progression_C, slr_progression_R,
slr_progression_B, year, slr_parcel_inundation, settings):
slr_progression_B, slr_parcel_inundation,
slr_parcel_inundation_mf, slr_parcel_inundation_mp,
year, settings):

if scenario not in settings["slr_scenarios"]["enable_in"]:
return

if scenario in settings["slr_scenarios"]["rtff"]:
if scenario in settings["slr_scenarios"]["rtff_prog"]:
slr_progression = slr_progression_R.to_frame()
elif scenario in settings["slr_scenarios"]["cag"]:
elif scenario in settings["slr_scenarios"]["cag_prog"]:
slr_progression = slr_progression_C.to_frame()
elif scenario in settings["slr_scenarios"]["bttf"]:
elif scenario in settings["slr_scenarios"]["bttf_prog"]:
slr_progression = slr_progression_B.to_frame()

orca.add_table("slr_progression", slr_progression)

inundation_yr = slr_progression.query('year==@year')['inundated'].item()
print "Inundation in model year is %d inches" % inundation_yr
slr_parcel_inundation = slr_parcel_inundation.to_frame()

if scenario in settings["slr_scenarios"]["mitigation_full"]:
slr_parcel_inundation = slr_parcel_inundation_mf.to_frame()
orca.add_injectable("slr_mitigation", 'full mitigation')
elif scenario in settings["slr_scenarios"]["mitigation_partial"]:
slr_parcel_inundation = slr_parcel_inundation_mp.to_frame()
orca.add_injectable("slr_mitigation", 'partial mitigation')
else:
slr_parcel_inundation = slr_parcel_inundation.to_frame()
orca.add_injectable("slr_mitigation", 'none')

destroy_parcels = slr_parcel_inundation.\
query('inundation<=@inundation_yr').astype('bool')
orca.add_table('destroy_parcels', destroy_parcels)
159 changes: 106 additions & 53 deletions baus/summaries.py
Original file line number Diff line number Diff line change
@@ -22,20 +22,30 @@ def write(s):
f.write(s + "\n")

# sea level rise
# level
if scenario in settings["slr_scenarios"]["enable_in"]:
slr_progression = orca.get_table("slr_progression")
slr = slr_progression['inundated'].max()
write("Sea level rise in this scenario is %d inches" % slr)
else:
write("There is no sea level rise in this scenario")
# mitigation
slr_mitigation = orca.get_injectable("slr_mitigation")
write("Sea level rise mitigation is %s" % slr_mitigation)

write("")

# earthquake
# activation
if scenario in settings["eq_scenarios"]["enable_in"]:
write("Earthquake is activated")
else:
write("Earthquake is not activated")
# mitigation
if scenario in settings["eq_scenarios"]["mitigation"]:
write("Earthquake retrofit policies are applied")
else:
write("Earthquake retrofit policies are not applied")

write("")

@@ -47,7 +57,6 @@ def write(s):
write("Household control file used: %s" % hh_fname)
emp_fname = orca.get_injectable("employment_control_file")
write("Employment file used: %s" % emp_fname)
# these injectables are not storing ...
reg_fname = orca.get_injectable("reg_control_file")
write("Regional control file used is: %s" % reg_fname)
reg_dem_fname = orca.get_injectable("reg_dem_control_file")
@@ -80,7 +89,6 @@ def write(s):
else:
write("No 2030 non-mandatory accessibility file is set")
# segmentation
# this injectable is also not storing ...
acc_seg_fname_2010 = orca.get_injectable("acc_seg_file_2010")
write("2010 accessibility segmentation file used: %s"
% acc_seg_fname_2010)
@@ -1633,60 +1641,81 @@ def hazards_slr_summary(run_number, year, scenario, households, jobs, parcels,
if scenario not in settings["slr_scenarios"]["enable_in"]:
return

f = open(os.path.join("runs", "run%d_hazards_slr_%d.log" %
(run_number, year)), "w")
destroy_parcels = orca.get_table("destroy_parcels")
if len(destroy_parcels) > 0:

def write(s):
# print s
f.write(s + "\n\n")
def write(s):
# print s
f.write(s + "\n\n")

destroy_parcels = orca.get_table("destroy_parcels")
slr_demolish = orca.get_table("slr_demolish")
n = len(destroy_parcels)
write("Number of impacted parcels = %d" % n)
n = slr_demolish['residential_units'].sum()
write("Number of impacted residential units = %d" % n)
n = slr_demolish['building_sqft'].sum()
write("Number of impacted building sqft = %d" % n)

# income quartile counts
hh_unplaced_slr = orca.get_injectable("hh_unplaced_slr")

write("Number of impacted households by type")

hh_summary = pd.DataFrame(index=[0])
hh_summary['hhincq1'] = \
(hh_unplaced_slr["base_income_quartile"] == 1).sum()
hh_summary['hhincq2'] = \
(hh_unplaced_slr["base_income_quartile"] == 2).sum()
hh_summary['hhincq3'] = \
(hh_unplaced_slr["base_income_quartile"] == 3).sum()
hh_summary['hhincq4'] = \
(hh_unplaced_slr["base_income_quartile"] == 4).sum()
hh_summary.to_string(f, index=False)
f = open(os.path.join("runs", "run%d_hazards_slr_%d.log" %
(run_number, year)), "w")

write("")
jobs_unplaced_slr = orca.get_injectable("jobs_unplaced_slr")
# employees by sector
n = len(destroy_parcels)
write("Number of impacted parcels = %d" % n)

try:
slr_demolish_cum = orca.get_table("slr_demolish_cum").to_frame()
except:
slr_demolish_cum = pd.DataFrame()
slr_demolish = orca.get_table("slr_demolish").to_frame()
slr_demolish_cum = slr_demolish.append(slr_demolish_cum)
orca.add_table("slr_demolish_cum", slr_demolish_cum)

write("Number of impacted jobs by sector")
n = slr_demolish_cum['residential_units'].sum()
write("Number of impacted residential units = %d" % n)
n = slr_demolish_cum['building_sqft'].sum()
write("Number of impacted building sqft = %d" % n)

jobs_summary = pd.DataFrame(index=[0])
jobs_summary['agrempn'] = (jobs_unplaced_slr["empsix"] == 'AGREMPN').sum()
jobs_summary['mwtempn'] = (jobs_unplaced_slr["empsix"] == 'MWTEMPN').sum()
jobs_summary['retempn'] = (jobs_unplaced_slr["empsix"] == 'RETEMPN').sum()
jobs_summary['fpsempn'] = (jobs_unplaced_slr["empsix"] == 'FPSEMPN').sum()
jobs_summary['herempn'] = (jobs_unplaced_slr["empsix"] == 'HEREMPN').sum()
jobs_summary['othempn'] = (jobs_unplaced_slr["empsix"] == 'OTHEMPN').sum()
jobs_summary.to_string(f, index=False)
# income quartile counts
try:
hh_unplaced_slr_cum = \
orca.get_table("hh_unplaced_slr_cum").to_frame()
except:
hh_unplaced_slr_cum = pd.DataFrame()
hh_unplaced_slr = orca.get_injectable("hh_unplaced_slr")
hh_unplaced_slr_cum = hh_unplaced_slr.append(hh_unplaced_slr_cum)
orca.add_table("hh_unplaced_slr_cum", hh_unplaced_slr_cum)

f.close()
write("Number of impacted households by type")
hs = pd.DataFrame(index=[0])
hs['hhincq1'] = \
(hh_unplaced_slr_cum["base_income_quartile"] == 1).sum()
hs['hhincq2'] = \
(hh_unplaced_slr_cum["base_income_quartile"] == 2).sum()
hs['hhincq3'] = \
(hh_unplaced_slr_cum["base_income_quartile"] == 3).sum()
hs['hhincq4'] = \
(hh_unplaced_slr_cum["base_income_quartile"] == 4).sum()
hs.to_string(f, index=False)

write("")

# employees by sector
try:
jobs_unplaced_slr_cum = \
orca.get_table("jobs_unplaced_slr_cum").to_frame()
except:
jobs_unplaced_slr_cum = pd.DataFrame()
jobs_unplaced_slr = orca.get_injectable("jobs_unplaced_slr")
jobs_unplaced_slr_cum = jobs_unplaced_slr.append(jobs_unplaced_slr_cum)
orca.add_table("jobs_unplaced_slr_cum", jobs_unplaced_slr_cum)

write("Number of impacted jobs by sector")
js = pd.DataFrame(index=[0])
js['agrempn'] = (jobs_unplaced_slr_cum["empsix"] == 'AGREMPN').sum()
js['mwtempn'] = (jobs_unplaced_slr_cum["empsix"] == 'MWTEMPN').sum()
js['retempn'] = (jobs_unplaced_slr_cum["empsix"] == 'RETEMPN').sum()
js['fpsempn'] = (jobs_unplaced_slr_cum["empsix"] == 'FPSEMPN').sum()
js['herempn'] = (jobs_unplaced_slr_cum["empsix"] == 'HEREMPN').sum()
js['othempn'] = (jobs_unplaced_slr_cum["empsix"] == 'OTHEMPN').sum()
js.to_string(f, index=False)

f.close()

slr_demolish = slr_demolish.to_frame()
slr_demolish = slr_demolish[['parcel_id']]
slr_demolish.to_csv(os.path.join("runs",
"run%d_hazards_slr_buildings_%d.csv"
% (run_number, year)))
slr_demolish.to_csv(os.path.join("runs",
"run%d_hazards_slr_buildings_%d.csv"
% (run_number, year)))


@orca.step()
@@ -1780,29 +1809,53 @@ def write(s):

f.close()

# print out demolished buildings
eq_demolish = eq_demolish.to_frame()
eq_demolish_taz = misc.reindex(parcels.zone_id,
eq_demolish.parcel_id)
eq_demolish['taz'] = eq_demolish_taz
eq_demolish['count'] = 1
eq_demolish = eq_demolish.drop(['parcel_id', 'year_built',
'redfin_sale_year'], axis=1)
eq_demolish = eq_demolish.groupby(['taz']).sum()
eq_demolish.to_csv(os.path.join("runs",
"run%d_hazards_eq_demolish_buildings_%d.csv"
% (run_number, year)))

# print out retrofit buildings that were saved
if scenario in settings["eq_scenarios"]["mitigation"]:
retrofit_bldgs_tot = orca.get_table("retrofit_bldgs_tot")
retrofit_bldgs_tot = retrofit_bldgs_tot.to_frame()
retrofit_bldgs_tot_taz = misc.reindex(parcels.zone_id,
retrofit_bldgs_tot.parcel_id)
retrofit_bldgs_tot['taz'] = retrofit_bldgs_tot_taz
retrofit_bldgs_tot['count'] = 1
retrofit_bldgs_tot = retrofit_bldgs_tot[[
'taz', 'residential_units', 'residential_sqft',
'non_residential_sqft', 'building_sqft', 'stories',
'redfin_sale_price', 'non_residential_rent',
'deed_restricted_units', 'residential_price', 'count']]
retrofit_bldgs_tot = retrofit_bldgs_tot.groupby(['taz']).sum()
retrofit_bldgs_tot.\
to_csv(os.path.join(
"runs", "run%d_hazards_eq_retrofit_buildings_%d.csv"
% (run_number, year)))

# print out buildings in 2030, 2035, and 2050 so Horizon team can compare
# building inventory by TAZ
if year in [2030, 2035, 2050] and scenario in \
settings["eq_scenarios"]["enable_in"]:
buildings = buildings.to_frame()
buildings_taz = misc.reindex(parcels.zone_id,
buildings.parcel_id)
buildings['taz'] = buildings_taz
buildings = buildings[['taz', 'residential_units', 'residential_sqft',
'non_residential_sqft', 'building_sqft',
'stories', 'redfin_sale_price',
buildings['count'] = 1
buildings = buildings[['taz', 'count', 'residential_units',
'residential_sqft', 'non_residential_sqft',
'building_sqft', 'stories', 'redfin_sale_price',
'non_residential_rent', 'deed_restricted_units',
'residential_price']]
buildings = buildings.groupby(['taz']).sum()
buildings.to_csv(os.path.join("runs",
"run%d_hazards_eq_buildings_%d.csv"
"run%d_hazards_eq_buildings_list_%d.csv"
% (run_number, year)))
12 changes: 8 additions & 4 deletions configs/settings.yaml
Original file line number Diff line number Diff line change
@@ -17,12 +17,16 @@
# scenarios for hazards models
slr_scenarios:
enable_in: ["1", "2", "5", "11", "12", "15"]
rtff: ["2", "7", "12"]
cag: ["1", "6", "11"]
bttf: ["5", "10", "15"]
rtff_prog: ["2", "7", "12"]
cag_prog: ["1", "6", "11"]
bttf_prog: ["5", "10", "15"]
mitigation_full: ["11", "15"]
mitigation_partial: ["12"]
eq_scenarios:
enable_in: ["1", "2", "5", "11", "12", "15"]

mitigation: ["11", "15"]
# rtff is not mitigated


# these are the tables that get auto-merged to buildings/parcels in the hedonic and lcms
aggregation_tables:
122,280 changes: 61,140 additions & 61,140 deletions data/slr_parcel_inundation.csv

Large diffs are not rendered by default.

89,127 changes: 89,127 additions & 0 deletions data/slr_parcel_inundation_mf.csv

Large diffs are not rendered by default.

89,127 changes: 89,127 additions & 0 deletions data/slr_parcel_inundation_mp.csv

Large diffs are not rendered by default.

0 comments on commit 6defd1e

Please sign in to comment.