Commit 01e2b3b1 authored by Cecilia Nievas's avatar Cecilia Nievas
Browse files

Merge branch 'dev_aggreg01' into 'master'

Merge branch Dev aggreg01: new tools for aggregated exposure models

See merge request !10
parents 4e0ad9e0 5cef1912
"""
Copyright (C) 2021
Helmholtz-Zentrum Potsdam Deutsches GeoForschungsZentrum GFZ
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Global Dynamic Exposure Model
Helmholtz Centre Potsdam
GFZ German Research Centre for Geosciences
Section 2.6: Seismic Hazard and Risk Dynamics
GDE_TOOLS_aggregated_models
===========================
These functions are used to aid in the creation of aggregated exposure models.
"""
import os
import numpy as np
import h5py
def write_admin_unit_to_hdf(adm_unit_id, adm_level,
                            bdg_classes, bdg_proportions,
                            num_bdgs, num_dwells, num_ppl,
                            out_filename, out_pathname):
    """write_admin_unit_to_hdf
    This function writes an administrative unit to the administrative units HDF5 file. If the
    unit was already in the file, it will replace whatever it contained with the data provided
    as input.
    Args:
        adm_unit_id (str): ID of the administrative unit, with format "XX_YYYY", where "XX" is
                           the ISO 3166-1 alpha 2 country code, and "YYYY" is the ID of the
                           administrative unit as defined according to a relevant standard.
                           E.g. "GR_3514604" in Greece.
        adm_level (int): administrative level of adm_unit_id according to the relevant standard.
        bdg_classes (list or array of str): all building classes associated with this
                                            adm_unit_id. Length equal to bdg_proportions.
        bdg_proportions (list or array of floats): proportions in which each building class of
                                                   bdg_classes is present in adm_unit_id. Its
                                                   elements must add up to 1.0 (within a
                                                   +/-0.001 tolerance). Length equal to
                                                   bdg_classes.
        num_bdgs (float): total number of buildings in adm_unit_id.
        num_dwells (float): total number of dwellings in adm_unit_id.
        num_ppl (float): total number of people in adm_unit_id.
        out_filename (str): name of the output HDF5 file, including extension. Example:
                            "Europe_admin_units_Res.hdf5".
        out_pathname (str): path where the HDF5 file is or should be placed.
    """
    # Accept plain Python lists as well as NumPy arrays for both inputs:
    bdg_classes = np.asarray(bdg_classes)
    bdg_proportions = np.asarray(bdg_proportions, dtype="float64")
    # Check length of bdg_classes and bdg_proportions is the same:
    if bdg_classes.shape[0] != bdg_proportions.shape[0]:
        print("ERROR!! Lengths of bdg_classes and bdg_proportions do not match!!")
        return
    # Check the elements of bdg_proportions add up to 1 (small tolerance for float rounding):
    if bdg_proportions.sum() > 1.001 or bdg_proportions.sum() < 0.999:
        print("ERROR!! Elements of bdg_proportions do not sum up to 1.0!!")
        return
    # Check that bdg_proportions does not contain negative numbers:
    if np.any(bdg_proportions < 0.0):
        print("ERROR!! Some elements of bdg_proportions are negative!!")
        return
    # Create out_pathname if it does not exist:
    if not os.path.exists(out_pathname):
        os.makedirs(out_pathname)
    # Open HDF5 file in append mode (created if it does not exist). The context manager
    # guarantees the file is closed even if an error occurs while writing:
    with h5py.File(os.path.join(out_pathname, out_filename), "a") as fle:
        # Create group for this administrative unit if it does not exist:
        if adm_unit_id not in fle:
            fle.create_group(adm_unit_id)
        # Write attributes of this administrative unit (overwritten if already present):
        fle[adm_unit_id].attrs["Adm_Level"] = adm_level
        fle[adm_unit_id].attrs["Num_Bdgs"] = num_bdgs
        fle[adm_unit_id].attrs["Num_Dwells"] = num_dwells
        fle[adm_unit_id].attrs["Num_Ppl"] = num_ppl
        # Write dataset of building classes. It will replace it if it already exists:
        if "Bdg_classes" in fle[adm_unit_id]:
            del fle[adm_unit_id]["Bdg_classes"]
        # Variable-length string dtype (HDF5 cannot store NumPy unicode arrays directly):
        dt = h5py.special_dtype(vlen=str)
        dataset_classes = fle[adm_unit_id].create_dataset("Bdg_classes",
                                                          bdg_proportions.shape,
                                                          dtype=dt)
        dataset_classes[:] = bdg_classes
        # Write dataset of proportions of buildings. It will replace it if it already
        # exists:
        if "Vals" in fle[adm_unit_id]:
            del fle[adm_unit_id]["Vals"]
        dataset_props = fle[adm_unit_id].create_dataset("Vals",
                                                        bdg_proportions.shape,
                                                        dtype="float64")
        dataset_props[:] = bdg_proportions
def write_bdg_class_to_hdf(adm_unit_id, bdg_class, param_names, param_vals,
                           out_filename, out_pathname):
    """write_bdg_class_to_hdf
    This function writes the parameters of a building class located in adm_unit_id to the
    building classes HDF5 file. If this combination of building class and adm_unit_id already
    exists in the file, it will replace whatever it contained with the data provided as input.
    If the building class exists but contains no parameters for this adm_unit_id, the parameters
    for this adm_unit_id get appended to the data already existing in the file.
    If the input data param_names contains parameters that did not already exist in the HDF
    file, the function adds the missing parameters and fills in with -999.9 the cells of this
    parameter for previous administrative units (different from adm_unit_id) that were already
    in the file.
    The order of param_names does not need to match the order of parameters already existing in
    the file, as the parameter names get stored as attributes ("Col_N" -> name) that provide the
    link between these names and their column position in the array of parameter values.
    Args:
        adm_unit_id (str): ID of the administrative unit, with format "XX_YYYY", where "XX" is
                           the ISO 3166-1 alpha 2 country code, and "YYYY" is the ID of the
                           administrative unit as defined according to a relevant standard.
                           E.g. "GR_3514604" in Greece.
        bdg_class (str): building class to write to the HDF5 file. Any "/" characters are
                         replaced with "|" to form the HDF5 group name.
        param_names (list or array of str): names of the parameters to be written to the file.
                                            E.g. dwell_per_bdg, area_per_dwelling_sqm,
                                            cost_per_area_usd, ppl_per_dwell. Length equal to
                                            param_vals.
        param_vals (list or array of floats): values of the parameters to be written to the
                                              file, in the same order as in param_names. Length
                                              equal to param_names.
        out_filename (str): name of the output HDF5 file, including extension (the building
                            classes file, analogous to e.g. "Europe_admin_units_Res.hdf5").
        out_pathname (str): path where the HDF5 file is or should be placed.
    """
    # Check length of param_names and param_vals is the same:
    if len(param_names)!=len(param_vals):
        print("ERROR!! Lengths of param_names and param_vals do not match!!")
        return
    # Create out_pathname if it does not exist:
    if not os.path.exists(out_pathname):
        os.makedirs(out_pathname)
    # Open in write mode HDF5 file (create if it does not exist):
    fle = h5py.File(os.path.join(out_pathname, out_filename), "a")
    # Replace "/" with "|" because group names cannot contain "/":
    bdg_class_adj= bdg_class.replace("/","|")
    # Create group for this building class if it does not exist:
    if bdg_class_adj not in fle:
        gr= fle.create_group(bdg_class_adj)
    # Retrieve locations (admin unit IDs) that already exist in the file for this
    # bdg_class_adj:
    # NOTE(review): depending on the h5py version, reading a vlen-str dataset may yield
    # bytes instead of str, in which case the "adm_unit_id in existing_locations" comparison
    # below would never match — confirm the h5py version in use returns str.
    if 'Locations' in fle[bdg_class_adj]:
        existing_locations= list(fle[bdg_class_adj]['Locations'][:])
        del fle[bdg_class_adj]['Locations'] # erase it from file to then re-write it
    else:
        existing_locations = []
    # Retrieve parameter names and values that already exist in the file for this
    # bdg_class_adj. The dataset attributes map "Col_N" -> parameter name, where N is the
    # column index within the Parameters array:
    existing_params_names = []
    existing_params_contents = []
    if 'Parameters' in fle[bdg_class_adj]: #
        existing_params = fle[bdg_class_adj]['Parameters'][:] # the values
        for attribute in fle[bdg_class_adj]['Parameters'].attrs: # retrieve the attributes too
            existing_params_names.append(attribute)
            existing_params_contents.append(fle[bdg_class_adj]['Parameters'].attrs[attribute])
        del fle[bdg_class_adj]['Parameters']
    else:
        # No parameters stored yet: start from an empty (0, 0) array that vstack/hstack
        # below will grow row- and column-wise:
        existing_params = np.empty([0,0], dtype='float')
    # Identify position of adm_unit_id in existing_locations if it is already there, otherwise
    # add a row to existing_params and the position is this last row:
    if adm_unit_id in existing_locations:
        # adm_unit_id was already in the file (the values of the parameters will be overwritten):
        which_row = np.where(np.array(existing_locations)==adm_unit_id)[0]
    else:
        # adm_unit_id was not yet in the file:
        row_to_add = -999.9 * np.ones([existing_params.shape[1]]) # dummy additional row
        existing_params = np.vstack((existing_params,row_to_add)) # extend existing_params
        existing_locations.append(adm_unit_id)
        which_row = [existing_params.shape[0] - 1]
    # Guard against a corrupted file in which the same adm_unit_id appears more than once;
    # in that case nothing is written (the datasets deleted above are NOT restored):
    if len(which_row)!=1:
        print("ERROR!! REPEATED adm_unit_id VALUES ENCOUNTERED IN HDF5 FILE "+out_filename+", "
              +bdg_class+": PARAMETERS NOT UPDATED BY FUNCTION write_bdg_class_to_hdf!!!")
        return
    # Go one by one the input parameter names:
    for k, col in enumerate(param_names):
        # Is this parameter with name "col" already in the file?
        which_in_list = np.where(np.array(existing_params_contents)==col)[0]
        if len(which_in_list)==1:
            # This parameter name already existed in the file. The old value can be replaced
            # by the new one. The column index is recovered from the "Col_N" attribute name:
            which_col = int(existing_params_names[which_in_list[0]].split('_')[-1]) # position in array
            existing_params[which_row[0], which_col] = param_vals[k] # update
        elif len(which_in_list)==0:
            # This parameter name did not yet exist in the file. The existing_params array
            # needs to be extended by one column to host the values of this new parameter
            # name; all other rows get the -999.9 "missing" sentinel:
            col_to_add = -999.9 * np.ones([existing_params.shape[0],1])
            col_to_add[which_row[0],0] = param_vals[k]
            existing_params = np.hstack((existing_params, col_to_add)) # extend existing_params
            existing_params_names.append('Col_'+str(existing_params.shape[1]-1))
            existing_params_contents.append(col)
    # Write the Locations dataset (variable-length strings):
    dt = h5py.special_dtype(vlen=str)
    sbgr_strgs= fle[bdg_class_adj].create_dataset('Locations',
                                                  np.array(existing_locations).shape,
                                                  dtype=dt)
    sbgr_strgs[:]= np.array(existing_locations)
    # Write the Parameters dataset:
    sbgr_floats= fle[bdg_class_adj].create_dataset('Parameters',
                                                   existing_params.shape,
                                                   dtype='float64')
    sbgr_floats[:]= existing_params
    # Write the names of the variables as attributes of the dataset ("Col_N" -> name):
    for k, col in enumerate(existing_params_names):
        sbgr_floats.attrs[col]= existing_params_contents[k]
    # Close HDF5 file (important!):
    fle.close()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment