Commit 42143ae3 authored by Tara Evaz Zadeh's avatar Tara Evaz Zadeh
Browse files

Fixed the linter requests

parent 0bd87691
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import csv
import datetime
def AddRespectiveCellIdToOBMExposureFiles(exposureFile_path,OSMSource_path,Result_path):
def AddRespectiveCellIdToOBMExposureFiles(exposureFile_path, OSMSource_path, Result_path):
startTime = datetime.datetime.now()
print(startTime)
OSMSource = csv.reader(open(OSMSource_path),delimiter=';')
OSMSource = csv.reader(open(OSMSource_path), delimiter=";")
next(OSMSource, None)
exposureSource=pd.read_csv(exposureFile_path)
originids=exposureSource.origin_id
exposureSource = pd.read_csv(exposureFile_path)
originids = exposureSource.origin_id
def append_list_as_row(file_name, list_of_elem):
    """Append list_of_elem to file_name as a single CSV row."""
    # "a+" opens for appending (reading also possible), creating the file
    # if it does not exist; newline="" is required by the csv module so it
    # can control line endings itself.
    with open(file_name, "a+", newline="") as write_obj:
        csv_writer = csv.writer(write_obj)
        csv_writer.writerow(list_of_elem)
title=['id','lon','lat','taxonomy','number','structural','night','occupancy','admin_name','admin_ID','origin_id','respectiveCellId']
title = [
"id",
"lon",
"lat",
"taxonomy",
"number",
"structural",
"night",
"occupancy",
"admin_name",
"admin_ID",
"origin_id",
"respectiveCellId",
]
append_list_as_row(Result_path, title)
OSMIdToCellId = {}
......@@ -32,13 +44,11 @@ def AddRespectiveCellIdToOBMExposureFiles(exposureFile_path,OSMSource_path,Resul
pass
OSMIdToCellId[MappingItem[0]] = MappingItem[2]
for asset in range(exposureSource.shape[0]):
exposureArr= list(exposureSource.loc[asset])
origin_id=originids[asset]
exposureArr = list(exposureSource.loc[asset])
origin_id = originids[asset]
respectiveCellId = OSMIdToCellId[origin_id]
exposureArr.append(respectiveCellId)
append_list_as_row(Result_path, exposureArr)
print('Execution time of the script',(datetime.datetime.now() - startTime))
print("Execution time of the script", (datetime.datetime.now() - startTime))
......@@ -32,7 +32,8 @@ def get_exposure_per_tile(
def write_asset_to_file(file_name, list_of_elem):
    """This function writes a list (list_of_elem) to a file (file_name)"""
    # Open the file to write the lists to. a+ gives the possibility of reading and
    # writing if file exists and creating it if it doesn't.
    with open(file_name, "a+", newline="") as write_obj:
        csv_writer = csv.writer(write_obj)
        csv_writer.writerow(list_of_elem)
......@@ -128,7 +129,11 @@ def get_exposure_per_tile(
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="This program computes the probabilities of occurence of different damage states for a scenario earthquake, given a ground-motion field for the area, an exposure model representing the assets in the region of interest and fragility functions that show the probability of exceeding a set of damage states, given an intensity measure level."
description="This program computes the probabilities of occurence of different "
+ "damage states for a scenario earthquake, given a ground-motion field "
+ "for the area, an exposure model representing the assets in the region "
+ "of interest and fragility functions that show the probability of "
+ "exceeding a set of damage states, given an intensity measure level."
)
parser.add_argument(
"-m",
......@@ -137,7 +142,9 @@ if __name__ == "__main__":
type=str,
default="linear",
choices=["linear", "cubic", "nearest"],
help="method used to interpolate the input ground-motion values over all the exposure locations (asset locations). Options: [‘linear’, ‘nearest’, ‘cubic’] (default set to linear)",
help="method used to interpolate the input ground-motion values over all "
+ "the exposure locations (asset locations). "
+ "Options: [‘linear’, ‘nearest’, ‘cubic’] (default set to linear)",
)
parser.add_argument(
"-x",
......@@ -146,35 +153,41 @@ if __name__ == "__main__":
type=str,
default="cell",
choices=["cell", "building"],
help="exposure type. 'cell' if your assets belong to a tile and 'building' if your assets are buildings. Options = [‘cell’, ‘building’] (default set to cell)",
help="exposure type. 'cell' if your assets belong to a tile and 'building' if "
+ "your assets are buildings. Options = [‘cell’, ‘building’] (default set to cell)",
)
parser.add_argument(
"-f",
"--fragility-pathname",
required=True,
type=str,
help="path to the directory that includes all the fragility csv file functions (Required)",
help="path to the directory that includes all the fragility csv file functions "
+ "(Required)",
)
parser.add_argument(
"-t",
"--taxonomy-conversion-filepath",
required=True,
type=str,
help="path to the file that includes the taxonomy names and their respective fragility function names (Required)",
help="path to the file that includes the taxonomy names and their respective "
+ "fragility function names (Required)",
)
parser.add_argument(
"-g",
"--shakemap-filepath",
required=True,
type=str,
help="path to the ground-motion values file. This file should include all the intensity measure types that you have in your fragility functions (Required)",
help="path to the ground-motion values file. This file should include all the "
+ " intensity measure types that you have in your fragility functions (Required)",
)
parser.add_argument(
"-c",
"--cellIdSource-filepath",
required=True,
type=str,
help="path to the file that includes all the unique cell-ids you have in your exposure file (same as the origin-ids of the cell parts of the exposure files or simpy cell-ids) (Required)",
help="path to the file that includes all the unique cell-ids you have in your "
+ "exposure file (same as the origin-ids of the cell parts of the exposure "
+ "files or simpy cell-ids) (Required)",
)
parser.add_argument(
"-e",
......@@ -188,7 +201,11 @@ if __name__ == "__main__":
"--polygonSource-filepath",
required=True,
type=str,
help="path to the file that includes the origin-ids of either your tiles (in case exposure-type = 'cell'. As an example of the origin-id: cell_92258412) or the buildings (in case exposure-type = 'building'. As an example of the origin-id: OSM_529613252) and their respective polygons. (Required)",
help="path to the file that includes the origin-ids of either your tiles "
+ "(in case exposure-type = 'cell'. As an example of the origin-id: "
+ "cell_92258412) "
+ "or the buildings (in case exposure-type = 'building'. As an example of the "
+ "origin-id: OSM_529613252) and their respective polygons. (Required)",
)
parser.add_argument(
"-r",
......@@ -196,14 +213,16 @@ if __name__ == "__main__":
required=False,
type=str,
default="damage_result.gpkg",
help="path to the file that we want to write the results to (default set to damage_result.gpkg)",
help="path to the file that we want to write the results to (default set to"
+ "damage_result.gpkg) ",
)
parser.add_argument(
"-o",
"--overwrite-result-file",
required=False,
action="store_true",
help="to overwrite the existing result file if this option is mentioned in the command line",
help="to overwrite the existing result file if this option is mentioned in "
+ "the command line",
)
args = parser.parse_args()
......@@ -220,9 +239,11 @@ if __name__ == "__main__":
overwrite_result_file = args.overwrite_result_file
if os.path.exists(result_filepath):
if overwrite_result_file == False:
if not overwrite_result_file:
raise ValueError(
"result_filepath exists. Choose another name or use --overwrite_result_file set to True if you want to overwrite the results."
"result_filepath exists. Choose another name or use "
+ "--overwrite_result_file set to True if you want to "
+ "overwrite the results."
)
else:
os.remove(result_filepath)
......
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import csv
import datetime
import losslib
def damageCalculator_TileVersion(fullGroundMotionField,Result_path,fragilityFileDir,exposures,\
taxonomy_conversion_path,shakemap_path,polygonSource_path,exposureType="cell",method="linear"):
def damageCalculator_TileVersion(
fullGroundMotionField,
Result_path,
fragilityFileDir,
exposures,
taxonomy_conversion_path,
shakemap_path,
polygonSource_path,
exposureType="cell",
method="linear",
):
"""
Returns a file "Result_path" including damage results of a scenario earthquake using the ""losslib" functions.
Returns a file "Result_path" including damage results of a scenario earthquake using the
"losslib" functions.
Input:
------
......@@ -78,10 +88,12 @@ taxonomy_conversion_path,shakemap_path,polygonSource_path,exposureType="cell",me
"/home/TileCalculations/shakemap1381_2.csv"
- polygonSource_path: (str)
Address to the file including the origin-ids and their respective polygons
(each entry can be either a tile or an OSM building).
Example extract:
>>> polygonSource_path
"/home/TileCalculations/M008_exposure_Attica_GDE_visual_v001_sat_27f_by_cell_reOrder.csv"
"/home/TileCalculations
/M008_exposure_Attica_GDE_visual_v001_sat_27f_by_cell_reOrder.csv"
- exposureType: (string), optional
{‘cell’, ‘OBM’}, optional'. cell by default.
......@@ -94,9 +106,18 @@ taxonomy_conversion_path,shakemap_path,polygonSource_path,exposureType="cell",me
- Result_path: (arrays written to file)
file containing the damage elements for each asset of the exposure.
Example extract of the result file:
polygon,,origin_id,,asset_id,,lon,,lat,,taxonomy,,gmfValue,,PoEs,,PoOs,,tot_num_buildings,,structural_No-damage,,structural_Slight,,structural_Moderate,,structural_Extensive,,structural_Complete
"POLYGON ((23.68611111111113 38.3388888888889, 23.6888888888889 38.3388888888889, 23.6888888888889 38.34166666666667, 23.68611111111113 38.34166666666667, 23.68611111111113 38.3388888888889))",,cell_2410244527,,GDE_Ind_0,,23.6875,,38.340277777800004,,CR/LFINF+CDM/H:2,,0.3623185006574049,,"[0.2495074860984066, 0.008729386056721225, 0.0010441231376296645, 0.00021103727864033706]",,"[0.7504925139015934, 0.24077810004168537, 0.007685262919091561, 0.0008330858589893275, 0.00021103727864033706]",,0.000629094517209,,0.0004721307257018916,,0.00015147218260022435,,4.834756765710135e-06,,5.24089746254536e-07,,1.3276239491934405e-07
polygon,,origin_id,,asset_id,,lon,,lat,,taxonomy,,gmfValue,,PoEs,,PoOs,,
tot_num_buildings,,structural_No-damage,,structural_Slight,,structural_Moderate,,
structural_Extensive,,structural_Complete
"POLYGON ((23.68611111111113 38.3388888888889, 23.6888888888889 38.3388888888889,
23.6888888888889 38.34166666666667, 23.68611111111113 38.34166666666667,
23.68611111111113 38.3388888888889))",,cell_2410244527,,GDE_Ind_0,,23.6875,,
38.340277777800004,,CR/LFINF+CDM/H:2,,0.3623185006574049,,"[0.2495074860984066,
0.008729386056721225, 0.0010441231376296645, 0.00021103727864033706]",,
"[0.7504925139015934, 0.24077810004168537, 0.007685262919091561, 0.0008330858589893275,
0.00021103727864033706]",,0.000629094517209,,0.0004721307257018916,,
0.00015147218260022435,,4.834756765710135e-06,,5.24089746254536e-07,,
1.3276239491934405e-07
"""
# Show the time the script begins running
......@@ -107,74 +128,123 @@ taxonomy_conversion_path,shakemap_path,polygonSource_path,exposureType="cell",me
taxonomyToFragilitySource = csv.reader(open(taxonomy_conversion_path))
# Skip the header
next(taxonomyToFragilitySource, None)
PolygonSource = csv.reader(open(polygonSource_path),delimiter=';')
PolygonSource = csv.reader(open(polygonSource_path), delimiter=";")
# Skip the header
next(PolygonSource, None)
# Read each column of the input "exposure"
taxonomies=exposures.taxonomy
tot_num_buildings_row=exposures.number
lons=exposures.lon
lats=exposures.lat
assetids=exposures.id
originids=exposures.origin_id
taxonomies = exposures.taxonomy
tot_num_buildings_row = exposures.number
lons = exposures.lon
lats = exposures.lat
assetids = exposures.id
originids = exposures.origin_id
# Prepare Result File
def append_list_as_row(file_name, list_of_elem):
    """Append list_of_elem to the CSV file at file_name as one row."""
    # "a+" appends to an existing file or creates it first; newline=""
    # lets the csv writer manage row terminators portably.
    with open(file_name, "a+", newline="") as write_obj:
        csv_writer = csv.writer(write_obj)
        csv_writer.writerow(list_of_elem)
# Begin Computation
# Define a dictionary with keys as the ground-motion type and value as the column number of the ground-motion type in the shakemap file.
gmDict = {'PGA' : 2 , 'SA(0.3)' : 3, 'SA(0.6)' : 4, 'SA(1.0)' : 5\
, 'SA(1)' : 5}
# Calling the function "Taxonomy_to_Fragility" to get a dictionary with keys as the taxonomy and the values as both the fragility function name (excluding ".csv" part) and column of ground-motion_type in ground-motion_field file.
taxonomyToFragilityMap = losslib.Taxonomy_to_Fragility(gmDict,\
taxonomyToFragilitySource,fragilityFileDir)
# Calling the function "OriginId_to_Polygon" to get a dictionary with keys as the origin_id and the value as the respective polygon.
OriginIdToPolygonMap = losslib.OriginId_to_Polygon(PolygonSource,exposureType)
# Define a dictionary with keys as the ground-motion type and value as the column
# number of the ground-motion type in the shakemap file.
gmDict = {"PGA": 2, "SA(0.3)": 3, "SA(0.6)": 4, "SA(1.0)": 5, "SA(1)": 5}
# Calling the function "Taxonomy_to_Fragility" to get a dictionary with keys as the
# taxonomy and the values as both the fragility function name (excluding ".csv" part)
# and column of ground-motion_type in ground-motion_field file.
taxonomyToFragilityMap = losslib.Taxonomy_to_Fragility(
gmDict, taxonomyToFragilitySource, fragilityFileDir
)
# Calling the function "OriginId_to_Polygon" to get a dictionary with keys as the origin_id
# and the value as the respective polygon.
OriginIdToPolygonMap = losslib.OriginId_to_Polygon(PolygonSource, exposureType)
# Define number of columns that contain the data in the fragiliy function files.
cls = range(1,101)
# Just a trick to have multiple commas between each result element, since we do not want a single comma as the delimiter due to having ploygons as a result element.
a=[0,2,4,6,8]
cls = range(1, 101)
# Just a trick to have multiple commas between each result element, since we do
# not want a single comma as the delimiter due to having ploygons as a result element.
a = [0, 2, 4, 6, 8]
# Looping through each line of the exposure file to do the computations line by line.
for asset in range(exposures.shape[0]):
taxonomy=taxonomies.iloc[asset]
taxonomy = taxonomies.iloc[asset]
fragilityFileName = taxonomyToFragilityMap[taxonomy][0] + ".csv"
tot_num_buildings=tot_num_buildings_row.iloc[asset]
lon=lons.iloc[asset]
lat=lats.iloc[asset]
asset_id=assetids.iloc[asset]
origin_id=originids.iloc[asset]
# Since for the OBM exposure files we also need to know the cell-id that the building is located in, referred as "RespectiveCellid" and the polygon of the buiding.
if exposureType == 'OBM':
[polygon,RespectiveCellid] = OriginIdToPolygonMap[origin_id]
tot_num_buildings = tot_num_buildings_row.iloc[asset]
lon = lons.iloc[asset]
lat = lats.iloc[asset]
asset_id = assetids.iloc[asset]
origin_id = originids.iloc[asset]
# Since for the OBM exposure files we also need to know the cell-id that the
# building is located in, referred as "RespectiveCellid" and the polygon of
# the buiding.
if exposureType == "OBM":
[polygon, RespectiveCellid] = OriginIdToPolygonMap[origin_id]
else:
polygon = OriginIdToPolygonMap[origin_id]
# Read fragility functions as numpy arrays.
fragility_function = np.loadtxt(fragilityFileDir+"/"+fragilityFileName\
, delimiter=",", usecols=cls)
# Achieve the ground-motion value from the fullGroundMotionField. Please note that the "fullGroundMotionField" contains many identical lines (since so many assets have same locations (same location leads to same ground-motion value)) because we want it to have same number of lines and same orders as the exposure file, so that in this line we read the same line number as the exposure file.
gm_value = fullGroundMotionField[asset,\
taxonomyToFragilityMap[taxonomy][1]]
fragility_function = np.loadtxt(
fragilityFileDir + "/" + fragilityFileName, delimiter=",", usecols=cls
)
# Achieve the ground-motion value from the fullGroundMotionField. Please note
# that the "fullGroundMotionField" contains many identical lines (since so many
# assets have same locations (same location leads to same ground-motion value))
# because we want it to have same number of lines and same orders as the exposure file,
# so that in this line we read the same line number as the exposure file.
gm_value = fullGroundMotionField[asset, taxonomyToFragilityMap[taxonomy][1]]
# Achieve Probabilities of exceedance and occurance.
[PoEs, PoOs] = losslib.get_PoEs(fragility_function, gm_value)
# Compute damage by assets
dmg_by_asset = [i * tot_num_buildings for i in PoOs]
for h in a:
dmg_by_asset.insert(h,'')
dmg_by_asset.insert(h, "")
# Append results
if exposureType == 'OBM':
arr0=[polygon,'',origin_id,'','cell_' + RespectiveCellid,'',asset_id\
,'',lon,'',lat,'',taxonomy,'',gm_value,'',PoEs,'',PoOs\
,'',tot_num_buildings]
if exposureType == "OBM":
arr0 = [
polygon,
"",
origin_id,
"",
"cell_" + RespectiveCellid,
"",
asset_id,
"",
lon,
"",
lat,
"",
taxonomy,
"",
gm_value,
"",
PoEs,
"",
PoOs,
"",
tot_num_buildings,
]
else:
arr0=[polygon,'',origin_id,'',asset_id,'',lon,'',lat,'',taxonomy\
,'',gm_value,'',PoEs,'',PoOs,'',tot_num_buildings]
arr0 = [
polygon,
"",
origin_id,
"",
asset_id,
"",
lon,
"",
lat,
"",
taxonomy,
"",
gm_value,
"",
PoEs,
"",
PoOs,
"",
tot_num_buildings,
]
arr0.extend(dmg_by_asset)
append_list_as_row(Result_path, arr0)
#print('time now',datetime.datetime.now)
print('Execution time of the script',(datetime.datetime.now() - startTime))
print("Execution time of the script", (datetime.datetime.now() - startTime))
......@@ -7,8 +7,17 @@ import csv
import datetime
import losslib
def main(Result_path,fragilityFileDir,exposures_path,\
taxonomy_conversion_path,shakemap_path,polygonSource_path,exposureType,method):
def main(
Result_path,
fragilityFileDir,
exposures_path,
taxonomy_conversion_path,
shakemap_path,
polygonSource_path,
exposureType,
method,
):
# This function prepares the inputs and calls the functions needed.
startTime = datetime.datetime.now()
print(startTime)
......@@ -17,86 +26,178 @@ taxonomy_conversion_path,shakemap_path,polygonSource_path,exposureType,method):
# Read Files as Numpy arrays or Pandas data-frames
exposures = pd.read_csv(exposures_path)
groundMotionField = np.loadtxt(shakemap_path,delimiter=',',skiprows=1)
groundMotionField = np.loadtxt(shakemap_path, delimiter=",", skiprows=1)
taxonomyToFragilitySource = csv.reader(open(taxonomy_conversion_path))
next(taxonomyToFragilitySource, None)
PolygonSource = csv.reader(open(polygonSource_path),delimiter=';')
PolygonSource = csv.reader(open(polygonSource_path), delimiter=";")
next(PolygonSource, None)
# Read Columns
taxonomies=exposures.taxonomy
tot_num_buildings_row=exposures.number
lons=exposures.lon
lats=exposures.lat
assetids=exposures.id
originids=exposures.origin_id
#originids=exposures.id_3
taxonomies = exposures.taxonomy
tot_num_buildings_row = exposures.number
lons = exposures.lon
lats = exposures.lat
assetids = exposures.id
originids = exposures.origin_id
# originids=exposures.id_3
# Prepare Result File
def append_list_as_row(file_name, list_of_elem):
    """Write list_of_elem as one CSV row appended to file_name."""
    # Append mode ("a+") preserves earlier rows and creates the file on
    # first use; newline="" is the csv-module requirement for open().
    with open(file_name, "a+", newline="") as write_obj:
        csv_writer = csv.writer(write_obj)
        csv_writer.writerow(list_of_elem)
if exposureType == 'OBM':
title=['polygon','','origin_id','','RelativeCellid','','asset_id',\
'','lon','','lat','','taxonomy','','gmfValue','','PoEs',\
'','PoOs','','tot_num_buildings','','structural_No-damage','',\
'structural_Slight','','structural_Moderate','','structural_Extensive'\
,'','structural_Complete']
if exposureType == "OBM":
title = [
"polygon",
"",
"origin_id",
"",
"RelativeCellid",
"",
"asset_id",
"",
"lon",
"",
"lat",
"",
"taxonomy",
"",
"gmfValue",
"",
"PoEs",
"",
"PoOs",
"",
"tot_num_buildings",
"",
"structural_No-damage",
"",
"structural_Slight",
"",
"structural_Moderate",
"",
"structural_Extensive",
"",
"structural_Complete",
]
else:
title=['polygon','','origin_id','','asset_id','','lon','','lat','',\
'taxonomy','','gmfValue','','PoEs','','PoOs','','tot_num_buildings',\
'','structural_No-damage','','structural_Slight','',\
'structural_Moderate','','structural_Extensive','',\
'structural_Complete']
title = [
"polygon",
"",
"origin_id",
"",
"asset_id",
"",
"lon",
"",
"lat",
"",
"taxonomy",
"",
"gmfValue",
"",
"PoEs",
"",
"PoOs",
"",
"tot_num_buildings",
"",
"structural_No-damage",
"",
"structural_Slight",
"",
"structural_Moderate",
"",
"structural_Extensive",
"",
"structural_Complete",
]
append_list_as_row(Result_path, title)
# COMPUTATION
fullGroundMotionField = losslib.Get_Full_GMF(groundMotionField,lons,lats,method)
gmDict = {'PGA' : 2 , 'SA(0.3)' : 3, 'SA(0.6)' : 4, 'SA(1.0)' : 5\
, 'SA(1)' : 5}
taxonomyToFragilityMap = losslib.Taxonomy_to_Fragility(gmDict,\
taxonomyToFragilitySource,fragilityFileDir)
OriginIdToPolygonMap = losslib.OriginId_to_Polygon(PolygonSource,exposureType)
#number of columns that contain the data in the fragiliy function files.
cls = range(1,101)
a=[0,2,4,6,8]
# Computation
fullGroundMotionField = losslib.Get_Full_GMF(groundMotionField, lons, lats, method)
gmDict = {"PGA": 2, "SA(0.3)": 3, "SA(0.6)": 4, "SA(1.0)": 5, "SA(1)": 5}
taxonomyToFragilityMap = losslib.Taxonomy_to_Fragility(
gmDict, taxonomyToFragilitySource, fragilityFileDir
)
OriginIdToPolygonMap = losslib.OriginId_to_Polygon(PolygonSource, exposureType)
# Number of columns that contain the data in the fragiliy function files
cls = range(1, 101)
a = [0, 2, 4, 6, 8]
for asset in range(exposures.shape[0]):
taxonomy=taxonomies[asset]
taxonomy = taxonomies[asset]
fragilityFileName = taxonomyToFragilityMap[taxonomy][0] + ".csv"
tot_num_buildings=tot_num_buildings_row[asset]
lon=lons[asset]
lat=lats[asset]
asset_id=assetids[asset]
origin_id=originids[asset]
if exposureType == 'OBM':
[polygon,relativeCellid] = OriginIdToPolygonMap[origin_id]
tot_num_buildings = tot_num_buildings_row[asset]
lon = lons[asset]
lat = lats[asset]
asset_id = assetids[asset]
origin_id = originids[asset]
if exposureType == "OBM":
[polygon, relativeCellid] = OriginIdToPolygonMap[origin_id]