From 67b7314225c56ddd9522627dcf7546518a7d9e94 Mon Sep 17 00:00:00 2001
From: jinyan1214
Date: Fri, 14 Jun 2024 13:36:43 +0800
Subject: [PATCH 01/59] landslide initial

---
 .../HazardSimulationEQ.py                     |   3 +
 .../database/CMakeLists.txt                   |   2 +-
 ...Wills_etal_2015_CA_Geologic_Properties.csv |  19 +
 .../regionalGroundMotion/landslide.py         | 483 ++++++++++++++++++
 .../regionalGroundMotion/liquefaction.py      |   4 +-
 5 files changed, 508 insertions(+), 3 deletions(-)
 create mode 100644 modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv
 create mode 100644 modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py

diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py
index 9eceb4ce4..52ffec56b 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py
@@ -342,6 +342,9 @@ def hazard_job(hazard_info):
                 ln_im_mr, mag_maf, im_list
             )
            gf_im_list += settlement_info['Output']
+        if "Liquefaction" in ground_failure_info.keys():
+            import landslide
+
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt b/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt
index f79a961e9..0276b2ded 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt
@@ -1,2 +1,2 @@
 add_subdirectory(gmdb)
-
+add_subdirectory(groundfailure)
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv
new file mode 100644
index 000000000..1e00416b8
--- /dev/null
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv
@@ -0,0 +1,19 @@
+Unit Abbreviation,Friction Angle - Mean (degrees),Friction Angle - Median (degrees),Friction Angle - CoV (%),Friction Angle - Min (degrees),Friction Angle - Max (degrees),Cohesion - Mean (kPa),Cohesion - Median (kPa),Cohesion - CoV (%),Cohesion - Min (kPa),Cohesion - Max (kPa)
+adf,9999,9999,0,9999,9999,9999,9999,0,9999,9999
+Qi,17,19,52,3,28,15.75,11.97,52,5.99,27.53
+af/Qi,9999,9999,0,9999,9999,9999,9999,0,9999,9999
+Qal1,23,23,46,8,44,32.46,23.94,82,1.96,82.83
+Qal2,23,23,46,8,44,32.46,23.94,82,1.96,82.83
+Qal3,23,23,46,8,44,32.46,23.94,82,1.96,82.83
+Qoa,29,30,37,13,46,33.13,23.94,106,0.05,91.21
+Qs,36,37,13,13,46,10.58,4.79,170,0.05,43.09
+QT,26,26,42,28,42,43.33,35.91,79,0.05,100.55
+Tsh,27,27,40,9,45,40.79,29.93,117,2.35,111.85
+Tss,27,27,40,9,45,40.79,29.93,117,2.35,111.85
+Tv,30,29,46,15,44,25.57,27.53,65,5.46,43.52
+sp,28,26,42,13,48,48.17,35.91,97,2.39,149.63
+Kss,24,24,42,8,40,36.48,28.73,85,1.1,101.6
+KJf,26,25,39,12,43,43.24,29.21,113,2.92,106.77
+crystalline,26,26,35,13,38,18.15,16.76,82,0.05,42.66
+crystalin2,40,40,25,30,50,23.94,23.94,100,0.05,35.91
+water,9999,9999,0,9999,9999,9999,9999,0,9999,9999
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py
new file mode 100644
index 000000000..474540206
--- /dev/null
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py
@@ -0,0 +1,483 @@
+import numpy as np
+import rasterio as rio
+from scipy.interpolate import interp2d
+import sys, warnings, shapely, pandas, os
+from pyproj import Transformer
+from pyproj import CRS
+from enum import Enum
+import geopandas as gpd
+from scipy.spatial import ConvexHull
+import pandas as pd
+
+## Helper functions
+def sampleRaster(raster_file_path, raster_crs, x, y, interp_scheme = 'nearest',\
+                 dtype = None):
+    """performs 2D interpolation at (x,y) pairs. Accepted interp_scheme = 'nearest', 'linear', 'cubic', and 'quintic'"""
+    print(f"Sampling from the Raster File: {os.path.basename(raster_file_path)}...")
+    invalid_value = np.nan
+    xy_crs = CRS.from_user_input(4326)
+    raster_crs = CRS.from_user_input(raster_crs)
+    with rio.open(raster_file_path) as raster_file:
+        try:
+            raster_data = raster_file.read()
+            if raster_data.shape[0] > 1:
+                warnings.warn(f"More than one band in the file {raster_file_path}, the first band is used.")
+        except:
+            sys.exit(f"Can not read data from {raster_file_path}")
+        if xy_crs != raster_crs:
+            # make transformer for reprojection
+            transformer_xy_to_data = Transformer.from_crs(xy_crs, raster_crs,\
+                                                          always_xy=True)
+            # reproject and store
+            x_proj, y_proj = transformer_xy_to_data.transform(x, y)
+            x = x_proj
+            y = y_proj
+        n_sample = len(x)
+        if interp_scheme == 'nearest':
+            sample = np.array([val[0] for val in raster_file.sample(list(zip(x,y)))])
+        else:
+            # create x and y ticks for grid
+            x_tick = np.linspace(raster_file.bounds.left, \
+                raster_file.bounds.right, raster_file.width, endpoint=False)
+            y_tick = np.linspace(raster_file.bounds.bottom,\
+                raster_file.bounds.top, raster_file.height, endpoint=False)
+            # create interp2d function
+            interp_function = interp2d(
+                x_tick, y_tick, np.flipud(raster_file.read(1)),
+                kind=interp_scheme, fill_value=invalid_value)
+            # get samples
+            sample = np.transpose(
+                [interp_function(x[i],y[i]) for i in range(n_sample)]
+            )[0]
+    # convert to target datatype
+    if dtype is not None:
+        sample = sample.astype(dtype)
+    # clean up invalid values (returned as 1e38 by NumPy)
+    sample[abs(sample)>1e10] = invalid_value
+    return sample
+
+## Helper functions
+def sampleVector(vector_file_path, vector_crs, x, y, dtype = None):
+    """performs spatial join of vector_file with xy'"""
+    print(f"Sampling from the Vector File: {os.path.basename(vector_file_path)}...")
+    invalid_value = np.nan
+    xy_crs = CRS.from_user_input(4326)
+    vector_gdf = gpd.read_file(vector_file_path)
+    if vector_gdf.crs != vector_crs:
+        sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models")
+    if xy_crs != vector_crs:
+        # make transformer for reprojection
+        transformer_xy_to_data = Transformer.from_crs(xy_crs, vector_crs,\
+                                                      always_xy=True)
+        # reproject and store
+        x_proj, y_proj = transformer_xy_to_data.transform(x, y)
+        x = x_proj
+        y = y_proj
+    # Create a convex hull containing all sites
+    sites = np.array([x, y]).transpose()
+    try:
+        hull = ConvexHull(sites)
+        vertices = hull.vertices
+        vertices = sites[np.append(vertices, vertices[0])]
+        centroid = np.mean(vertices, axis=0)
+        vertices = vertices + 0.05 * (vertices - centroid)
+        RoI = shapely.geometry.Polygon(vertices)
+    except:
+        centroid = shapely.geometry.Point(np.mean(x), np.mean(y))
+        points = [shapely.geometry.Point(x[i], y[i]) for i in range(len(x))]
+        if len(points) == 1:
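+            # ConvexHull fails for fewer than three non-collinear sites, so a
+            # circular region of interest around the centroid is used instead;
+            # a single site gets a nominal 0.1-degree radius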
+            distances = [0.1] # Degree
+        else:
+            distances = [point.distance(centroid) for point in points]
+        max_distance = max(distances)*1.2
+        angles = np.linspace(0, 2 * np.pi, 36)
+        circle_points = [(centroid.x + max_distance * np.cos(angle), \
+                          centroid.y + max_distance * np.sin(angle)) for angle in angles]
+        RoI = shapely.geometry.Polygon(circle_points)
+    data = dict()
+    for col in vector_gdf.columns:
+        data.update({col:[]})
+    for row_index in vector_gdf.index:
+        new_geom = RoI.intersection(vector_gdf.loc[row_index, 'geometry'])
+        if new_geom.is_empty:
+            continue
+        columns = list(vector_gdf.columns)
+        columns.remove('geometry')
+        for col in columns:
+            data[col].append(vector_gdf.loc[row_index, col])
+        data['geometry'].append(new_geom)
+    del vector_gdf
+    gdf_roi = gpd.GeoDataFrame(data, geometry="geometry", crs=4326)
+    geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)]
+    gdf_sites = gpd.GeoDataFrame(geometry=geometry, crs=4326).reset_index()
+    merged = gpd.GeoDataFrame.sjoin(gdf_roi, gdf_sites, how = 'inner', predicate = 'contains')
+    merged = merged.set_index('index_right').sort_index().drop(columns=['geometry'])
+    gdf_sites = pandas.merge(gdf_sites, merged, on = 'index', how = 'left')
+    gdf_sites.drop(columns=['geometry', 'index'], inplace=True)
+    return gdf_sites
+
+def find_additional_output_req(liq_info, current_step):
+    additional_output_keys = []
+    if current_step == 'Triggering':
+        trigging_parameters = liq_info['Triggering']\
+            ['Parameters'].keys()
+        triger_dist_water = liq_info['Triggering']['Parameters'].get('DistWater', None)
+        if triger_dist_water is None:
+            return additional_output_keys
+        lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None)
+        if 'LateralSpreading' in liq_info.keys():
+            lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None)
+            if (liq_info['LateralSpreading']['Model'] == 'Hazus2020')\
+                and (lat_dist_water==triger_dist_water):
+                additional_output_keys.append('dist_to_water')
+    return additional_output_keys
+
+def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station):
+    gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype = None)
+    gdf_units = gdf_units['UnitAbbr', 'geometry']
+    gdf_units = gdf_units.fillna('water')
+    default_geo_prop_fpath = os.path.join(os.path.abspath(__file__), 'database',\
+        'groundfailure', 'Wills_etal_2015_CA_Geologic_Properties.csv')
+    default_geo_prop = pd.read_csv(default_geo_prop_fpath)
+    unique_geo_unit = np.unique(gdf_units['UnitAbbr'])
+    phi_mean = np.empty_like(gdf_units['UnitAbbr'])
+    coh_mean = np.empty_like(gdf_units['UnitAbbr'])
+    for each in unique_geo_unit:
+        rows_with_geo_unit = np.where(gdf_units['UnitAbbr'].values==each)[0]
+        rows_for_param = np.where(default_geo_prop['Unit Abbreviation'].values==each)[0][0]
+        phi_mean[rows_with_geo_unit] = \
+            default_geo_prop['Friction Angle - Median (degrees)'][rows_for_param]
+        coh_mean[rows_with_geo_unit] = \
+            default_geo_prop['Cohesion - Median (kPa)'][rows_for_param]
+    return phi_mean, coh_mean
+
+def erf2(x):
+    """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf"""
+    # constants
+    a1 = 0.254829592
+    a2 = -0.284496736
+    a3 = 1.421413741
+    a4 = -1.453152027
+    a5 = 1.061405429
+    p = 0.3275911
+    # Save the sign of x
+    signs = np.sign(x)
+    x = np.abs(x)
+    # A & S 7.1.26
+    t = 1.0/(1.0 + p*x)
+    y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2)
+    return signs*y
+
+def norm2_cdf(x, loc, scale):
+    """
+    modified implementation of norm.cdf function from numba_stats, using self-implemented erf function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = (x - loc)/scale
+    return 0.5 * (1 + erf2(inter * np.sqrt(0.5)))
+
+def erf2_2d(x):
+    """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf"""
+    # constants
+    a1 = 0.254829592
+    a2 = -0.284496736
+    a3 = 1.421413741
+    a4 = -1.453152027
+    a5 = 1.061405429
+    p = 0.3275911
+    # Save the sign of x
+    signs = np.sign(x)
+    x = np.abs(x)
+    # A & S 7.1.26
+    t = 1.0/(1.0 + p*x)
+    y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2)
+    return signs*y
+
+def norm2_cdf_2d(x, loc, scale):
+    """
+    modified implementation of norm.cdf function from numba_stats, using self-implemented erf function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = (x - loc)/scale
+    return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5)))
+
+def nb_round(x, decimals):
+    out = np.empty_like(x)
+    return np.round_(x, decimals, out)
+
+def erfinv_coeff(order=20):
+    # initialize
+    c = np.empty(order+1)
+    # starting value
+    c[0] = 1
+    for i in range(1,order+1):
+        c[i] = sum([c[j]*c[i-1-j]/(j+1)/(2*j+1) for j in range(i)])
+    # return
+    return c
+
+def erfinv(x, order=20):
+    """returns inverse erf(x)"""
+    # get coeffcients
+    c = erfinv_coeff(order)
+    # initialize
+    root_pi_over_2 = np.sqrt(np.pi)/2
+    y = np.zeros(x.shape)
+    for i in range(order):
+        y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1)
+    # return
+    return y
+
+def norm2_ppf(p, loc, scale):
+    """
+    modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = np.sqrt(2) * erfinv(2*p-1,order=20)
+    return scale * inter + loc
+
+def erfinv_2d(x, order=20):
+    """returns inverse erf(x)"""
+    # get coeffcients
+    c = erfinv_coeff(order)
+    # initialize
+    root_pi_over_2 = np.sqrt(np.pi)/2
+    y = np.zeros(x.shape)
+    for i in range(order):
+        y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1)
+    # return
+    return y
+
+def norm2_ppf_2d(p, loc, scale):
+    """
+    modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = np.sqrt(2) * erfinv_2d(2*p-1,order=20)
+    return scale * inter + loc
+
+class Landslide:
+    def __init__(self) -> None:
+        pass
+
+# -----------------------------------------------------------
+class BrayMacedo2019(Landslide):
+    """
+    Compute landslide deformation at a given location using the Bray and Macedo (2019) probabilistic model.
+    Regression models based on three sets of ground motions are provided:
+
+    1. **Ordinary**: **d** = f(ky, Sa(T), Ts, M)
+    2. **Near-fault**: **d** = f(ky, Sa(T), Ts, M, pgv) - unavailable for this version of OpenSRA
+    3. **General** (default): **d** = f(ky, Sa(T), Ts, M, pgv) - unavailable for this version of OpenSRA
+
+    The default relationship for **ky** uses **coh_soil**, **phi_soil**, **gamma_soil**, **t_slope**, **slope**
+
+    **PGA** is used in place of **Sa(T)** (i.e., Ts=0)
+
+    Parameters
+    ----------
+    From upstream PBEE:
+    pga: float, np.ndarray or list
+        [g] peak ground acceleration
+    mag: float, np.ndarray or list
+        moment magnitude
+
+    Geotechnical/geologic:
+    slope: float, np.ndarray or list
+        [deg] slope angle
+    t_slope: float, np.ndarray or list
+        [m] slope thickness (infinite-slope problem)
+    gamma_soil: float, np.ndarray or list
+        [kN/m^3] unit weight of soil
+    phi_soil: float, np.ndarray or list
+        [deg] friction angle of soil
+    coh_soil: float, np.ndarray or list
+        [kPa] cohesion of soil
+
+    Fixed:
+
+    Returns
+    -------
+    pgdef : float, np.ndarray
+        [m] permanent ground deformation
+    sigma_pgdef : float, np.ndarray
+        aleatory variability for ln(pgdef)
+
+    References
+    ----------
+    .. [1] Bray, J.D., and Macedo, J., 2019, Procedure for Estimating Shear-Induced Seismic Slope Displacement for Shallow Crustal Earthquakes, Journal of Geotechnical and Geoenvironmental Engineering, vol. 145, pp. 12, 04019106.
+
+    """
+    def __init__(self, parameters, stations) -> None:
+        self.stations = stations
+        self.parameters = parameters
+        self.slope = None # (deg)
+        self.t_slope = None # (m)
+        self.gamma_soil = None # (kN/m^3)
+        self.phi_soil = None # (deg)
+        self.coh_soil = None # (kPa)
+        self.interpolate_spatial_parameters(parameters)
+
+    def interpolate_spatial_parameters(self, parameters):
+        # site coordinate in CRS 4326
+        lat_station = [site['lat'] for site in self.stations]
+        lon_station = [site['lon'] for site in self.stations]
+        # slope
+        if parameters["Slope"] == "Defined (\"Slope\") in Site File (.csv)":
+            self.slope = np.array([site['Slope'] for site in self.stations])
+        else:
+            self.slope = sampleRaster(parameters["Slope"], parameters["inputCRS"],\
+                                      lon_station, lat_station)
+        # t_slope
+        if parameters["SlopeThickness"] == "Defined (\"SlopeThickness\") in Site File (.csv)":
+            self.t_slope = np.array([site['SlopeThickness'] for site in self.stations])
+        elif parameters["SlopeThickness"] == "Use constant value (m)":
+            self.t_slope = np.array(parameters["SlopeThicknessValue"])
+        else:
+            self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\
+                                        lon_station, lat_station)
+        # gamma_soil
+        if parameters["GammaSoil"] == "Defined (\"GammaSoil\") in Site File (.csv)":
+            self.gamma_soil = np.array([site['GammaSoil'] for site in self.stations])
+        elif parameters["GammaSoil"] == "Use constant value (m)":
+            self.gamma_soil = np.array(parameters["GammaSoilValue"])
+        elif parameters["GammaSoil"] == "Infer from Geologic Map":
+            self.gamma_soil = infer_from_geologic_map(parameters["GammaSoilGeoMap"],\
+                parameters['inputCRS'], lon_station, lat_station)
+        else:
+            self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\
+                                           lon_station, lat_station)
+        # coh_soil
+        if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)":
+            self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations])
+        elif parameters["CohesionSoil"] == "Use constant value (m)":
+            self.coh_soil = np.array(parameters["CohesionSoilValue"])
+        elif parameters["CohesionSoil"] == "Infer from Geologic Map":
+            self.coh_soil = infer_from_geologic_map(parameters["CohesionSoilGeoMap"],\
+                parameters['inputCRS'], lon_station, lat_station)
+        else:
+            self.coh_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\
+                                         lon_station, lat_station)
+
+        print("Initiation finished")
+
+    def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys = []):
+        if ('PGA' in im_list):
+            num_stations = len(self.stations)
+            num_scenarios = len(eq_data)
+            PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0]
+            for scenario_id in range(num_scenarios):
+                num_rlzs = ln_im_data[scenario_id].shape[2]
+                im_data_scen = np.zeros([num_stations,\
+                    len(im_list)+len(output_keys), num_rlzs])
+                im_data_scen[:,0:len(im_list),:] = ln_im_data[scenario_id]
+                for rlz_id in range(num_rlzs):
+                    pga = np.exp(ln_im_data[scenario_id][:,PGA_col_id,rlz_id])
+                    mag = float(eq_data[scenario_id][0])
+                    model_output = self.model(pga, mag, self.slope, self.t_slope,
+                                              self.gamma_soil, self.phi_soil,
+                                              self.coh_soil)
+                    for i, key in enumerate(output_keys):
+                        im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key]
+                ln_im_data[scenario_id] = im_data_scen
+            im_list = im_list + output_keys
+            additional_output = dict()
+            for key in additional_output_keys:
+                item = getattr(self, key, None)
+                if item is None:
+                    warnings.warn(f"Additional output {key} is not avaliable in the landslide model 'BrayMacedo2019'.")
+                else:
+                    additional_output.update({key:item})
+        else:
+            sys.exit(f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed.")
+            # print(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed."\
+            #       , file=sys.stderr)
+            # sys.stderr.write("test")
+            # sys.exit(-1)
+        return ln_im_data, eq_data, im_list, additional_output
+
+    def model(
+        self,
+        pga, mag, # upstream PBEE RV
+        slope, t_slope, gamma_soil, phi_soil, coh_soil, # geotechnical/geologic
+        return_inter_params=False # to get intermediate params
+    ):
+        """Model"""
+
+        # get dimensions
+        ndim = pga.ndim
+        if ndim == 1:
+            n_site = len(pga)
+            n_sample = 1
+            shape = (n_site)
+        else:
+            shape = pga.shape
+            n_site = shape[0]
+            n_sample = shape[1]
+
+        # initialize
+        pgdef = np.zeros(shape)
+        ky = np.zeros(shape)
+        prob_d_eq_0 = np.zeros(shape)
+        ln_pgdef_trunc = np.zeros(shape)
+        nonzero_median_cdf = np.zeros(shape)
+
+        # convert from deg to rad
+        slope_rad = slope*np.pi/180
+        phi_soil_rad = phi_soil*np.pi/180
+
+        # yield acceleration
+        ky = np.tan(phi_soil_rad-slope_rad) + \
+            coh_soil/(
+                gamma_soil * t_slope * np.cos(slope_rad)**2 * \
+                (1+np.tan(phi_soil_rad)*np.tan(slope_rad)))
+        ky = np.maximum(ky,0.01) # to avoid ky = 0
+
+        # aleatory
+        sigma_val = 0.72
+
+        # deformation, eq 3b
+        ln_pgdef_trunc = \
+            -4.684 + \
+            -2.482*np.log(ky) + \
+            -0.244*(np.log(ky))**2 + \
+            0.344*np.log(ky)*np.log(pga) + \
+            2.649*np.log(pga) + \
+            -0.090*(np.log(pga))**2 + \
+            0.603*mag # cm
+        nonzero_ln_pgdef = ln_pgdef_trunc.copy()
+
+        # probability of zero displacement, eq. 2 with Ts=0
+        if ndim == 1:
+            prob_d_eq_0 = 1 - norm2_cdf(
+                -2.480 + \
+                -2.970*np.log(ky) + \
+                -0.120*(np.log(ky))**2 + \
+                2.780*np.log(pga),
+                0, 1)
+        else:
+            prob_d_eq_0 = 1 - norm2_cdf_2d(
+                -2.480 + \
+                -2.970*np.log(ky) + \
+                -0.120*(np.log(ky))**2 + \
+                2.780*np.log(pga),
+                0, 1)
+        prob_d_eq_0 = nb_round(prob_d_eq_0, decimals=15)
+
+        # apply non-zero displacement correction/condition, eq 11
+        nonzero_median_cdf = 1 - .5/(1-prob_d_eq_0)
+
+        # loop through number of samples
+        if ndim == 1:
+            nonzero_ln_pgdef[nonzero_median_cdf>0] = ln_pgdef_trunc[nonzero_median_cdf>0] + \
+                sigma_val*norm2_ppf(nonzero_median_cdf[nonzero_median_cdf>0], 0.0, 1.0)
+        else:
+            for i in range(n_sample):
+                cond = nonzero_median_cdf[:,i]>0
+                nonzero_ln_pgdef[cond,i] = ln_pgdef_trunc[cond,i] + \
+                    sigma_val*norm2_ppf(nonzero_median_cdf[cond,i], 0.0, 1.0)
+
+        # rest of actions
+        pgdef = np.exp(nonzero_ln_pgdef)/100 # also convert from cm to m
+        pgdef = np.maximum(pgdef,1e-5) # limit to 1e-5 m
+        output = {'lsd_PGD_h':pgdef}
+        return output
\ No newline at end of file
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
index 83fb48de8..ef9909e57 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
@@ -242,7 +242,7 @@ def interpolate_spatial_parameters(self, parameters):
             self.precip = sampleRaster(parameters["Precipitation"], parameters["inputCRS"],\
                                        lon_station, lat_station)
         self.vs30 = np.array([site['vs30'] for site in self.stations])
-        print("Sampling finished")
+        print("Initiation finished")

     def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):
         if ('PGA' in im_list) and ('PGV' in im_list):
@@ -431,7 +431,7 @@ def interpolate_spatial_parameters(self, parameters):
         self.liq_susc = np.array(self.liq_susc)
         # liq_susc = liq_susc_samples[parameters["SusceptibilityKey"]].fillna("NaN")
         # self.liq_susc = liq_susc.to_numpy()
-        print("Sampling finished")
+        print("Initiation finished")

     def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):

From a2dfe72923cac542d94f69ead27eb5ab9aad7956 Mon Sep 17 00:00:00 2001
From: jinyan1214
Date: Mon, 15 Jul 2024 13:59:49 -0700
Subject: [PATCH 02/59] landslide backend run

---
 .../ComputeIntensityMeasure.py                | 24 +++----
 .../HazardSimulationEQ.py                     |  9 ++-
 .../database/groundfailure/CMakeLists.txt     |  2 +
 .../regionalGroundMotion/landslide.py         | 71 ++++++++++---------
 .../regionalGroundMotion/liquefaction.py      |  1 -
 5 files changed, 61 insertions(+), 46 deletions(-)
 create mode 100644 modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt

diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
index e300811a6..5d58f1304 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
@@ -837,21 +837,21 @@ def export_im(stations, im_list, im_data, eq_data, output_dir, filename, csv_fla
             })
             df = pd.DataFrame(df)
             # Combine PGD from liquefaction, landslide and fault
-            if 'liq_PGD_h' in df.columns or 'ls_PGD_h'in df.columns or 'fd_PGD_h' in df.columns:
+            if 'liq_PGD_h' in df.columns or 'lsd_PGD_h'in df.columns or 'fd_PGD_h'
in df.columns: PGD_h = np.zeros(df.shape[0]) if 'liq_PGD_h' in df.columns: PGD_h += df['liq_PGD_h'].to_numpy() - if 'ls_PGD_h' in df.columns: - PGD_h += df['ls_PGD_h'].to_numpy() + if 'lsd_PGD_h' in df.columns: + PGD_h += df['lsd_PGD_h'].to_numpy() if 'fd_PGD_h' in df.columns: PGD_h += df['fd_PGD_h'].to_numpy() df['PGD_h'] = PGD_h - if 'liq_PGD_v' in df.columns or 'ls_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: + if 'liq_PGD_v' in df.columns or 'lsd_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: PGD_v = np.zeros(df.shape[0]) if 'liq_PGD_v' in df.columns: PGD_v += df['liq_PGD_v'].to_numpy() - if 'ls_PGD_v' in df.columns: - PGD_v += df['ls_PGD_v'].to_numpy() + if 'lsd_PGD_v' in df.columns: + PGD_v += df['lsd_PGD_v'].to_numpy() if 'fd_PGD_v' in df.columns: PGD_v += df['fd_PGD_v'].to_numpy() df['PGD_v'] = PGD_v @@ -891,21 +891,21 @@ def export_im(stations, im_list, im_data, eq_data, output_dir, filename, csv_fla }) df = pd.DataFrame(df) # Combine PGD from liquefaction, landslide and fault - if 'liq_PGD_h' in df.columns or 'ls_PGD_h'in df.columns or 'fd_PGD_h' in df.columns: + if 'liq_PGD_h' in df.columns or 'lsd_PGD_h'in df.columns or 'fd_PGD_h' in df.columns: PGD_h = np.zeros(df.shape[0]) if 'liq_PGD_h' in df.columns: PGD_h += df['liq_PGD_h'].to_numpy() - if 'ls_PGD_h' in df.columns: - PGD_h += df['ls_PGD_h'].to_numpy() + if 'lsd_PGD_h' in df.columns: + PGD_h += df['lsd_PGD_h'].to_numpy() if 'fd_PGD_h' in df.columns: PGD_h += df['fd_PGD_h'].to_numpy() df['PGD_h'] = PGD_h - if 'liq_PGD_v' in df.columns or 'ls_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: + if 'liq_PGD_v' in df.columns or 'lsd_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: PGD_v = np.zeros(df.shape[0]) if 'liq_PGD_v' in df.columns: PGD_v += df['liq_PGD_v'].to_numpy() - if 'ls_PGD_v' in df.columns: - PGD_v += df['ls_PGD_v'].to_numpy() + if 'lsd_PGD_v' in df.columns: + PGD_v += df['lsd_PGD_v'].to_numpy() if 'fd_PGD_v' in df.columns: PGD_v += df['fd_PGD_v'].to_numpy() df['PGD_v'] = PGD_v diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 52ffec56b..2ba637fab 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -342,8 +342,15 @@ def hazard_job(hazard_info): ln_im_mr, mag_maf, im_list ) gf_im_list += settlement_info['Output'] - if "Liquefaction" in ground_failure_info.keys(): + if "Landslide" in ground_failure_info.keys(): import landslide + lsld_info = ground_failure_info['Landslide'] + lsld_model = getattr(landslide, lsld_info['Model'])(\ + lsld_info["Parameters"], stations) + ln_im_mr, mag_maf, im_list = lsld_model.run( + ln_im_mr, mag_maf, im_list + ) + gf_im_list += lsld_info['Output'] diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt new file mode 100644 index 000000000..d5da071f5 --- /dev/null +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt @@ -0,0 +1,2 @@ +simcenter_add_file(NAME Wills_etal_2015_CA_Geologic_Properties.csv) + diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index 474540206..1ead4ebe3 100644 --- 
a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -134,16 +134,16 @@ def find_additional_output_req(liq_info, current_step): def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station): gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype = None) - gdf_units = gdf_units['UnitAbbr', 'geometry'] + gdf_units = gdf_units['PTYPE'] gdf_units = gdf_units.fillna('water') - default_geo_prop_fpath = os.path.join(os.path.abspath(__file__), 'database',\ + default_geo_prop_fpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'database',\ 'groundfailure', 'Wills_etal_2015_CA_Geologic_Properties.csv') default_geo_prop = pd.read_csv(default_geo_prop_fpath) - unique_geo_unit = np.unique(gdf_units['UnitAbbr']) - phi_mean = np.empty_like(gdf_units['UnitAbbr']) - coh_mean = np.empty_like(gdf_units['UnitAbbr']) + unique_geo_unit = np.unique(gdf_units) + phi_mean = np.empty_like(gdf_units) + coh_mean = np.empty_like(gdf_units) for each in unique_geo_unit: - rows_with_geo_unit = np.where(gdf_units['UnitAbbr'].values==each)[0] + rows_with_geo_unit = np.where(gdf_units.values==each)[0] rows_for_param = np.where(default_geo_prop['Unit Abbreviation'].values==each)[0][0] phi_mean[rows_with_geo_unit] = \ default_geo_prop['Friction Angle - Median (degrees)'][rows_for_param] @@ -331,36 +331,49 @@ def interpolate_spatial_parameters(self, parameters): if parameters["SlopeThickness"] == "Defined (\"SlopeThickness\") in Site File (.csv)": self.t_slope = np.array([site['SlopeThickness'] for site in self.stations]) elif parameters["SlopeThickness"] == "Use constant value (m)": - self.t_slope = np.array(parameters["SlopeThicknessValue"]) + self.t_slope = np.array([parameters["SlopeThicknessValue"]]*len(self.stations)) else: self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\ lon_station, lat_station) # gamma_soil if parameters["GammaSoil"] == "Defined (\"GammaSoil\") in Site File (.csv)": self.gamma_soil = np.array([site['GammaSoil'] for site in self.stations]) - elif parameters["GammaSoil"] == "Use constant value (m)": - self.gamma_soil = np.array(parameters["GammaSoilValue"]) - elif parameters["GammaSoil"] == "Infer from Geologic Map": - self.gamma_soil = infer_from_geologic_map(parameters["GammaSoilGeoMap"],\ - parameters['inputCRS'], lon_station, lat_station) + elif parameters["GammaSoil"] == "Use constant value (kN/m^3)": + self.gamma_soil = np.array(parameters["GammaSoilValue"]*len(self.stations)) else: self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\ lon_station, lat_station) - # coh_soil - if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)": - self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations]) - elif parameters["CohesionSoil"] == "Use constant value (m)": - self.coh_soil = np.array(parameters["CohesionSoilValue"]) - elif parameters["CohesionSoil"] == "Infer from Geologic Map": - self.coh_soil = infer_from_geologic_map(parameters["CohesionSoilGeoMap"],\ - parameters['inputCRS'], lon_station, lat_station) + # phi_soil + if parameters["PhiSoil"] == "Defined (\"PhiSoil\") in Site File (.csv)": + self.phi_soil = np.array([site['PhiSoil'] for site in self.stations]) + elif parameters["PhiSoil"] == "Use constant value (deg)": + self.phi_soil = np.array(parameters["PhiSoilValue"]*len(self.stations)) + elif parameters["PhiSoil"] == "Infer from Geologic Map": + 
if parameters["CohesionSoil"] == "Infer from Geologic Map": + self.phi_soil, self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ + parameters['inputCRS'], lon_station, lat_station) + else: + self.phi_soil, _ = infer_from_geologic_map(parameters["GeologicMap"],\ + parameters['inputCRS'], lon_station, lat_station) else: - self.coh_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ + self.phi_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ lon_station, lat_station) + # coh_soil + if self.coh_soil is None: + if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)": + self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations]) + elif parameters["CohesionSoil"] == "Use constant value (kPa)": + self.coh_soil = np.array(parameters["CohesionSoilValue"]*len(self.stations)) + elif parameters["CohesionSoil"] == "Infer from Geologic Map": + self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ + parameters['inputCRS'], lon_station, lat_station) + else: + self.coh_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ + lon_station, lat_station) print("Initiation finished") - def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys = []): + def run(self, ln_im_data, eq_data, im_list, output_keys=['lsd_PGD_h'], additional_output_keys = []): if ('PGA' in im_list): num_stations = len(self.stations) num_scenarios = len(eq_data) @@ -380,20 +393,13 @@ def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key] ln_im_data[scenario_id] = im_data_scen im_list = im_list + output_keys - additional_output = dict() - for key in additional_output_keys: - item = getattr(self, key, None) - if item is None: - warnings.warn(f"Additional output {key} is not avaliable in the landslide model 'BrayMacedo2019'.") - else: - additional_output.update({key:item}) else: sys.exit(f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed.") # print(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed."\ # , file=sys.stderr) # sys.stderr.write("test") # sys.exit(-1) - return ln_im_data, eq_data, im_list, additional_output + return ln_im_data, eq_data, im_list, def model( self, @@ -422,8 +428,9 @@ def model( nonzero_median_cdf = np.zeros(shape) # convert from deg to rad - slope_rad = slope*np.pi/180 - phi_soil_rad = phi_soil*np.pi/180 + slope_rad = (slope*np.pi/180).astype(np.float32) + phi_soil_rad = (phi_soil*np.pi/180).astype(np.float32) + coh_soil = coh_soil.astype(np.float32) # yield acceleration ky = np.tan(phi_soil_rad-slope_rad) + \ diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py index ef9909e57..d508fe850 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py @@ -123,7 +123,6 @@ def find_additional_output_req(liq_info, current_step): triger_dist_water = liq_info['Triggering']['Parameters'].get('DistWater', None) if triger_dist_water is None: return additional_output_keys - lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None) if 'LateralSpreading' in liq_info.keys(): lat_dist_water = 
liq_info['LateralSpreading']['Parameters'].get('DistWater', None) if (liq_info['LateralSpreading']['Model'] == 'Hazus2020')\ From 158b11704389b7e70b9450d30b1280d7d2a55e0d Mon Sep 17 00:00:00 2001 From: Justin Bonus Date: Sat, 27 Jul 2024 13:23:35 -0700 Subject: [PATCH 03/59] Try to fix Appveyor on Windows Visual Studio 2019 / macOS clang / Ubuntu18.04 gcc-9 Trying to update the appveyor to use appropriate Conan version, properly specify Python 3.9, set a version matching the {last 2 digits of year}:{month}:{build} format, etc For now it is set to clone my personal SimCenterBackendApplications repo, so that I can trigger an appveyor build with my commits there, but this should be reverted for the main NHERI-SimCenter repo --- appveyor.yml | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 04d963dae..8a6b04ba3 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,11 +1,11 @@ -version: 1.0.{build} +version: 24.07.{build} image: + - Visual Studio 2019 - macOS - Ubuntu1804 - - Visual Studio 2019 -stack: python 3.7 +stack: python 3.9 for: # macOS @@ -18,14 +18,15 @@ for: init: - export PATH="$HOME/Qt/5.15.2/clang_64/bin:$HOME/venv3.9/bin:$PATH" - - python -m pip install --upgrade pip - - pip install conan + - python3 -m pip install --upgrade pip + - pip install conan==1.60.1 - conan user - conan remote add simcenter https://nherisimcenter.jfrog.io/artifactory/api/conan/simcenter install: - uname - - git clone https://github.com/NHERI-SimCenter/SimCenterBackendApplications.git + - rm -rf SimCenterBackendApplications + - git clone https://github.com/JustinBonus/SimCenterBackendApplications.git build_script: @@ -39,6 +40,7 @@ for: - qmake --version - gcc --version - python --version + - python3 --version # Ubuntu1804 - @@ -49,18 +51,19 @@ for: clone_folder: ~/SimCenter init: - - export PATH="$HOME/Qt/5.15.2/gcc_64/bin:$HOME/venv3.8.6/bin:$PATH" + - export PATH="$HOME/Qt/5.15.2/gcc_64/bin:$HOME/venv3.9/bin:$PATH" - export PATH="/home/appveyor/.local/bin:$PATH" install: - uname - sudo update-alternatives --set gcc /usr/bin/gcc-9 - sudo apt-get -y install libglu1-mesa-dev freeglut3-dev mesa-common-dev libblas-dev liblapack-dev - - python -m pip install --upgrade pip - - pip install conan + - python3 -m pip install --upgrade pip + - pip install conan==1.60.1 - conan user - conan remote add simcenter https://nherisimcenter.jfrog.io/artifactory/api/conan/simcenter - - git clone https://github.com/NHERI-SimCenter/SimCenterBackendApplications.git + - rm -rf SimCenterBackendApplications + - git clone https://github.com/JustinBonus/SimCenterBackendApplications.git build_script: # build SimCenterBackendApplications @@ -73,6 +76,7 @@ for: - qmake --version - gcc --version - python --version + - python3 --version # Visual Studio 2019 - @@ -85,11 +89,11 @@ for: init: - cmd: set PYTHON=C:\PYTHON38-x64 - - cmd: set PYTHONNET_PYDLL=%PYTHON%\python3.8.dll + - cmd: set PYTHONNET_PYDLL=%PYTHON%\python3.9.dll - cmd: set QT=C:\Qt\5.15.2\msvc2019_64\bin - cmd: set PATH=%PYTHON%;%PYTHON%\Scripts;%QT%;%PATH% - cmd: call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat" - - cmd: pip.exe install conan + - cmd: pip.exe install conan==1.60.1 - cmd: conan user - cmd: conan profile new default --detect - cmd: conan profile show default @@ -99,7 +103,8 @@ for: - cmd: echo %PATH% install: - - cmd: git clone https://github.com/NHERI-SimCenter/SimCenterBackendApplications.git + - cmd: rm -rf 
SimCenterBackendApplications + - cmd: git clone https://github.com/JustinBonus/SimCenterBackendApplications.git - cmd: dir build_script: From 3e664875907b10e5c5db1e5df78eaaa47e19c622 Mon Sep 17 00:00:00 2001 From: Justin Bonus Date: Sat, 27 Jul 2024 13:52:07 -0700 Subject: [PATCH 04/59] Revert clone repo to refer to NHERI-SimCenter after successful appveyor build --- appveyor.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 8a6b04ba3..10253539b 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -26,7 +26,7 @@ for: install: - uname - rm -rf SimCenterBackendApplications - - git clone https://github.com/JustinBonus/SimCenterBackendApplications.git + - git clone https://github.com/NHERI-SimCenter/SimCenterBackendApplications.git build_script: @@ -63,7 +63,7 @@ for: - conan user - conan remote add simcenter https://nherisimcenter.jfrog.io/artifactory/api/conan/simcenter - rm -rf SimCenterBackendApplications - - git clone https://github.com/JustinBonus/SimCenterBackendApplications.git + - git clone https://github.com/NHERI-SimCenter/SimCenterBackendApplications.git build_script: # build SimCenterBackendApplications @@ -104,7 +104,7 @@ for: install: - cmd: rm -rf SimCenterBackendApplications - - cmd: git clone https://github.com/JustinBonus/SimCenterBackendApplications.git + - cmd: git clone https://github.com/NHERI-SimCenter/SimCenterBackendApplications.git - cmd: dir build_script: From b5d96c1e6d4b43715c4925537299ab7ea01e5826 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Wed, 31 Jul 2024 15:59:43 -0700 Subject: [PATCH 05/59] JZ: R2D landslide front-back ends compatibility --- .../regionalGroundMotion/CMakeLists.txt | 1 + .../regionalGroundMotion/CreateStation.py | 3 +- .../HazardSimulationEQ.py | 15 +++++---- .../regionalGroundMotion/landslide.py | 32 +++++++++---------- 4 files changed, 27 insertions(+), 24 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt b/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt index e82a2052c..3d4f51a06 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt @@ -13,4 +13,5 @@ simcenter_add_python_script(SCRIPT HazardOccurrence.py) simcenter_add_python_script(SCRIPT USGS_API.py) simcenter_add_python_script(SCRIPT ScenarioForecast.py) simcenter_add_python_script(SCRIPT liquefaction.py) +simcenter_add_python_script(SCRIPT landslide.py) simcenter_add_python_script(SCRIPT GMSimulators.py) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index b2e2fbb3a..8e53e3dd5 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -370,7 +370,8 @@ def create_stations(input_file, output_file, filterIDs, vs30Config, z1Config, z2 else: tmp.update({'vsInferred': (1 if vs30Config['Parameters']['vsInferred'] else 0) }) for key in ['liqSusc', 'gwDepth', 'distWater', 'distCoast', 'distRiver',\ - 'precipitation']: + 'precipitation', 'slope', 'slopeThickness', 'gammaSoil', 'phiSoil',\ + 'cohesionSoil']: if stn.get(key, None) is not None: tmp.update({key:stn.get(key)}) ground_failure_input_keys.add(key) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py 
b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 2ba637fab..76719b201 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -344,13 +344,14 @@ def hazard_job(hazard_info): gf_im_list += settlement_info['Output'] if "Landslide" in ground_failure_info.keys(): import landslide - lsld_info = ground_failure_info['Landslide'] - lsld_model = getattr(landslide, lsld_info['Model'])(\ - lsld_info["Parameters"], stations) - ln_im_mr, mag_maf, im_list = lsld_model.run( - ln_im_mr, mag_maf, im_list - ) - gf_im_list += lsld_info['Output'] + if 'Landslide' in ground_failure_info['Landslide'].keys(): + lsld_info = ground_failure_info['Landslide']['Landslide'] + lsld_model = getattr(landslide, lsld_info['Model'])(\ + lsld_info["Parameters"], stations) + ln_im_mr, mag_maf, im_list = lsld_model.run( + ln_im_mr, mag_maf, im_list + ) + gf_im_list += lsld_info['Output'] diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index 1ead4ebe3..8e2fd39c7 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -322,34 +322,34 @@ def interpolate_spatial_parameters(self, parameters): lat_station = [site['lat'] for site in self.stations] lon_station = [site['lon'] for site in self.stations] # slope - if parameters["Slope"] == "Defined (\"Slope\") in Site File (.csv)": - self.slope = np.array([site['Slope'] for site in self.stations]) + if parameters["Slope"] == "Defined (\"slope\") in Site File (.csv)": + self.slope = np.array([site['slope'] for site in self.stations]) else: self.slope = sampleRaster(parameters["Slope"], parameters["inputCRS"],\ lon_station, lat_station) # t_slope - if parameters["SlopeThickness"] == "Defined (\"SlopeThickness\") in Site File (.csv)": - self.t_slope = np.array([site['SlopeThickness'] for site in self.stations]) + if parameters["SlopeThickness"] == "Defined (\"slopeThickness\") in Site File (.csv)": + self.t_slope = np.array([site['slopeThickness'] for site in self.stations]) elif parameters["SlopeThickness"] == "Use constant value (m)": self.t_slope = np.array([parameters["SlopeThicknessValue"]]*len(self.stations)) else: self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\ lon_station, lat_station) # gamma_soil - if parameters["GammaSoil"] == "Defined (\"GammaSoil\") in Site File (.csv)": - self.gamma_soil = np.array([site['GammaSoil'] for site in self.stations]) + if parameters["GammaSoil"] == "Defined (\"gammaSoil\") in Site File (.csv)": + self.gamma_soil = np.array([site['gammaSoil'] for site in self.stations]) elif parameters["GammaSoil"] == "Use constant value (kN/m^3)": - self.gamma_soil = np.array(parameters["GammaSoilValue"]*len(self.stations)) + self.gamma_soil = np.array([parameters["GammaSoilValue"]]*len(self.stations)) else: self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\ lon_station, lat_station) # phi_soil - if parameters["PhiSoil"] == "Defined (\"PhiSoil\") in Site File (.csv)": - self.phi_soil = np.array([site['PhiSoil'] for site in self.stations]) + if parameters["PhiSoil"] == "Defined (\"phiSoil\") in Site File (.csv)": + self.phi_soil = np.array([site['phiSoil'] for site in self.stations]) elif parameters["PhiSoil"] == 
"Use constant value (deg)": - self.phi_soil = np.array(parameters["PhiSoilValue"]*len(self.stations)) - elif parameters["PhiSoil"] == "Infer from Geologic Map": - if parameters["CohesionSoil"] == "Infer from Geologic Map": + self.phi_soil = np.array([parameters["PhiSoilValue"]]*len(self.stations)) + elif parameters["PhiSoil"] == "Infer from Geologic Map (Bain et al. 2022)": + if parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 2022)": self.phi_soil, self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ parameters['inputCRS'], lon_station, lat_station) else: @@ -360,11 +360,11 @@ def interpolate_spatial_parameters(self, parameters): lon_station, lat_station) # coh_soil if self.coh_soil is None: - if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)": - self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations]) + if parameters["CohesionSoil"] == "Defined (\"cohesionSoil\") in Site File (.csv)": + self.coh_soil = np.array([site['cohesionSoil'] for site in self.stations]) elif parameters["CohesionSoil"] == "Use constant value (kPa)": - self.coh_soil = np.array(parameters["CohesionSoilValue"]*len(self.stations)) - elif parameters["CohesionSoil"] == "Infer from Geologic Map": + self.coh_soil = np.array([parameters["CohesionSoilValue"]]*len(self.stations)) + elif parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 2022)": self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ parameters['inputCRS'], lon_station, lat_station) else: From e9bef0385e24450a32febfc0e4ec0ca3fd09123b Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 13 Aug 2024 18:08:37 -0700 Subject: [PATCH 06/59] Fix needed imports when merging ruff dafe fixes --- .../regionalGroundMotion/HazardSimulationEQ.py | 1 + .../regionalGroundMotion/ScenarioForecast.py | 1 + 2 files changed, 2 insertions(+) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index e1c3f4ab1..a242a8eb9 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -550,6 +550,7 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0914, PLR0915 if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype + from jpype import imports from jpype.types import * # noqa: F403 memory_total = psutil.virtual_memory().total / (1024.0**3) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 94fbca023..7664f6e62 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -99,6 +99,7 @@ if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype + from jpype import imports from jpype.types import * # noqa: F403 memory_total = psutil.virtual_memory().total / (1024.0**3) From a50555dcf9f72371e43410077d7a122ee748c0c9 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Wed, 14 Aug 2024 10:50:01 -0700 Subject: [PATCH 07/59] after codespell --- modules/performREC/pyrecodes/run_pyrecodes.py | 2 +- 
.../regionalGroundMotion/ScenarioForecast.py | 3 ++- .../regionalGroundMotion/landslide.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py index 87cbf2123..4b41c8251 100644 --- a/modules/performREC/pyrecodes/run_pyrecodes.py +++ b/modules/performREC/pyrecodes/run_pyrecodes.py @@ -183,7 +183,7 @@ def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): comm.Barrier() # if rank 0, gather result_agg and resilience_results, write to file - # note that the gathered results dosen't follow the order in realization_to_run + # note that the gathered results doesn't follow the order in realization_to_run # but this order is not needed when calculating mean and std if doParallel: # gather results_agg diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 7664f6e62..5f4725fa4 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -99,7 +99,8 @@ if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype - from jpype import imports + # from jpype import imports + import jpype.imports from jpype.types import * # noqa: F403 memory_total = psutil.virtual_memory().total / (1024.0**3) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index 8e2fd39c7..a901df9b9 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -217,7 +217,7 @@ def erfinv_coeff(order=20): def erfinv(x, order=20): """returns inverse erf(x)""" - # get coeffcients + # get coefficients c = erfinv_coeff(order) # initialize root_pi_over_2 = np.sqrt(np.pi)/2 @@ -237,7 +237,7 @@ def norm2_ppf(p, loc, scale): def erfinv_2d(x, order=20): """returns inverse erf(x)""" - # get coeffcients + # get coefficients c = erfinv_coeff(order) # initialize root_pi_over_2 = np.sqrt(np.pi)/2 From c81daeb744a67f13cb960279a22783b436024245 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Wed, 14 Aug 2024 11:17:23 -0700 Subject: [PATCH 08/59] after ruff check --add-noqa --- modules/Workflow/computeResponseSpectrum.py | 8 +- modules/Workflow/createGM4BIM.py | 44 +- modules/Workflow/whale/main.py | 32 +- modules/common/simcenter_common.py | 12 +- modules/createEVENT/CFDEvent/CFDEvent.py | 2 +- .../EmptyDomainCFD/EmptyDomainCFD.py | 2 +- .../EmptyDomainCFD/post_process_output.py | 14 +- .../GeoClawOpenFOAM/AddBuildingForces.py | 4 +- .../createEVENT/GeoClawOpenFOAM/GeoClaw.py | 2 +- .../GeoClawOpenFOAM/GeoClawBathy.py | 2 +- .../GeoClawOpenFOAM/GetOpenFOAMEvent.py | 8 +- modules/createEVENT/GeoClawOpenFOAM/flume.py | 6 +- .../createEVENT/GeoClawOpenFOAM/hydroUtils.py | 10 +- .../GeoClawOpenFOAM/of7Alpboundary.py | 6 +- .../GeoClawOpenFOAM/of7Building.py | 4 +- .../createEVENT/GeoClawOpenFOAM/of7Decomp.py | 4 +- .../GeoClawOpenFOAM/of7Geometry.py | 4 +- .../createEVENT/GeoClawOpenFOAM/of7Initial.py | 6 +- .../GeoClawOpenFOAM/of7Materials.py | 6 +- .../createEVENT/GeoClawOpenFOAM/of7Meshing.py | 10 +- .../createEVENT/GeoClawOpenFOAM/of7Others.py | 4 +- 
.../GeoClawOpenFOAM/of7Prboundary.py | 6 +- .../createEVENT/GeoClawOpenFOAM/of7Process.py | 8 +- .../GeoClawOpenFOAM/of7PtDboundary.py | 10 +- .../createEVENT/GeoClawOpenFOAM/of7Solve.py | 12 +- .../GeoClawOpenFOAM/of7Turbulence.py | 4 +- .../GeoClawOpenFOAM/of7Uboundary.py | 8 +- .../createEVENT/GeoClawOpenFOAM/openfoam7.py | 26 +- .../createEVENT/GeoClawOpenFOAM/osuFlume.py | 2 +- .../createEVENT/GeoClawOpenFOAM/userFlume.py | 2 +- .../IsolatedBuildingCFD.py | 2 +- .../createEVENT/Istanbul/IstanbulStations.py | 2 +- modules/createEVENT/M9/M9API.py | 2 +- modules/createEVENT/M9/M9Stations.py | 2 +- modules/createEVENT/MPM/MPM.py | 2 +- .../createEVENT/MPM/post_process_output.py | 14 +- .../SurroundedBuildingCFD.py | 2 +- .../post_process_output.py | 14 +- .../coupledDigitalTwin/CoupledDigitalTwin.py | 2 +- .../IntensityMeasureComputer.py | 2 +- .../siteResponse/RegionalSiteResponse.py | 10 +- .../stochasticWave/StochasticWave.py | 2 +- modules/createSAM/AutoSDA/beam_component.py | 2 +- modules/createSAM/AutoSDA/column_component.py | 2 +- modules/createSAM/AutoSDA/connection_part.py | 2 +- modules/createSAM/AutoSDA/help_functions.py | 32 +- modules/performDL/pelicun3/DL_visuals.py | 42 +- .../performHUA/pyincore_data/censusutil.py | 10 +- modules/performREC/pyrecodes/run_pyrecodes.py | 584 ++++++++++------- .../regionalGroundMotion/CreateStation.py | 30 +- .../regionalGroundMotion/FetchOpenQuake.py | 20 +- .../regionalGroundMotion/HazardOccurrence.py | 12 +- .../HazardSimulationEQ.py | 17 +- .../regionalGroundMotion/ScenarioForecast.py | 1 + .../gmpe/CorrelationModel.py | 22 +- .../gmpe/SignificantDurationModel.py | 6 +- .../regionalGroundMotion/gmpe/openSHAGMPE.py | 2 +- .../regionalGroundMotion/landslide.py | 592 +++++++++++------- .../regionalGroundMotion/liquefaction.py | 24 +- .../ComputeIntensityMeasure.py | 4 +- .../regionalWindField/CreateScenario.py | 32 +- .../regionalWindField/CreateStation.py | 2 +- .../regionalWindField/WindFieldSimulation.py | 4 +- modules/performUQ/SimCenterUQ/PLoM/PLoM.py | 12 +- .../SimCenterUQ/PLoM/PLoM_library.py | 20 +- modules/performUQ/SimCenterUQ/PLoM/general.py | 20 +- modules/performUQ/SimCenterUQ/runPLoM.py | 8 +- .../performUQ/UCSD_UQ/defaultLogLikeScript.py | 2 +- modules/performUQ/UCSD_UQ/mwg_sampler.py | 2 +- modules/performUQ/UCSD_UQ/runFEM.py | 2 +- modules/performUQ/UCSD_UQ/runTMCMC.py | 2 +- .../performUQ/common/ERAClasses/ERACond.py | 12 +- .../performUQ/common/ERAClasses/ERADist.py | 160 ++--- .../performUQ/common/ERAClasses/ERANataf.py | 32 +- .../performUQ/common/ERAClasses/ERARosen.py | 28 +- modules/performUQ/other/UQpyRunner.py | 2 +- .../systemPerformance/REWET/REWET/Damage.py | 24 +- .../REWET/REWET/EnhancedWNTR/epanet/io.py | 22 +- .../REWET/REWET/EnhancedWNTR/network/model.py | 2 +- .../REWET/REWET/EnhancedWNTR/sim/epanet.py | 14 +- .../REWET/REWET/EnhancedWNTR/sim/io.py | 12 +- .../REWET/REWET/Input/Policy_IO.py | 6 +- .../REWET/REWET/Input/Settings.py | 2 +- .../REWET/REWET/Output/GUI_Curve_API.py | 16 +- .../systemPerformance/REWET/REWET/initial.py | 2 +- .../REWET/REWET/restoration/base.py | 8 +- .../REWET/REWET/restoration/io.py | 6 +- .../REWET/REWET/restoration/model.py | 12 +- .../REWET/REWET/restoration/registry.py | 12 +- .../systemPerformance/REWET/REWET/timeline.py | 4 +- .../systemPerformance/REWET/preprocessorIO.py | 2 +- 91 files changed, 1253 insertions(+), 959 deletions(-) diff --git a/modules/Workflow/computeResponseSpectrum.py b/modules/Workflow/computeResponseSpectrum.py index 66c4296ec..24871ed74 100644 --- 
a/modules/Workflow/computeResponseSpectrum.py +++ b/modules/Workflow/computeResponseSpectrum.py @@ -23,7 +23,7 @@ def convert_accel_units(acceleration, from_, to_='cm/s/s'): # noqa: C901 acceleration = np.asarray(acceleration) if from_ == 'g': if to_ == 'g': - return acceleration + return acceleration # noqa: DOC201 if to_ in m_sec_square: return acceleration * g if to_ in cm_sec_square: @@ -70,7 +70,7 @@ def get_velocity_displacement( velocity = time_step * cumtrapz(acceleration, initial=0.0) if displacement is None: displacement = time_step * cumtrapz(velocity, initial=0.0) - return velocity, displacement + return velocity, displacement # noqa: DOC201 class NewmarkBeta: @@ -160,7 +160,7 @@ def run(self): 'PGV': np.max(np.fabs(self.velocity)), 'PGD': np.max(np.fabs(self.displacement)), } - return self.response_spectrum, time_series, accel, vel, disp + return self.response_spectrum, time_series, accel, vel, disp # noqa: DOC201 def _newmark_beta(self, omega, cval, kval): # noqa: ARG002 """Newmark-beta integral @@ -216,4 +216,4 @@ def _newmark_beta(self, omega, cval, kval): # noqa: ARG002 disp[j, :] = delta_u + disp[j - 1, :] a_t[j, :] = ground_acc[j] + accel[j, :] - return accel, vel, disp, a_t + return accel, vel, disp, a_t # noqa: DOC201 diff --git a/modules/Workflow/createGM4BIM.py b/modules/Workflow/createGM4BIM.py index 3bc5f2297..889fa3d1b 100644 --- a/modules/Workflow/createGM4BIM.py +++ b/modules/Workflow/createGM4BIM.py @@ -75,7 +75,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_time = output_units.get('time', 'sec') f_time = globals().get(unit_time, None) if f_time is None: - raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, RUF100, TRY003 scale_factors = {} @@ -88,7 +88,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 # get the scale factor to standard units f_in = globals().get(input_unit, None) if f_in is None: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'Input unit for event files not recognized: {input_unit}' # noqa: EM102 ) @@ -98,7 +98,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_type = base_unit_type if unit_type is None: - raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003 # the output unit depends on the unit type if unit_type == 'acceleration': @@ -111,7 +111,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 f_out = 1.0 / f_length else: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102 ) @@ -120,7 +120,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors + return scale_factors # noqa: DOC201 def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, D103, N802, N803, PLR0914, PLR0915 @@ -410,28 +410,28 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, m_pgd_y = 0.0 s_pgd_y = 0.0 # add to dictionary - dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID)) + dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID)) # noqa: RUF031 # pga - dict_im[('PGA', 0, 1, 'median')].append(m_pga_x) - dict_im[('PGA', 
0, 1, 'beta')].append(s_pga_x) - dict_im[('PGA', 0, 2, 'median')].append(m_pga_y) - dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y) + dict_im[('PGA', 0, 1, 'median')].append(m_pga_x) # noqa: RUF031 + dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x) # noqa: RUF031 + dict_im[('PGA', 0, 2, 'median')].append(m_pga_y) # noqa: RUF031 + dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y) # noqa: RUF031 # pgv - dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x) - dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x) - dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y) - dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y) + dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x) # noqa: RUF031 + dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x) # noqa: RUF031 + dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y) # noqa: RUF031 + dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y) # noqa: RUF031 # pgd - dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x) - dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x) - dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y) - dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y) + dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x) # noqa: RUF031 + dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x) # noqa: RUF031 + dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y) # noqa: RUF031 + dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y) # noqa: RUF031 for jj, Ti in enumerate(periods): # noqa: N806 cur_sa = f'SA({Ti}s)' - dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj]) - dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj]) - dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj]) - dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj]) + dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj]) # noqa: RUF031 + dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj]) # noqa: RUF031 + dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj]) # noqa: RUF031 + dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj]) # noqa: RUF031 # aggregate for cur_key, cur_value in dict_im.items(): diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py index 6f92dd86a..260fdba53 100644 --- a/modules/Workflow/whale/main.py +++ b/modules/Workflow/whale/main.py @@ -310,7 +310,7 @@ def create_command(command_list, enforced_python=None): for command_arg in command_list[1:]: command += f'"{command_arg}" ' - return command + return command # noqa: DOC201 def run_command(command): @@ -357,7 +357,7 @@ def run_command(command): py_script.main(arg_list) - return '', '' + return '', '' # noqa: DOC201 else: # noqa: RET505 # fmk with Shell=True not working on older windows machines, new approach needed for quoted command .. 
turn into a list @@ -668,7 +668,7 @@ def get_command_list(self, app_path, force_posix=False): # noqa: FBT002, C901 # pp.pprint(arg_list) - return arg_list + return arg_list # noqa: DOC201 class Workflow: # noqa: PLR0904 @@ -857,7 +857,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''): # noqa: C901 if app_type_obj == None: # noqa: E711 err = 'The application ' + app_type + ' is not found in the app registry' - raise WorkFlowInputError(err) # noqa: DOC501 + raise WorkFlowInputError(err) # noqa: DOC501, RUF100 # Finally check to see if the app registry contains the provided application if app_type_obj.get(app_in) == None: # noqa: E711 @@ -866,7 +866,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''): # noqa: C901 + app_in ) print('Error', app_in) # noqa: T201 - raise WorkFlowInputError(err) # noqa: DOC501 + raise WorkFlowInputError(err) # noqa: DOC501, RUF100 appData = app_dict['ApplicationData'] # noqa: N806 # @@ -878,7 +878,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''): # noqa: C901 # Check if the app object was created successfully if app_object is None: - raise WorkFlowInputError(f'Application deep copy failed for {app_type}') # noqa: DOC501, EM102, TRY003 + raise WorkFlowInputError(f'Application deep copy failed for {app_type}') # noqa: DOC501, EM102, RUF100, TRY003 # only assign the app to the workflow if it has an executable if app_object.rel_path is None: @@ -1081,7 +1081,7 @@ def _parse_inputs(self): # noqa: C901 # Events are special because they are in an array if 'Events' in requested_apps: if len(requested_apps['Events']) > 1: - raise WorkFlowInputError( # noqa: DOC501, TRY003 + raise WorkFlowInputError( # noqa: DOC501, RUF100, TRY003 'Currently, WHALE only supports a single event.' # noqa: EM101 ) for event in requested_apps['Events'][ @@ -1104,7 +1104,7 @@ def _parse_inputs(self): # noqa: C901 ) if app_object is None: - raise WorkFlowInputError( # noqa: DOC501 + raise WorkFlowInputError( # noqa: DOC501, RUF100 'Application entry missing for {}'.format('Events') # noqa: EM103 ) @@ -1114,12 +1114,12 @@ def _parse_inputs(self): # noqa: C901 self.workflow_apps['Event'] = app_object else: - raise WorkFlowInputError( # noqa: DOC501, TRY003 + raise WorkFlowInputError( # noqa: DOC501, RUF100, TRY003 'Currently, only earthquake and wind events are supported. 
' # noqa: EM102 f'EventClassification must be Earthquake, not {eventClassification}' ) else: - raise WorkFlowInputError('Need Event Classification') # noqa: DOC501, EM101, TRY003 + raise WorkFlowInputError('Need Event Classification') # noqa: DOC501, EM101, RUF100, TRY003 # Figure out what types of assets are coming into the analysis assetObjs = requested_apps.get('Assets', None) # noqa: N806 @@ -1130,7 +1130,7 @@ def _parse_inputs(self): # noqa: C901 # Check if asset list is not empty if len(assetObjs) == 0: - raise WorkFlowInputError('The provided asset object is empty') # noqa: DOC501, EM101, TRY003 + raise WorkFlowInputError('The provided asset object is empty') # noqa: DOC501, EM101, RUF100, TRY003 # Iterate through the asset objects for assetObj in assetObjs: # noqa: N806 @@ -1316,7 +1316,7 @@ def create_asset_files(self): log_div() - return assetFilesList + return assetFilesList # noqa: DOC201 def augment_asset_files(self): # noqa: C901 """Short description @@ -1504,7 +1504,7 @@ def augment_asset_files(self): # noqa: C901 ) log_div() - return assetFilesList + return assetFilesList # noqa: DOC201 def perform_system_performance_assessment(self, asset_type): """For an asset type run the system level performance assessment application @@ -1525,7 +1525,7 @@ def perform_system_performance_assessment(self, asset_type): prepend_timestamp=False, ) log_div() - return False + return False # noqa: DOC201 if performance_app.rel_path == None: # noqa: E711 log_msg( @@ -1905,7 +1905,7 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'): # noqa: C901, N8 prepend_timestamp=False, ) log_div() - return dst + return dst # noqa: DOC201 def cleanup_simdir(self, asst_id): """Short description @@ -2730,7 +2730,7 @@ def estimate_losses( # noqa: C901 ], ) if ('PID', '0') in df_res.columns: - del df_res[('PID', '0')] + del df_res[('PID', '0')] # noqa: RUF031 # store the EDP statistics in the output DF for col in np.transpose(col_info): diff --git a/modules/common/simcenter_common.py b/modules/common/simcenter_common.py index 03f3a1054..6e4b319c4 100644 --- a/modules/common/simcenter_common.py +++ b/modules/common/simcenter_common.py @@ -237,7 +237,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_time = output_units.get('time', 'sec') f_time = globals().get(unit_time, None) if f_time is None: - raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, RUF100, TRY003 scale_factors = {} @@ -253,7 +253,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 f_in = globals().get(input_unit, None) if f_in is None: - raise ValueError(f'Input unit not recognized: {input_unit}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Input unit not recognized: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003 unit_type = None for base_unit_type, unit_set in globals()['unit_types'].items(): @@ -261,7 +261,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_type = base_unit_type if unit_type is None: - raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003 # the output unit depends on the unit type if unit_type == 'acceleration': @@ -274,7 +274,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 f_out = 1.0 / f_length else: - raise ValueError( # noqa: DOC501, TRY003 
+ raise ValueError( # noqa: DOC501, RUF100, TRY003 f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102 ) @@ -283,7 +283,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors + return scale_factors # noqa: DOC201 def get_unit_bases(input_units): @@ -306,4 +306,4 @@ def get_unit_bases(input_units): input_unit_bases = cur_unit_bases break - return input_unit_bases + return input_unit_bases # noqa: DOC201 diff --git a/modules/createEVENT/CFDEvent/CFDEvent.py b/modules/createEVENT/CFDEvent/CFDEvent.py index f3a58f36a..01326808c 100644 --- a/modules/createEVENT/CFDEvent/CFDEvent.py +++ b/modules/createEVENT/CFDEvent/CFDEvent.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py index 1db2a3ca1..449aa07f6 100644 --- a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py +++ b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/EmptyDomainCFD/post_process_output.py b/modules/createEVENT/EmptyDomainCFD/post_process_output.py index f11de48cf..160877c2e 100644 --- a/modules/createEVENT/EmptyDomainCFD/post_process_output.py +++ b/modules/createEVENT/EmptyDomainCFD/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p + return probes, time, p # noqa: DOC201 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: RET504 + return sField # noqa: DOC201, RET504 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U + return probes, time, U # noqa: DOC201 def calculate_length_scale(u, uav, dt, 
min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: RET504 + return L # noqa: DOC201, RET504 def psd(x, dt, nseg): # noqa: F811 diff --git a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py index f932a5f86..46235bb09 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py +++ b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py @@ -9,7 +9,7 @@ def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803 It also checks that system directory contains the controlDict """ # noqa: D205, D400, D401, D404 if not os.path.isdir(caseDir): # noqa: PTH112 - return False + return False # noqa: DOC201 caseDirList = os.listdir(caseDir) # noqa: N806 necessaryDirs = ['0', 'constant', 'system'] # noqa: N806 @@ -27,7 +27,7 @@ def findFunctionsDictionary(controlDictLines): # noqa: N802, N803 """This method will find functions dictionary in the controlDict""" # noqa: D400, D401, D404 for line in controlDictLines: if line.startswith('functions'): - return (True, controlDictLines.index(line) + 2) + return (True, controlDictLines.index(line) + 2) # noqa: DOC201 return [False, len(controlDictLines)] diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py index 491aff653..dc0544dec 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py @@ -80,4 +80,4 @@ def creategeom(self, data, path): # Points of interest bottompts = self.getbathy(maxvalues, minvalues, data) # noqa: F841 - return 0 + return 0 # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py index 2fed354a4..280b462bf 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py @@ -61,4 +61,4 @@ def creategeom(self, data, path): # noqa: ARG002, PLR6301 # Create a utilities object hydroutil = hydroUtils() # noqa: F841 - return 0 + return 0 # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py index c4098273e..24f05dccd 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py @@ -16,7 +16,7 @@ def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803 It also checks that system directory contains the controlDict """ # noqa: D205, D400, D401, D404 if not os.path.isdir(caseDir): # noqa: PTH112 - return False + return False # noqa: DOC201 caseDirList = os.listdir(caseDir) # noqa: N806 necessaryDirs = ['0', 'constant', 'system', 'postProcessing'] # noqa: N806 @@ -36,7 +36,7 @@ def parseForceComponents(forceArray): # noqa: N802, N803 x = float(components[0]) y = float(components[1]) z = float(components[2]) - return [x, y, z] + return [x, y, z] # noqa: DOC201 def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime): # noqa: N802, N803 @@ -77,14 +77,14 @@ def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime): # noqa: N80 forces[i].Y.append(fpry + fvy + fpoy) forces[i].Z.append(fprz + fvz + fpoz) - return [deltaT, forces] + return [deltaT, forces] # noqa: DOC201 def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return 
directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent( # noqa: N802 diff --git a/modules/createEVENT/GeoClawOpenFOAM/flume.py b/modules/createEVENT/GeoClawOpenFOAM/flume.py index d7cd179d7..12a2cb8e7 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/flume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/flume.py @@ -110,7 +110,7 @@ def generateflume(self, breadth, path): ) # Write bottom STL file # Return extreme values - return extremeval + return extremeval # noqa: DOC201 ############################################################# def flumedata(self, IpPTFile): # noqa: N803 @@ -178,7 +178,7 @@ def flumedata(self, IpPTFile): # noqa: N803 self.npt = np.delete(self.npt, noindexes, axis=0) # Return extreme values - return extremeval + return extremeval # noqa: DOC201 #################################################################### def right(self): @@ -431,4 +431,4 @@ def extremedata(self, extreme, breadth): # noqa: PLR6301 ) tempfileID.close # noqa: B018 - return 0 + return 0 # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py index 2722a554f..a9c4eae15 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py +++ b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py @@ -90,7 +90,7 @@ def extract(self, obj, path, ind, arr): # noqa: C901 else: arr.append(None) - return arr + return arr # noqa: DOC201 ############################################################# def extract_element_from_json(self, obj, path): @@ -106,7 +106,7 @@ def extract_element_from_json(self, obj, path): """ # noqa: D205, D401 if isinstance(obj, dict): # noqa: RET503 - return self.extract(obj, path, 0, []) + return self.extract(obj, path, 0, []) # noqa: DOC201 elif isinstance(obj, list): # noqa: RET505 outer_arr = [] for item in obj: @@ -129,7 +129,7 @@ def general_header(self): # noqa: PLR6301 | | O | \\*---------------------------------------------------------------------------*/ \n\n""" # noqa: W291 - return header # noqa: RET504 + return header # noqa: DOC201, RET504 #################################################################### def of7header(self, OFclass, location, filename): # noqa: N803, PLR6301 @@ -156,7 +156,7 @@ class {OFclass}; }} // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def hydrolog(self, projname, fipath): @@ -210,4 +210,4 @@ def getlist(self, data): # noqa: PLR6301 data = data.replace(',', ' ') results = [float(n) for n in data.split()] - return results # noqa: RET504 + return results # noqa: DOC201, RET504 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py index de03da459..bad5dbab3 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py @@ -89,7 +89,7 @@ def Alptext(self, data, patches): # noqa: N802 Alptext = Alptext + '}\n\n' # noqa: N806, PLR6104 # Return the text for velocity BC - return Alptext # noqa: RET504 + return Alptext # noqa: DOC201, RET504 ############################################################# def Alpheader(self): # noqa: N802, PLR6301 @@ -114,7 +114,7 @@ def Alpheader(self): # noqa: N802, PLR6301 header = header + 'internalField\tuniform\t0;\n\n' # noqa: PLR6104 # Return the header for U file - return header # noqa: RET504 + 
return header # noqa: DOC201, RET504 ############################################################# def Alppatchtext(self, Alptype, patchname): # noqa: ARG002, N802, N803, PLR6301 @@ -140,4 +140,4 @@ def Alppatchtext(self, Alptype, patchname): # noqa: ARG002, N802, N803, PLR6301 Alptext = Alptext + 'type\tzeroGradient;\n\t}\n' # noqa: N806, PLR6104 # Return the header for U file - return Alptext + return Alptext # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py index 3f3956ba1..acefbf5bd 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py @@ -100,7 +100,7 @@ def buildcheck(self, data, path): # noqa: C901, PLR0911, PLR6301 data, ['Events', 'BuildingSTLFile'] ) if stlfile == [None]: - return -1 + return -1 # noqa: DOC201 else: # noqa: RET505 stlfile = ', '.join( hydroutil.extract_element_from_json( @@ -218,7 +218,7 @@ def createbuilds(self, data, path): elif buildeftype == 'Parameters': self.buildpara(data, path) - return 0 + return 0 # noqa: DOC201 ############################################################# def buildmanual(self, data, path): diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py index 11af41f32..bf7396354 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py @@ -75,7 +75,7 @@ def decomptext(self, data): decomptext = decomptext + 'method\tscotch;\n\n' # noqa: PLR6104 - return decomptext # noqa: RET504 + return decomptext # noqa: DOC201, RET504 ############################################################# def decompheader(self): # noqa: PLR6301 @@ -97,7 +97,7 @@ def decompheader(self): # noqa: PLR6301 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def scripts(self, data, path): # noqa: ARG002, PLR6301 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py index dc41e46c9..9f9670cb1 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py @@ -82,7 +82,7 @@ def geomcheck(self, data, path): # noqa: C901, PLR0911, PLR6301 data, ['Events', 'NumBathymetryFiles'] ) if numbathy == [None]: - return -1 + return -1 # noqa: DOC201 else: # noqa: RET505 numbathy = ', '.join( hydroutil.extract_element_from_json( @@ -250,7 +250,7 @@ def createOFSTL(self, data, path): # noqa: C901, N802, PLR6301 # Create geometry (i.e. 
STL files) and extreme file ecode = finalgeom.creategeom(data, path) if ecode < 0: - return -1 + return -1 # noqa: DOC201 # Bathymetry only elif int(simtype) == 2: # noqa: PLR2004 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py index da6e38042..1b2431cac 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py @@ -173,7 +173,7 @@ def alphatext(self, data, fipath): alphatext = alphatext + '\n);' # noqa: PLR6104 - return alphatext # noqa: RET504 + return alphatext # noqa: DOC201, RET504 ############################################################# def alphaheader(self): # noqa: PLR6301 @@ -195,7 +195,7 @@ def alphaheader(self): # noqa: PLR6301 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def alphacheck(self, data, fipath): # noqa: PLR6301 @@ -220,7 +220,7 @@ def alphacheck(self, data, fipath): # noqa: PLR6301 fname = 'SWAlpha.txt' swalphafile = os.path.join(fipath, fname) # noqa: PTH118 if not os.path.exists(swalphafile): # noqa: PTH110 - return -1 + return -1 # noqa: DOC201 # For all types other than the shallow water else: diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py index ea7a5190f..17f07fa86 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py @@ -120,7 +120,7 @@ def mattext(self, data): mattext = mattext + 'sigma\t[1 0 -2 0 0 0 0]\t' + sigma + ';\n' - return mattext # noqa: RET504 + return mattext # noqa: DOC201, RET504 ############################################################# def matheader(self): # noqa: PLR6301 @@ -142,7 +142,7 @@ def matheader(self): # noqa: PLR6301 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def matcheck(self, data): # noqa: PLR6301 @@ -162,7 +162,7 @@ def matcheck(self, data): # noqa: PLR6301 data, ['Events', 'WaterViscosity'] ) if nuwater == [None]: - return -1 + return -1 # noqa: DOC201 # Exponent nuwaterexp = hydroutil.extract_element_from_json( data, ['Events', 'WaterViscosityExp'] diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py index 8f852de88..d02cb6300 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py @@ -72,7 +72,7 @@ def meshcheck(self, data, fipath): # noqa: PLR6301 # If hydro mesher - nothing to check if int(mesher[0]) == 0: - return 0 + return 0 # noqa: DOC201 # Other mesh software elif int(mesher[0]) == 1: # noqa: RET505 @@ -126,7 +126,7 @@ def meshheader(self, fileobjec): # noqa: PLR6301 ) # Return the header for meshing file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def bmeshtext(self, data): @@ -284,7 +284,7 @@ def bmeshtext(self, data): # Add merge patch pairs bmeshtext = bmeshtext + 'mergePatchPairs\n(\n);\n' # noqa: PLR6104 - return bmeshtext # noqa: RET504 + return bmeshtext # noqa: DOC201, RET504 
############################################################# def sfetext(self): @@ -320,7 +320,7 @@ def sfetext(self): elif int(data_geoext[6]) == 3: # noqa: PLR2004 sfetext = sfetext + 'OtherBuilding.stl\n' + stlinfo + '\n\n' - return sfetext + return sfetext # noqa: DOC201 ############################################################# def shmtext(self, data): @@ -505,7 +505,7 @@ def shmtext(self, data): shmtext = shmtext + 'debug\t0;\n' # noqa: PLR6104 shmtext = shmtext + 'mergeTolerance\t1E-6;\n' # noqa: PLR6104 - return shmtext # noqa: RET504 + return shmtext # noqa: DOC201, RET504 ############################################################# def scripts(self, data, path): # noqa: C901, PLR6301 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py index e8da1252a..206d429e9 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py @@ -78,7 +78,7 @@ def othersheader(self, fileclas, fileloc, fileobjec): # noqa: PLR6301 ) # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def gfiletext(self, data): @@ -140,4 +140,4 @@ def gfiletext(self, data): + ');\n' ) - return gfiletext # noqa: RET504 + return gfiletext # noqa: DOC201, RET504 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py index f44d4ba98..957991624 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py @@ -93,7 +93,7 @@ def Prtext(self, data, patches): # noqa: N802 prtext = prtext + '}\n\n' # noqa: PLR6104 # Return the text for velocity BC - return prtext # noqa: RET504 + return prtext # noqa: DOC201, RET504 ############################################################# def Prheader(self): # noqa: N802, PLR6301 @@ -118,7 +118,7 @@ def Prheader(self): # noqa: N802, PLR6301 header = header + 'internalField\tuniform\t0;\n\n' # noqa: PLR6104 # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def Prpatchtext(self, data, Prtype, patchname): # noqa: C901, N802, N803, PLR6301 @@ -208,4 +208,4 @@ def Prpatchtext(self, data, Prtype, patchname): # noqa: C901, N802, N803, PLR63 Prtext = Prtext + 'type\tempty;\n\t}\n' # noqa: N806, PLR6104 # Return the header for U file - return Prtext + return Prtext # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py index 72682f23d..eaed69feb 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py @@ -132,7 +132,7 @@ def pprocesstext(self, data, path): # noqa: PLR6301 sampletext = sampletext + ');\n\n' # noqa: PLR6104 sampletext = sampletext + 'fields\t' + fieldtext + ';\n' - return sampletext # noqa: RET504 + return sampletext # noqa: DOC201, RET504 ############################################################# def pprocesscdict(self, data, path): # noqa: C901, PLR6301 @@ -275,7 +275,7 @@ def pprocesscdict(self, data, path): # noqa: C901, PLR6301 cdicttext = cdicttext + '\t\tfields\t' + fieldtext + ';\n' cdicttext = cdicttext + '\t}\n}' # noqa: PLR6104 - return cdicttext # noqa: RET504 + return cdicttext # noqa: DOC201, RET504 ############################################################# def 
scripts(self, data, path): # noqa: ARG002, PLR6301 @@ -293,7 +293,7 @@ def scripts(self, data, path): # noqa: ARG002, PLR6301 data, ['Events', 'Postprocessing'] ) if pprocess == [None]: - return 0 + return 0 # noqa: DOC201 else: # noqa: RET505 pprocess = ', '.join( hydroutil.extract_element_from_json( @@ -350,7 +350,7 @@ def pprocesscheck(self, data, path): # noqa: PLR6301 ) if pprocess == 'No': - return 0 + return 0 # noqa: DOC201 else: # noqa: RET505 pprocessV = ', '.join( # noqa: N806 hydroutil.extract_element_from_json(data, ['Events', 'PPVelocity']) diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py index 72a5a8615..9a72c2653 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py @@ -115,7 +115,7 @@ def PtDcheck(self, data, patches): # noqa: N802, PLR6301 if (int(Utype) == 103) or (int(Utype) == 104): # noqa: PLR2004 numMovWall += 1 # noqa: N806 if numMovWall > 0: - return 1 + return 1 # noqa: DOC201 if numMovWall == 0: return 0 @@ -169,7 +169,7 @@ def PtDtext(self, data, fipath, patches): # noqa: N802 ptdtext = ptdtext + '}\n\n' # noqa: PLR6104 # Return the text for pointDisplacement - return ptdtext # noqa: RET504 + return ptdtext # noqa: DOC201, RET504 ############################################################# def PtDheader(self): # noqa: N802, PLR6301 @@ -194,7 +194,7 @@ def PtDheader(self): # noqa: N802, PLR6301 header = header + 'internalField\tuniform (0 0 0);\n\n' # noqa: PLR6104 # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def PtDpatchtext(self, data, Utype, patchname, fipath): # noqa: ARG002, N802, N803 @@ -243,7 +243,7 @@ def PtDpatchtext(self, data, Utype, patchname, fipath): # noqa: ARG002, N802, N PtDtext = PtDtext + 'value\tuniform (0 0 0);\n' # noqa: N806, PLR6104 PtDtext = PtDtext + '\t}\n' # noqa: N806, PLR6104 - return PtDtext + return PtDtext # noqa: DOC201 ############################################################# def getNormal(self, patchname): # noqa: N802, PLR6301 @@ -267,4 +267,4 @@ def getNormal(self, patchname): # noqa: N802, PLR6301 elif (patchname == 'Building') or (patchname == 'OtherBuilding'): # noqa: PLR1714 normal = '1 0 0' - return normal + return normal # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py index 02ca4fc12..6e62f2f43 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py @@ -73,7 +73,7 @@ def solverheader(self, fileobjec): # noqa: PLR6301 ) # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def fvSchemetext(self, data): # noqa: ARG002, N802 @@ -163,7 +163,7 @@ def fvSchemetext(self, data): # noqa: ARG002, N802 fvSchemetext = fvSchemetext + 'alpha.water;\n' # noqa: N806, PLR6104 fvSchemetext = fvSchemetext + '}\n' # noqa: N806, PLR6104 - return fvSchemetext # noqa: RET504 + return fvSchemetext # noqa: DOC201, RET504 ############################################################# def fvSolntext(self, data): # noqa: N802 @@ -280,7 +280,7 @@ def fvSolntext(self, data): # noqa: N802 fvSolntext = fvSolntext + 'fields\n\t{\n\t}\n\t' # noqa: N806, PLR6104 fvSolntext = fvSolntext + 'equations\n\t{\n\t\t".*"\t1;\n\t}\n}' # noqa: N806, PLR6104 
- return fvSolntext # noqa: RET504 + return fvSolntext # noqa: DOC201, RET504 ############################################################# def cdicttext(self, data): @@ -349,7 +349,7 @@ def cdicttext(self, data): cdicttext = cdicttext + 'maxAlphaCo \t 1.0;\n\n' # noqa: PLR6104 cdicttext = cdicttext + 'maxDeltaT \t 1;\n\n' # noqa: PLR6104 - return cdicttext # noqa: RET504 + return cdicttext # noqa: DOC201, RET504 ############################################################# def cdictcheck(self, data): # noqa: PLR6301 @@ -366,7 +366,7 @@ def cdictcheck(self, data): # noqa: PLR6301 # Start time startT = hydroutil.extract_element_from_json(data, ['Events', 'StartTime']) # noqa: N806 if startT == [None]: - return -1 + return -1 # noqa: DOC201 # End time endT = hydroutil.extract_element_from_json(data, ['Events', 'EndTime']) # noqa: N806 @@ -489,4 +489,4 @@ def cdictFtext(self, data): # noqa: N802 cdicttext = cdicttext + 'direction\t(1 0 0);\n\t\t\t' # noqa: PLR6104 cdicttext = cdicttext + 'cumulative\tno;\n\t\t}\n\t}\n}' # noqa: PLR6104 - return cdicttext # noqa: RET504 + return cdicttext # noqa: DOC201, RET504 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py index d3e42227f..aba38ce8b 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py @@ -84,7 +84,7 @@ def turbtext(self, data): turbtext = turbtext + '\tturbulence\ton;\n' # noqa: PLR6104 turbtext = turbtext + '\tprintCoeffs\ton;\n}\n' # noqa: PLR6104 - return turbtext + return turbtext # noqa: DOC201 ############################################################# def turbheader(self): # noqa: PLR6301 @@ -106,4 +106,4 @@ def turbheader(self): # noqa: PLR6301 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py index d2f95e84d..eb16f74e5 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py @@ -105,7 +105,7 @@ def Utext(self, data, fipath, patches): # noqa: N802 utext = utext + '}\n\n' # noqa: PLR6104 # Return the text for velocity BC - return utext # noqa: RET504 + return utext # noqa: DOC201, RET504 ############################################################# def Uheader(self): # noqa: N802, PLR6301 @@ -130,7 +130,7 @@ def Uheader(self): # noqa: N802, PLR6301 header = header + 'internalField\tuniform (0 0 0);\n\n' # noqa: PLR6104 # Return the header for U file - return header # noqa: RET504 + return header # noqa: DOC201, RET504 ############################################################# def Upatchtext(self, data, Utype, patchname, fipath, numMovWall): # noqa: C901, N802, N803 @@ -345,7 +345,7 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall): # noqa: C901, Utext = Utext + 'type\tempty;\n\t}\n' # noqa: N806, PLR6104 # Return the header for U file - return Utext + return Utext # noqa: DOC201 ############################################################# def Uchecks(self, data, fipath, patches): # noqa: C901, N802, PLR6301 @@ -384,7 +384,7 @@ def Uchecks(self, data, fipath, patches): # noqa: C901, N802, PLR6301 # Checking for multiple moving walls numMovWall += 1 # noqa: N806 if numMovWall > 1: - return -1 + return -1 # noqa: DOC201 # Check for existing 
moving wall files dispfilename = hydroutil.extract_element_from_json( diff --git a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py index 8ec5da444..1bd139d66 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py +++ b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py @@ -169,7 +169,7 @@ def createfolder(self, data, path, args): # noqa: PLR6301 scriptfile.close() # Return completion flag - return 0 + return 0 # noqa: DOC201 ############################################################# def creategeometry(self, data, path): # noqa: PLR6301 @@ -192,7 +192,7 @@ def creategeometry(self, data, path): # noqa: PLR6301 # Create the geometry related files Geometry = of7Geometry() # noqa: N806 if int(mesher[0]) == 1: - return 0 + return 0 # noqa: DOC201 elif int(mesher[0]) == 0 or int(mesher[0]) == 2: # noqa: RET505, PLR2004 geomcode = Geometry.geomcheck(data, path) if geomcode == -1: @@ -245,7 +245,7 @@ def createmesh(self, data, path): # noqa: PLR6301 Meshing = of7Meshing() # noqa: N806 meshcode = Meshing.meshcheck(data, path) if meshcode == -1: - return -1 + return -1 # noqa: DOC201 elif int(mesher[0]) == 0: # noqa: RET505 # blockMesh bmeshtext = Meshing.bmeshtext(data) @@ -295,7 +295,7 @@ def materials(self, data, path): # noqa: PLR6301 Materials = of7Materials() # noqa: N806 matcode = Materials.matcheck(data) if matcode == -1: - return -1 + return -1 # noqa: DOC201 else: # noqa: RET505 mattext = Materials.mattext(data) fname = 'transportProperties' @@ -320,7 +320,7 @@ def initial(self, data, path): # noqa: PLR6301 Inicond = of7Initial() # noqa: N806 initcode = Inicond.alphacheck(data, path) if initcode == -1: - return -1 + return -1 # noqa: DOC201 else: # noqa: RET505 alphatext = Inicond.alphatext(data, path) fname = 'setFieldsDict' @@ -355,7 +355,7 @@ def boundary(self, data, path): # noqa: PLR6301 # Check for boundary conditions here ecode = Uboundary.Uchecks(data, path, patches) if ecode == -1: - return -1 + return -1 # noqa: DOC201 else: # noqa: RET505 # Write the U-file if no errors # Path to the file @@ -421,7 +421,7 @@ def turbulence(self, data, path): # noqa: PLR6301 turbfile.write(turbtext) turbfile.close() - return 0 + return 0 # noqa: DOC201 ############################################################# def parallelize(self, data, path): # noqa: PLR6301 @@ -445,7 +445,7 @@ def parallelize(self, data, path): # noqa: PLR6301 # Scripts Decomp.scripts(data, path) - return 0 + return 0 # noqa: DOC201 ############################################################# def solve(self, data, path): # noqa: PLR6301 @@ -478,7 +478,7 @@ def solve(self, data, path): # noqa: PLR6301 # controlDict ecode = Solve.cdictcheck(data) if ecode == -1: - return -1 + return -1 # noqa: DOC201 else: # noqa: RET505 cdicttext = Solve.cdicttext(data) fname = 'controlDict' @@ -516,7 +516,7 @@ def others(self, data, path): # noqa: PLR6301 gfile.write(gfiletext) gfile.close() - return 0 + return 0 # noqa: DOC201 ############################################################# def dakota(self, args): # noqa: PLR6301 @@ -533,7 +533,7 @@ def dakota(self, args): # noqa: PLR6301 # Dakota Scripts dakota.dakotascripts(args) - return 0 + return 0 # noqa: DOC201 ############################################################# def postprocessing(self, data, path): # noqa: PLR6301 @@ -550,7 +550,7 @@ def postprocessing(self, data, path): # noqa: PLR6301 # controlDict ecode = pprocess.pprocesscheck(data, path) if ecode == -1: - return -1 + return -1 # noqa: DOC201 elif ecode == 0: # 
noqa: RET505 return 0 else: @@ -589,4 +589,4 @@ def cleaning(self, args, path): # noqa: PLR6301 # Dakota Scripts cleaner.cleaning(args, path) - return 0 + return 0 # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py index a81430f26..ce0906e06 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py @@ -104,4 +104,4 @@ def creategeom(self, data, path): # noqa: ARG002, PLR6301 # Write extreme values and building data to temporary file for later usage flumeobj.extremedata(extreme, breadth) - return 0 + return 0 # noqa: DOC201 diff --git a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py index 2ae6bbfbf..42e51dec0 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py @@ -102,4 +102,4 @@ def creategeom(self, data, path): # noqa: PLR6301 # Write extreme values and building data to temporary file for later usage flumeobj.extremedata(extreme, breadth) - return 0 + return 0 # noqa: DOC201 diff --git a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py index 995c5c327..90b043020 100644 --- a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py +++ b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/Istanbul/IstanbulStations.py b/modules/createEVENT/Istanbul/IstanbulStations.py index a8d2cb2c6..8ad973292 100644 --- a/modules/createEVENT/Istanbul/IstanbulStations.py +++ b/modules/createEVENT/Istanbul/IstanbulStations.py @@ -190,7 +190,7 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: RET504 + return distance # noqa: DOC201, RET504 if __name__ == '__main__': diff --git a/modules/createEVENT/M9/M9API.py b/modules/createEVENT/M9/M9API.py index 3e1abb4e5..cd63320e8 100644 --- a/modules/createEVENT/M9/M9API.py +++ b/modules/createEVENT/M9/M9API.py @@ -332,4 +332,4 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: RET504 + return distance # noqa: DOC201, RET504 diff --git a/modules/createEVENT/M9/M9Stations.py b/modules/createEVENT/M9/M9Stations.py index aa393b17f..43d29fd49 100644 --- a/modules/createEVENT/M9/M9Stations.py +++ b/modules/createEVENT/M9/M9Stations.py @@ -229,4 +229,4 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: RET504 + return distance # noqa: DOC201, RET504 diff --git a/modules/createEVENT/MPM/MPM.py b/modules/createEVENT/MPM/MPM.py index 47f0b8832..eda226128 100644 --- a/modules/createEVENT/MPM/MPM.py +++ b/modules/createEVENT/MPM/MPM.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent( # noqa: N802 
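(Not part of the patch - a note on the DOC201 suppressions above and below.) DOC201 is ruff's pydoclint check for a function that returns a value while its docstring has no Returns section; this patch silences it rather than expanding the docstrings. For reference, a minimal sketch of the documented alternative for the repeated directionToDof helpers, assuming a numpy-style docstring convention (an assumption; the patch itself does not configure one):

    def directionToDof(direction):
        """Convert a direction label to a degree of freedom.

        Returns
        -------
        int
            1, 2, or 3 for 'X', 'Y', or 'Z', respectively.
        """
        directioMap = {'X': 1, 'Y': 2, 'Z': 3}
        return directioMap[direction]

Suppressing keeps this commit mechanical; the docstring form above is what would let DOC201 stay enabled for new code.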
diff --git a/modules/createEVENT/MPM/post_process_output.py b/modules/createEVENT/MPM/post_process_output.py index f11de48cf..160877c2e 100644 --- a/modules/createEVENT/MPM/post_process_output.py +++ b/modules/createEVENT/MPM/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p + return probes, time, p # noqa: DOC201 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: RET504 + return sField # noqa: DOC201, RET504 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U + return probes, time, U # noqa: DOC201 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: RET504 + return L # noqa: DOC201, RET504 def psd(x, dt, nseg): # noqa: F811 diff --git a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py index d48fbddb6..31e6a8d1a 100644 --- a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py +++ b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py index f11de48cf..160877c2e 100644 --- a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py +++ b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p + return probes, time, p # noqa: DOC201 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: RET504 + return sField # noqa: DOC201, RET504 def read_openFoam_vector_field(file_name): # noqa: N802 @@ 
-312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: RET504 + return vField # noqa: DOC201, RET504 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U + return probes, time, U # noqa: DOC201 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: RET504 + return L # noqa: DOC201, RET504 def psd(x, dt, nseg): # noqa: F811 diff --git a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py index 256bf0eff..68282a2a0 100644 --- a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py +++ b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py index df764531c..ba2372a62 100644 --- a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py +++ b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py @@ -167,7 +167,7 @@ def convert_accel_units(self, acceleration, from_, to_='cm/sec/sec'): # noqa: C acceleration = np.asarray(acceleration) if from_ == 'g': if to_ == 'g': - return acceleration + return acceleration # noqa: DOC201 if to_ in self.km_sec_square: return acceleration * self.g / 1000.0 if to_ in self.m_sec_square: diff --git a/modules/createEVENT/siteResponse/RegionalSiteResponse.py b/modules/createEVENT/siteResponse/RegionalSiteResponse.py index 5a01d37e0..d626663b8 100644 --- a/modules/createEVENT/siteResponse/RegionalSiteResponse.py +++ b/modules/createEVENT/siteResponse/RegionalSiteResponse.py @@ -86,7 +86,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_time = output_units.get('time', 'sec') f_time = globals().get(unit_time, None) if f_time is None: - raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, RUF100, TRY003 scale_factors = {} @@ -99,7 +99,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 # get the scale factor to standard units f_in = globals().get(input_unit, None) if f_in is None: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'Input 
unit for event files not recognized: {input_unit}' # noqa: EM102 ) @@ -109,7 +109,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_type = base_unit_type if unit_type is None: - raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003 # the output unit depends on the unit type if unit_type == 'acceleration': @@ -122,7 +122,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 f_out = 1.0 / f_length else: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102 ) @@ -131,7 +131,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors + return scale_factors # noqa: DOC201 def postProcess(evtName, input_units, f_scale_units): # noqa: N802, N803, D103 diff --git a/modules/createEVENT/stochasticWave/StochasticWave.py b/modules/createEVENT/stochasticWave/StochasticWave.py index 4c22a4d03..8e870168e 100644 --- a/modules/createEVENT/stochasticWave/StochasticWave.py +++ b/modules/createEVENT/stochasticWave/StochasticWave.py @@ -100,7 +100,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] + return directioMap[direction] # noqa: DOC201 def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor): # noqa: N802, N803 diff --git a/modules/createSAM/AutoSDA/beam_component.py b/modules/createSAM/AutoSDA/beam_component.py index 041bb51cf..1b423f2c3 100644 --- a/modules/createSAM/AutoSDA/beam_component.py +++ b/modules/createSAM/AutoSDA/beam_component.py @@ -181,7 +181,7 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag + return self.flag # noqa: DOC201 def compute_demand_capacity_ratio(self): """This method is used to compute demand to capacity ratios. 
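(Again a note outside the patch proper.) Several hunks above - in whale/main.py, simcenter_common.py, and RegionalSiteResponse.py - append RUF100 to noqa lists that already carry DOC501. The likely reason, though the patch does not state it, is that the DOC5xx pydoclint rules are preview-only: with them disabled, ruff's RUF100 check ("unused noqa directive") would flag the DOC501 code itself, so RUF100 is added to the same directive to keep it quiet. A small illustration with a hypothetical helper (load_config is not from this codebase):

    import json

    def load_config(path):
        """Read a JSON configuration file and return its contents."""
        if not path.endswith('.json'):
            # DOC501 (undocumented raise) is inert while DOC rules are off;
            # listing RUF100 stops ruff from reporting it as an unused code.
            raise ValueError(f'Expected a .json file: {path}')  # noqa: DOC501, RUF100
        with open(path) as f:
            return json.load(f)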
diff --git a/modules/createSAM/AutoSDA/column_component.py b/modules/createSAM/AutoSDA/column_component.py index cbd83b50e..fcdc29ee3 100644 --- a/modules/createSAM/AutoSDA/column_component.py +++ b/modules/createSAM/AutoSDA/column_component.py @@ -264,7 +264,7 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag + return self.flag # noqa: DOC201 def compute_demand_capacity_ratio(self): """This method is used to calculate the demand to capacity ratios for column components diff --git a/modules/createSAM/AutoSDA/connection_part.py b/modules/createSAM/AutoSDA/connection_part.py index 7459d1190..f987d68b9 100644 --- a/modules/createSAM/AutoSDA/connection_part.py +++ b/modules/createSAM/AutoSDA/connection_part.py @@ -740,4 +740,4 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag + return self.flag # noqa: DOC201 diff --git a/modules/createSAM/AutoSDA/help_functions.py b/modules/createSAM/AutoSDA/help_functions.py index 6fbdb23c6..e57b48b57 100644 --- a/modules/createSAM/AutoSDA/help_functions.py +++ b/modules/createSAM/AutoSDA/help_functions.py @@ -50,7 +50,7 @@ def determine_Fa_coefficient(site_class, Ss): # noqa: C901, N802, N803 Fa = None # noqa: N806 print('Site class is entered with an invalid value') # noqa: T201 - return Fa + return Fa # noqa: DOC201 def determine_Fv_coefficient(site_class, S1): # noqa: C901, N802, N803 @@ -94,7 +94,7 @@ def determine_Fv_coefficient(site_class, S1): # noqa: C901, N802, N803 Fv = None # noqa: N806 print('Site class is entered with an invalid value') # noqa: T201 - return Fv + return Fv # noqa: DOC201 def calculate_DBE_acceleration(Ss, S1, Fa, Fv): # noqa: N802, N803 @@ -111,7 +111,7 @@ def calculate_DBE_acceleration(Ss, S1, Fa, Fv): # noqa: N802, N803 SM1 = Fv * S1 # noqa: N806 SDS = 2 / 3 * SMS # noqa: N806 SD1 = 2 / 3 * SM1 # noqa: N806 - return SMS, SM1, SDS, SD1 + return SMS, SM1, SDS, SD1 # noqa: DOC201 def determine_Cu_coefficient(SD1): # noqa: N802, N803 @@ -133,7 +133,7 @@ def determine_Cu_coefficient(SD1): # noqa: N802, N803 else: Cu = 1.4 # noqa: N806 - return Cu + return Cu # noqa: DOC201 def determine_floor_height( @@ -161,7 +161,7 @@ def determine_floor_height( level - 2 ) - return floor_height + return floor_height # noqa: DOC201 def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie): # noqa: N802, N803 @@ -212,7 +212,7 @@ def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie): # noqa: N802, N803 else: pass - return Cs + return Cs # noqa: DOC201 def determine_k_coeficient(period): @@ -227,7 +227,7 @@ def determine_k_coeficient(period): else: k = 1 + 0.5 * (period - 0.5) - return k + return k # noqa: DOC201 def calculate_seismic_force(base_shear, floor_weight, floor_height, k): @@ -252,7 +252,7 @@ def calculate_seismic_force(base_shear, floor_weight, floor_height, k): for story in range(len(floor_weight) - 1, -1, -1): story_shear[story] = np.sum(seismic_force[story:]) - return seismic_force, story_shear + return seismic_force, story_shear # noqa: DOC201 def find_section_candidate(target_depth, section_database): @@ -267,7 +267,7 @@ def find_section_candidate(target_depth, section_database): if match: candidate_index.append(indx) candidates = section_database.loc[candidate_index, 'section size'] - return candidates # noqa: RET504 + return candidates # noqa: DOC201, RET504 def search_member_size(target_name, target_quantity, 
candidate, section_database): @@ -299,7 +299,7 @@ def search_member_size(target_name, target_quantity, candidate, section_database section_size = section_database.loc[ candidate_index[min_index[0][0]], 'section size' ] - return section_size + return section_size # noqa: DOC201 def search_section_property(target_size, section_database): @@ -316,7 +316,7 @@ def search_section_property(target_size, section_database): for indx in np.array(section_database['index']): if target_size == section_database.loc[indx, 'section size']: section_info = section_database.loc[indx, :] - return section_info.to_dict() + return section_info.to_dict() # noqa: DOC201 except: # noqa: E722 sys.stderr.write( 'Error: wrong size nominated!\nNo such size exists in section database!' @@ -336,7 +336,7 @@ def decrease_member_size(candidate, current_size): # This means the smallest candidate still cannot make design drift close to drift limit, # which further means the smallest section candidate is too large. sys.stderr.write('The lower bound for depth initialization is too large!\n') - return candidate[candidate_pool_index + 1] + return candidate[candidate_pool_index + 1] # noqa: DOC201 def extract_depth(size): @@ -346,7 +346,7 @@ def extract_depth(size): """ # noqa: D205, D400, D401, D404 # Use Python regular expression to extract the char between 'W' and 'X', which then become depth output = re.findall(r'.*W(.*)X.*', size) - return int(output[0]) + return int(output[0]) # noqa: DOC201 def extract_weight(size): @@ -357,7 +357,7 @@ def extract_weight(size): # Use Python regular expression to extract the char after 'W' to the end of the string, # which then becomes weight output = re.findall(r'.X(.*)', size) - return int(output[0]) + return int(output[0]) # noqa: DOC201 def constructability_helper( # noqa: C901 @@ -541,7 +541,7 @@ def constructability_helper( # noqa: C901 variation_story.pop() # Update the ending index for next "identical story block" ending_index = variation_story[-1] - return section_size + return section_size # noqa: DOC201 # # Loop over all stories from top to bottom to consider the constructability # starting_story = total_story - 1 @@ -596,4 +596,4 @@ def increase_member_size(candidate, current_size): if candidate_pool_index - 1 < 0: # Make sure the index does not exceed the bound # This means the largest candidate still fails to satisfy the requirement sys.stderr.write('The upper bound for depth initialization is too small!\n') - return candidate[candidate_pool_index - 1] + return candidate[candidate_pool_index - 1] # noqa: DOC201 diff --git a/modules/performDL/pelicun3/DL_visuals.py b/modules/performDL/pelicun3/DL_visuals.py index 57eaf8b5b..ee5e55fc2 100644 --- a/modules/performDL/pelicun3/DL_visuals.py +++ b/modules/performDL/pelicun3/DL_visuals.py @@ -116,26 +116,26 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 5: cl.scales['5']['seq']['Reds'], } - if comp_data.loc[('Incomplete', '')] != 1: + if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031 p_min, p_max = 0.01, 0.9 d_min = np.inf d_max = -np.inf LS_count = 0 # noqa: N806 for LS in limit_states: # noqa: N806 - if comp_data.loc[(LS, 'Family')] == 'normal': + if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031 d_min_i, d_max_i = norm.ppf( [p_min, p_max], - loc=comp_data.loc[(LS, 'Theta_0')], - scale=comp_data.loc[(LS, 'Theta_1')] - * comp_data.loc[(LS, 'Theta_0')], + loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 + scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031 + * comp_data.loc[(LS, 
'Theta_0')], # noqa: RUF031 ) - elif comp_data.loc[(LS, 'Family')] == 'lognormal': + elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031 d_min_i, d_max_i = np.exp( norm.ppf( [p_min, p_max], - loc=np.log(comp_data.loc[(LS, 'Theta_0')]), - scale=comp_data.loc[(LS, 'Theta_1')], + loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031 + scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031 ) ) else: @@ -149,18 +149,18 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 demand_vals = np.linspace(d_min, d_max, num=100) for i_ls, LS in enumerate(limit_states): # noqa: N806 - if comp_data.loc[(LS, 'Family')] == 'normal': + if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031 cdf_vals = norm.cdf( demand_vals, - loc=comp_data.loc[(LS, 'Theta_0')], - scale=comp_data.loc[(LS, 'Theta_1')] - * comp_data.loc[(LS, 'Theta_0')], + loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 + scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031 + * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 ) - elif comp_data.loc[(LS, 'Family')] == 'lognormal': + elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031 cdf_vals = norm.cdf( np.log(demand_vals), - loc=np.log(comp_data.loc[(LS, 'Theta_0')]), - scale=comp_data.loc[(LS, 'Theta_1')], + loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031 + scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031 ) else: continue @@ -385,11 +385,11 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 gridcolor='rgb(192,192,192)', ) - demand_unit = comp_data.loc[('Demand', 'Unit')] + demand_unit = comp_data.loc[('Demand', 'Unit')] # noqa: RUF031 if demand_unit == 'unitless': demand_unit = '-' fig.update_xaxes( - title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]", + title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]", # noqa: RUF031 **shared_ax_props, ) @@ -465,7 +465,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, # perform plotting for each repair consequence type independently for c_type in repair_df.loc[comp_id].index: # load the component-specific part of the database - comp_data = repair_df.loc[(comp_id, c_type)] + comp_data = repair_df.loc[(comp_id, c_type)] # noqa: RUF031 # and the component-specific metadata - if it exists if repair_meta != None: # noqa: E711 @@ -620,7 +620,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, ), } - if comp_data.loc[('Incomplete', '')] != 1: + if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031 # set the parameters for displaying uncertainty p_min, p_max = 0.16, 0.84 # +- 1 std # noqa: F841 @@ -923,13 +923,13 @@ def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, gridcolor='rgb(220,220,220)', ) - quantity_unit = comp_data.loc[('Quantity', 'Unit')] + quantity_unit = comp_data.loc[('Quantity', 'Unit')] # noqa: RUF031 if quantity_unit in ['unitless', '1 EA', '1 ea']: # noqa: PLR6201 quantity_unit = '-' elif quantity_unit.split()[0] == '1': quantity_unit = quantity_unit.split()[1] - dv_unit = comp_data.loc[('DV', 'Unit')] + dv_unit = comp_data.loc[('DV', 'Unit')] # noqa: RUF031 if dv_unit == 'unitless': dv_unit = '-' diff --git a/modules/performHUA/pyincore_data/censusutil.py b/modules/performHUA/pyincore_data/censusutil.py index b34f8f1c9..e733b6f35 100644 --- a/modules/performHUA/pyincore_data/censusutil.py +++ b/modules/performHUA/pyincore_data/censusutil.py @@ -16,7 +16,7 @@ import geopandas as gpd import pandas as pd 
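The fragility hunks in DL_visuals.py above evaluate each limit state as a normal or lognormal CDF over demand, with Theta_0 as the median and Theta_1 as the dispersion (a coefficient of variation in the normal case, hence the Theta_1 * Theta_0 scale). A minimal sketch of both families, using made-up parameter values rather than anything from a component database:

import numpy as np
from scipy.stats import norm

theta_0, theta_1 = 0.5, 0.4            # illustrative median and dispersion
demand_vals = np.linspace(0.01, 2.0, 100)

# normal family: Theta_1 scales the median, so scale = Theta_1 * Theta_0
cdf_normal = norm.cdf(demand_vals, loc=theta_0, scale=theta_1 * theta_0)

# lognormal family: evaluate the normal CDF in log space
cdf_lognormal = norm.cdf(np.log(demand_vals), loc=np.log(theta_0), scale=theta_1)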
import requests -from pyincore_data import globals +from pyincore_data import globals # noqa: A004 logger = globals.LOGGER @@ -62,7 +62,7 @@ def generate_census_api_url( if county is None: error_msg = 'State and county value must be provided when geo_type is provided.' logger.error(error_msg) - raise Exception(error_msg) # noqa: DOC501, TRY002 + raise Exception(error_msg) # noqa: DOC501, RUF100, TRY002 # Set up url for Census API base_url = f'https://api.census.gov/data/{year}/{data_source}' @@ -107,7 +107,7 @@ def request_census_api(data_url): api_json = request_json.json() api_df = pd.DataFrame(columns=api_json[0], data=api_json[1:]) - return api_df # noqa: RET504 + return api_df # noqa: DOC201, RET504 @staticmethod def get_blockdata_for_demographics( # noqa: C901 @@ -191,7 +191,7 @@ def get_blockdata_for_demographics( # noqa: C901 else: print('Only 2000, 2010, and 2020 decennial census supported') # noqa: T201 - return None + return None # noqa: DOC201 # Make directory to save output if not os.path.exists(output_dir): # noqa: PTH110 @@ -860,7 +860,7 @@ def get_blockgroupdata_for_income( # noqa: C901 print('Done creating household income shapefile') # noqa: T201 - return cen_blockgroup[save_columns] + return cen_blockgroup[save_columns] # noqa: DOC201 @staticmethod def convert_dislocation_gpd_to_shapefile(in_gpd, programname, savefile): diff --git a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py index 4b41c8251..c98eeb1c5 100644 --- a/modules/performREC/pyrecodes/run_pyrecodes.py +++ b/modules/performREC/pyrecodes/run_pyrecodes.py @@ -1,182 +1,238 @@ -import json, os, shapely, argparse, sys, ujson, importlib +import json, os, shapely, argparse, sys, ujson, importlib # noqa: CPY001, INP001, I001, E401, D100 import geopandas as gpd import numpy as np import pandas as pd from pathlib import Path + # Delete below when pyrecodes can be installed as stand alone -import sys +import sys # noqa: F811 + sys.path.insert(0, '/Users/jinyanzhao/Desktop/SimCenterBuild/r2d_pyrecodes/') from pyrecodes import main -def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): - +def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): # noqa: ARG001, C901, N803, D103 # Initiate directory - rec_ouput_dir = os.path.join(inputRWHALE['runDir'],"Results", "Recovery") - if not os.path.exists(rec_ouput_dir): - os.mkdir(rec_ouput_dir) + rec_ouput_dir = os.path.join(inputRWHALE['runDir'], 'Results', 'Recovery') # noqa: PTH118 + if not os.path.exists(rec_ouput_dir): # noqa: PTH110 + os.mkdir(rec_ouput_dir) # noqa: PTH102 # Find the realizations to run damage_input = rec_config.pop('DamageInput') - realizations_to_run = select_realizations_to_run(\ - damage_input,inputRWHALE) - + realizations_to_run = select_realizations_to_run(damage_input, inputRWHALE) + # Replace SimCenterDefault with correct path - cmp_lib = rec_config["ComponentLibrary"] + cmp_lib = rec_config['ComponentLibrary'] if cmp_lib.startswith('SimCenterDefault'): cmp_lib_name = cmp_lib.split('/')[1] - cmp_lib_dir = os.path.dirname(os.path.realpath(__file__)) - cmp_lib = os.path.join(cmp_lib_dir, cmp_lib_name) - rec_config["ComponentLibrary"] = cmp_lib + cmp_lib_dir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120 + cmp_lib = os.path.join(cmp_lib_dir, cmp_lib_name) # noqa: PTH118 + rec_config['ComponentLibrary'] = cmp_lib # loop through each realizations. 
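request_census_api above reshapes the Census API's JSON payload, a list of rows whose first row is the header, into a DataFrame. A sketch of that reshaping with a hard-coded payload standing in for a live request; the row contents are illustrative:

import pandas as pd

api_json = [
    ['NAME', 'P1_001N', 'state', 'county'],                  # header row
    ['Alameda County, California', '1682353', '06', '001'],  # data rows follow
]
api_df = pd.DataFrame(columns=api_json[0], data=api_json[1:])
print(api_df)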
Needs to be parallelized # Create the base of system configuration json system_configuration = create_system_configuration(rec_config) # Create the base of main json - main_json = dict() - main_json.update({"ComponentLibrary": { - "ComponentLibraryCreatorClass": "JSONComponentLibraryCreator", - "ComponentLibraryFile": rec_config["ComponentLibrary"] - }}) + main_json = dict() # noqa: C408 + main_json.update( + { + 'ComponentLibrary': { + 'ComponentLibraryCreatorClass': 'JSONComponentLibraryCreator', + 'ComponentLibraryFile': rec_config['ComponentLibrary'], + } + } + ) # initialize a dict to accumulate recovery results stats - result_det_path = os.path.join(inputRWHALE['runDir'],"Results", - f"Results_det.json") - with open(result_det_path, 'r') as f: + result_det_path = os.path.join( # noqa: PTH118 + inputRWHALE['runDir'], 'Results', f'Results_det.json' # noqa: F541 + ) + with open(result_det_path, 'r') as f: # noqa: PTH123, PLW1514, UP015 results_det = json.load(f) - result_agg = dict() - resilience_results = dict() + result_agg = dict() # noqa: C408 + resilience_results = dict() # noqa: C408 # Loop through realizations and run pyrecodes - numP = 1 - procID = 0 - doParallel = False - mpi_spec = importlib.util.find_spec("mpi4py") + numP = 1 # noqa: N806 + procID = 0 # noqa: N806 + doParallel = False # noqa: N806 + mpi_spec = importlib.util.find_spec('mpi4py') found = mpi_spec is not None if found and parallelType == 'parRUN': - import mpi4py - from mpi4py import MPI + import mpi4py # noqa: PLC0415 + from mpi4py import MPI # noqa: PLC0415 + comm = MPI.COMM_WORLD - numP = comm.Get_size() - procID = comm.Get_rank() - if numP < 2: - doParallel = False - numP = 1 - procID = 0 + numP = comm.Get_size() # noqa: N806 + procID = comm.Get_rank() # noqa: N806 + if numP < 2: # noqa: PLR2004 + doParallel = False # noqa: N806 + numP = 1 # noqa: N806 + procID = 0 # noqa: N806 else: - doParallel = True + doParallel = True # noqa: N806 count = 0 - needsInitiation = True + needsInitiation = True # noqa: N806 ind_in_rank = 0 - for ind, rlz_ind in enumerate(realizations_to_run): + for ind, rlz_ind in enumerate(realizations_to_run): # noqa: B007, PLR1702, FURB148 # Create a realization directory if count % numP == procID: - rlz_dir = os.path.join(rec_ouput_dir,str(rlz_ind)) - if not os.path.exists(rlz_dir): - os.mkdir(rlz_dir) + rlz_dir = os.path.join(rec_ouput_dir, str(rlz_ind)) # noqa: PTH118 + if not os.path.exists(rlz_dir): # noqa: PTH110 + os.mkdir(rlz_dir) # noqa: PTH102 # Update the system_configuration json - damage_rlz_file = os.path.join(inputRWHALE['runDir'],"Results",\ - f"Results_{int(rlz_ind)}.json") - DamageInput = {"Type": "R2DDamageInput", - "Parameters": {"DamageFile": damage_rlz_file}} - system_configuration.update({"DamageInput":DamageInput}) + damage_rlz_file = os.path.join( # noqa: PTH118 + inputRWHALE['runDir'], 'Results', f'Results_{int(rlz_ind)}.json' + ) + DamageInput = { # noqa: N806 + 'Type': 'R2DDamageInput', + 'Parameters': {'DamageFile': damage_rlz_file}, + } + system_configuration.update({'DamageInput': DamageInput}) # Write the system_configureation to a file - system_configuration_file = os.path.join(rlz_dir, \ - "SystemConfiguration.json") - with open(system_configuration_file, 'w') as f: + system_configuration_file = os.path.join( # noqa: PTH118 + rlz_dir, 'SystemConfiguration.json' + ) + with open(system_configuration_file, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(system_configuration, f) - + # Update the main json - main_json.update({"System": { - "SystemCreatorClass": 
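run_pyrecodes spreads realizations across MPI ranks round-robin: realization index i is processed by the rank satisfying i % numP == procID, and each rank sizes its result buffers as floor(N / numP) plus one when its rank id is below N % numP. A minimal sketch of that split, assuming mpi4py is installed and the script is launched under an MPI runner:

from mpi4py import MPI

comm = MPI.COMM_WORLD
num_p, proc_id = comm.Get_size(), comm.Get_rank()

realizations = list(range(10))  # illustrative workload
mine = [r for i, r in enumerate(realizations) if i % num_p == proc_id]

# per-rank buffer size, matching the floor-plus-remainder rule above
n_local = len(realizations) // num_p + (1 if proc_id < len(realizations) % num_p else 0)
assert n_local == len(mine)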
"ConcreteSystemCreator", - "SystemClass": "BuiltEnvironmentSystem", - "SystemConfigurationFile": system_configuration_file - }}) + main_json.update( + { + 'System': { + 'SystemCreatorClass': 'ConcreteSystemCreator', + 'SystemClass': 'BuiltEnvironmentSystem', + 'SystemConfigurationFile': system_configuration_file, + } + } + ) # Write the main json to a file - main_file = os.path.join(rlz_dir, "main.json") - with open(main_file, 'w') as f: + main_file = os.path.join(rlz_dir, 'main.json') # noqa: PTH118 + with open(main_file, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(main_json, f) system = main.run(main_file) system.calculate_resilience() - # Append the recovery time to results_rlz + # Append the recovery time to results_rlz if needsInitiation: - needsInitiation = False - num_of_rlz_per_rank = int(np.floor(len(realizations_to_run)/numP)) - if procID < len(realizations_to_run)%numP: + needsInitiation = False # noqa: N806 + num_of_rlz_per_rank = int(np.floor(len(realizations_to_run) / numP)) + if procID < len(realizations_to_run) % numP: num_of_rlz_per_rank += 1 # Initialize resilience_results - resilience_results_buffer = dict() + resilience_results_buffer = dict() # noqa: C408 resilience_calculator_id = 0 - resilience_results.update({ - "time_steps": list(range(0, system.MAX_TIME_STEP+1)) - }) - resources_to_plot = system.resilience_calculators[resilience_calculator_id].system_supply.keys() - for resource_name in resources_to_plot: - resilience_results_buffer.update({ - resource_name: { - "Supply": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]), - "Demand": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]), - "Consumption": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]) + resilience_results.update( + {'time_steps': list(range(0, system.MAX_TIME_STEP + 1))} # noqa: PIE808 + ) + resources_to_plot = system.resilience_calculators[ + resilience_calculator_id + ].system_supply.keys() + for resource_name in resources_to_plot: + resilience_results_buffer.update( + { + resource_name: { + 'Supply': np.zeros( + [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1] + ), + 'Demand': np.zeros( + [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1] + ), + 'Consumption': np.zeros( + [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1] + ), + } } - }) + ) # Initialize result_agg - result_agg_buffer = dict() + result_agg_buffer = dict() # noqa: C408 for asset_type, item in results_det.items(): - asset_type_result = dict() + asset_type_result = dict() # noqa: C408 for asset_subtype, asset_subtype_item in item.items(): - asset_subtype_result = dict() - for aim_id, aim in asset_subtype_item.items(): - asset_subtype_result.update({aim_id:{ - "RecoveryDuration":np.zeros(num_of_rlz_per_rank) - }}) - asset_type_result.update({asset_subtype:asset_subtype_result}) - result_agg_buffer.update({asset_type:asset_type_result}) + asset_subtype_result = dict() # noqa: C408 + for aim_id, aim in asset_subtype_item.items(): # noqa: B007 + asset_subtype_result.update( + { + aim_id: { + 'RecoveryDuration': np.zeros( + num_of_rlz_per_rank + ) + } + } + ) + asset_type_result.update( + {asset_subtype: asset_subtype_result} + ) + result_agg_buffer.update({asset_type: asset_type_result}) del results_det - - resilience_result_rlz_i = dict() + + resilience_result_rlz_i = dict() # noqa: C408 for resource_name in resources_to_plot: - resilience_result_rlz_i.update({ - "time_steps": list(range(0, system.time_step+1)), + resilience_result_rlz_i.update( + { + 'time_steps': list(range(0, system.time_step + 1)), # noqa: PIE808 
resource_name: { - "Supply": system.resilience_calculators[resilience_calculator_id].system_supply[resource_name][:system.time_step+1], - "Demand": system.resilience_calculators[resilience_calculator_id].system_demand[resource_name][:system.time_step+1], - "Consumption": system.resilience_calculators[resilience_calculator_id].system_consumption[resource_name][:system.time_step+1] - } + 'Supply': system.resilience_calculators[ + resilience_calculator_id + ].system_supply[resource_name][: system.time_step + 1], + 'Demand': system.resilience_calculators[ + resilience_calculator_id + ].system_demand[resource_name][: system.time_step + 1], + 'Consumption': system.resilience_calculators[ + resilience_calculator_id + ].system_consumption[resource_name][ + : system.time_step + 1 + ], + }, } - ) - resilience_results_buffer[resource_name]['Supply'][ind_in_rank,:system.time_step+1] = \ - system.resilience_calculators[resilience_calculator_id].system_supply[resource_name][:system.time_step+1] - resilience_results_buffer[resource_name]['Demand'][ind_in_rank,:system.time_step+1] = \ - system.resilience_calculators[resilience_calculator_id].system_demand[resource_name][:system.time_step+1] - resilience_results_buffer[resource_name]['Consumption'][ind_in_rank,:system.time_step+1] = \ - system.resilience_calculators[resilience_calculator_id].system_consumption[resource_name][:system.time_step+1] - resilience_result_rlz_i_file = os.path.join(rlz_dir, "ResilienceResult.json") - with open(resilience_result_rlz_i_file, 'w') as f: + ) + resilience_results_buffer[resource_name]['Supply'][ + ind_in_rank, : system.time_step + 1 + ] = system.resilience_calculators[ + resilience_calculator_id + ].system_supply[resource_name][: system.time_step + 1] + resilience_results_buffer[resource_name]['Demand'][ + ind_in_rank, : system.time_step + 1 + ] = system.resilience_calculators[ + resilience_calculator_id + ].system_demand[resource_name][: system.time_step + 1] + resilience_results_buffer[resource_name]['Consumption'][ + ind_in_rank, : system.time_step + 1 + ] = system.resilience_calculators[ + resilience_calculator_id + ].system_consumption[resource_name][: system.time_step + 1] + resilience_result_rlz_i_file = os.path.join( # noqa: PTH118 + rlz_dir, 'ResilienceResult.json' + ) + with open(resilience_result_rlz_i_file, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(resilience_result_rlz_i, f) - result_file_name = os.path.join(inputRWHALE['runDir'],"Results", - f"Results_{rlz_ind}.json") - with open(result_file_name, 'r') as f: + result_file_name = os.path.join( # noqa: PTH118 + inputRWHALE['runDir'], 'Results', f'Results_{rlz_ind}.json' + ) + with open(result_file_name, 'r') as f: # noqa: PTH123, PLW1514, UP015 results = json.load(f) for comp in system.components: if getattr(comp, 'r2d_comp', False) is True: - recovery_duration = getattr(comp, 'recoverd_time_step',system.MAX_TIME_STEP) - \ - system.DISASTER_TIME_STEP + recovery_duration = ( + getattr(comp, 'recoverd_time_step', system.MAX_TIME_STEP) + - system.DISASTER_TIME_STEP + ) recovery_duration = max(0, recovery_duration) - results[comp.asset_type][comp.asset_subtype][comp.aim_id].update({ - "Recovery": {"Duration":recovery_duration} - }) - result_agg_buffer[comp.asset_type][comp.asset_subtype][comp.aim_id]\ - ['RecoveryDuration'][ind_in_rank] = recovery_duration - with open(result_file_name, 'w') as f: + results[comp.asset_type][comp.asset_subtype][comp.aim_id].update( + {'Recovery': {'Duration': recovery_duration}} + ) + 
result_agg_buffer[comp.asset_type][comp.asset_subtype][ + comp.aim_id + ]['RecoveryDuration'][ind_in_rank] = recovery_duration + with open(result_file_name, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(results, f) ind_in_rank += 1 - count = count + 1 + count = count + 1 # noqa: PLR6104 # wait for all to finish if doParallel: @@ -188,127 +244,196 @@ def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): if doParallel: # gather results_agg for asset_type, item in result_agg_buffer.items(): - asset_type_result = dict() + asset_type_result = dict() # noqa: C408 for asset_subtype, asset_subtype_item in item.items(): - asset_subtype_result = dict() - for aim_id, aim in asset_subtype_item.items(): - asset_subtype_result.update({aim_id:{ - "RecoveryDuration":comm.gather(result_agg_buffer[asset_type][asset_subtype], root=0) - }}) - asset_type_result.update({asset_subtype:asset_subtype_result}) - result_agg.update({asset_type:asset_type_result}) + asset_subtype_result = dict() # noqa: C408 + for aim_id, aim in asset_subtype_item.items(): # noqa: B007 + asset_subtype_result.update( + { + aim_id: { + 'RecoveryDuration': comm.gather( + result_agg_buffer[asset_type][asset_subtype], + root=0, + ) + } + } + ) + asset_type_result.update({asset_subtype: asset_subtype_result}) + result_agg.update({asset_type: asset_type_result}) # gather resilience_resutls for resource_name in resources_to_plot: if procID == 0: - resilience_results.update({ - resource_name: { - "Supply": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]), - "Demand": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]), - "Consumption": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]) + resilience_results.update( + { + resource_name: { + 'Supply': np.zeros( + [len(realizations_to_run), system.MAX_TIME_STEP + 1] + ), + 'Demand': np.zeros( + [len(realizations_to_run), system.MAX_TIME_STEP + 1] + ), + 'Consumption': np.zeros( + [len(realizations_to_run), system.MAX_TIME_STEP + 1] + ), + } } - }) - comm.gather(resilience_results_buffer[resource_name]["Supply"], - resilience_results[resource_name]["Supply"], root=0) - comm.gather(resilience_results_buffer[resource_name]["Demand"], - resilience_results[resource_name]["Demand"], root=0) - comm.gather(resilience_results_buffer[resource_name]["Consumption"], - resilience_results[resource_name]["Consumption"], root=0) + ) + comm.gather( + resilience_results_buffer[resource_name]['Supply'], + resilience_results[resource_name]['Supply'], + root=0, + ) + comm.gather( + resilience_results_buffer[resource_name]['Demand'], + resilience_results[resource_name]['Demand'], + root=0, + ) + comm.gather( + resilience_results_buffer[resource_name]['Consumption'], + resilience_results[resource_name]['Consumption'], + root=0, + ) else: - for resource_name in resources_to_plot: - resilience_results.update({ - resource_name: resilience_results_buffer[resource_name] - }) + for resource_name in resources_to_plot: + resilience_results.update( + {resource_name: resilience_results_buffer[resource_name]} + ) result_agg = result_agg_buffer - if procID==0: - # Calculate stats of the results and add to results_det.json - with open(result_det_path, 'r') as f: + if procID == 0: + # Calculate stats of the results and add to results_det.json + with open(result_det_path, 'r') as f: # noqa: PTH123, PLW1514, UP015 results_det = json.load(f) for asset_type, item in result_agg.items(): for asset_subtype, asset_subtype_item in item.items(): for aim_id, aim in asset_subtype_item.items(): 
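Once each rank finishes its share, the per-rank recovery durations are gathered to rank 0 and summarized. Note that mpi4py's lowercase comm.gather is the pickle-based variant that returns a list of objects on the root (the buffer-filling form is the uppercase comm.Gather). A sketch of gathering per-rank NumPy buffers and reducing to mean and standard deviation on the root, with random stand-in data:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rng = np.random.default_rng(comm.Get_rank())
local_durations = rng.uniform(0, 100, size=3)    # stand-in for per-rank results

gathered = comm.gather(local_durations, root=0)  # list of arrays on rank 0, None elsewhere
if comm.Get_rank() == 0:
    all_durations = np.concatenate(gathered)
    print(all_durations.mean(), all_durations.std())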
- if 'R2Dres' not in results_det[asset_type][asset_subtype][aim_id].keys(): - results_det[asset_type][asset_subtype][aim_id].update({'R2Dres':{}}) - results_det[asset_type][asset_subtype][aim_id]['R2Dres'].update({ - "R2Dres_mean_RecoveryDuration":aim['RecoveryDuration'].mean(), - "R2Dres_std_RecoveryDuration":aim['RecoveryDuration'].std() - }) - with open(result_det_path, 'w') as f: + if ( + 'R2Dres' # noqa: SIM118 + not in results_det[asset_type][asset_subtype][aim_id].keys() + ): + results_det[asset_type][asset_subtype][aim_id].update( + {'R2Dres': {}} + ) + results_det[asset_type][asset_subtype][aim_id]['R2Dres'].update( + { + 'R2Dres_mean_RecoveryDuration': aim[ + 'RecoveryDuration' + ].mean(), + 'R2Dres_std_RecoveryDuration': aim[ + 'RecoveryDuration' + ].std(), + } + ) + with open(result_det_path, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(results_det, f) - - recovery_result_path = os.path.join(rec_ouput_dir, "ResilienceResult.json") - for resource_name in resources_to_plot: - resilience_results[resource_name].update({ - 'R2Dres_mean_Supply':resilience_results[resource_name]['Supply'].mean(axis=0).tolist(), - 'R2Dres_std_Supply':resilience_results[resource_name]['Supply'].std(axis=0).tolist(), - 'R2Dres_mean_Demand':resilience_results[resource_name]['Demand'].mean(axis=0).tolist(), - 'R2Dres_std_Demand':resilience_results[resource_name]['Demand'].std(axis=0).tolist(), - 'R2Dres_mean_Consumption':resilience_results[resource_name]['Consumption'].mean(axis=0).tolist(), - 'R2Dres_std_Consumption':resilience_results[resource_name]['Consumption'].std(axis=0).tolist() - }) - resilience_results[resource_name].pop("Supply") - resilience_results[resource_name].pop("Demand") - resilience_results[resource_name].pop("Consumption") - - - with open(recovery_result_path, 'w') as f: + + recovery_result_path = os.path.join(rec_ouput_dir, 'ResilienceResult.json') # noqa: PTH118 + for resource_name in resources_to_plot: + resilience_results[resource_name].update( + { + 'R2Dres_mean_Supply': resilience_results[resource_name]['Supply'] + .mean(axis=0) + .tolist(), + 'R2Dres_std_Supply': resilience_results[resource_name]['Supply'] + .std(axis=0) + .tolist(), + 'R2Dres_mean_Demand': resilience_results[resource_name]['Demand'] + .mean(axis=0) + .tolist(), + 'R2Dres_std_Demand': resilience_results[resource_name]['Demand'] + .std(axis=0) + .tolist(), + 'R2Dres_mean_Consumption': resilience_results[resource_name][ + 'Consumption' + ] + .mean(axis=0) + .tolist(), + 'R2Dres_std_Consumption': resilience_results[resource_name][ + 'Consumption' + ] + .std(axis=0) + .tolist(), + } + ) + resilience_results[resource_name].pop('Supply') + resilience_results[resource_name].pop('Demand') + resilience_results[resource_name].pop('Consumption') + + with open(recovery_result_path, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(resilience_results, f) # Below are for development use - from pyrecodes import GeoVisualizer as gvis + from pyrecodes import GeoVisualizer as gvis # noqa: N813, PLC0415 + geo_visualizer = gvis.R2D_GeoVisualizer(system.components) geo_visualizer.plot_component_localities() - from pyrecodes import Plotter + from pyrecodes import Plotter # noqa: PLC0415 + plotter_object = Plotter.Plotter() x_axis_label = 'Time step [day]' - resources_to_plot = ['Shelter', 'FunctionalHousing', 'ElectricPower', 'PotableWater'] + resources_to_plot = [ + 'Shelter', + 'FunctionalHousing', + 'ElectricPower', + 'PotableWater', + ] resource_units = ['[beds/day]', '[beds/day]', '[MWh/day]', '[RC/day]'] # define which resilience 
calculator to use to plot the supply/demand/consumption of the resources # they are ordered as in the system configuration file resilience_calculator_id = 0 - for i, resource_name in enumerate(resources_to_plot): + for i, resource_name in enumerate(resources_to_plot): y_axis_label = f'{resource_name} {resource_units[i]} | {system.resilience_calculators[resilience_calculator_id].scope}' - axis_object = plotter_object.setup_lor_plot_fig(x_axis_label, y_axis_label) - time_range = system.time_step+1 - time_steps_before_event = 10 # - plotter_object.plot_single_resource(list(range(-time_steps_before_event, time_range)), - resilience_results[resource_name]['R2Dres_mean_Supply'][:time_range], - resilience_results[resource_name]['R2Dres_mean_Demand'][:time_range], - resilience_results[resource_name]['R2Dres_mean_Consumption'][:time_range], - axis_object, warmup=time_steps_before_event) - print() -def create_system_configuration(rec_config): + axis_object = plotter_object.setup_lor_plot_fig(x_axis_label, y_axis_label) + time_range = system.time_step + 1 + time_steps_before_event = 10 + plotter_object.plot_single_resource( + list(range(-time_steps_before_event, time_range)), + resilience_results[resource_name]['R2Dres_mean_Supply'][:time_range], + resilience_results[resource_name]['R2Dres_mean_Demand'][:time_range], + resilience_results[resource_name]['R2Dres_mean_Consumption'][ + :time_range + ], + axis_object, + warmup=time_steps_before_event, + ) + print() # noqa: T201 + + +def create_system_configuration(rec_config): # noqa: D103 content_config = rec_config.pop('Content') system_configuration = rec_config.copy() if content_config['Creator'] == 'FromJsonFile': - with open(content_config['FilePath'], 'r') as f: + with open(content_config['FilePath'], 'r') as f: # noqa: PTH123, PLW1514, UP015 content = json.load(f) - system_configuration.update({"Content":content}) + system_configuration.update({'Content': content}) elif content_config['Creator'] == 'LocalityGeoJSON': # think how users can input RecoveryResourceSupplier and Resources pass - + return system_configuration -def select_realizations_to_run(damage_input, inputRWHALE): - rlzs_num = min([item['ApplicationData']['Realizations'] \ - for _, item in inputRWHALE['Applications']['DL'].items()]) +def select_realizations_to_run(damage_input, inputRWHALE): # noqa: N803, D103 + rlzs_num = min( + [ # noqa: C419 + item['ApplicationData']['Realizations'] + for _, item in inputRWHALE['Applications']['DL'].items() + ] + ) rlzs_available = np.array(range(rlzs_num)) if damage_input['Type'] == 'R2DDamageRealization': rlz_filter = damage_input['Parameters']['Filter'] rlzs_requested = [] for rlzs in rlz_filter.split(','): - if "-" in rlzs: - rlzs_low, rlzs_high = rlzs.split("-") - rlzs_requested += list(range(int(rlzs_low), int(rlzs_high)+1)) + if '-' in rlzs: + rlzs_low, rlzs_high = rlzs.split('-') + rlzs_requested += list(range(int(rlzs_low), int(rlzs_high) + 1)) else: rlzs_requested.append(int(rlzs)) rlzs_requested = np.array(rlzs_requested) - rlzs_in_available = np.in1d(rlzs_requested, rlzs_available) + rlzs_in_available = np.in1d(rlzs_requested, rlzs_available) # noqa: NPY201 if rlzs_in_available.sum() != 0: - rlzs_to_run = rlzs_requested[ - np.where(rlzs_in_available)[0]] + rlzs_to_run = rlzs_requested[np.where(rlzs_in_available)[0]] else: rlzs_to_run = [] if damage_input['Type'] == 'R2DDamageSample': @@ -316,50 +441,65 @@ def select_realizations_to_run(damage_input, inputRWHALE): seed = damage_input['Parameters']['SampleSize'] if sample_size < 
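select_realizations_to_run accepts a filter string such as '0-4,7,9' and expands dash-separated ranges into explicit indices before intersecting them with the available realizations. The np.in1d call it uses is deprecated in NumPy 2.0 (hence the NPY201 suppression); np.isin is the drop-in replacement, used in this standalone sketch of the expansion, where the filter literal is illustrative:

import numpy as np

rlz_filter = '0-4,7,9'  # illustrative filter string
requested = []
for token in rlz_filter.split(','):
    if '-' in token:
        low, high = token.split('-')
        requested += list(range(int(low), int(high) + 1))
    else:
        requested.append(int(token))

available = np.arange(10)
to_run = np.asarray(requested)[np.isin(requested, available)]
print(to_run)  # [0 1 2 3 4 7 9]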
rlzs_num: np.random.seed(seed) - rlzs_to_run = np.sort(np.random.choice(rlzs_available, sample_size,\ - replace = False)).tolist() + rlzs_to_run = np.sort( + np.random.choice(rlzs_available, sample_size, replace=False) + ).tolist() else: rlzs_to_run = np.sort(rlzs_available).tolist() return rlzs_to_run -if __name__ == '__main__': - #Defining the command line arguments +if __name__ == '__main__': + # Defining the command line arguments - workflowArgParser = argparse.ArgumentParser( - "Run Pyrecodes from the NHERI SimCenter rWHALE workflow for a set of assets.", - allow_abbrev=False) + workflowArgParser = argparse.ArgumentParser( # noqa: N816 + 'Run Pyrecodes from the NHERI SimCenter rWHALE workflow for a set of assets.', + allow_abbrev=False, + ) - workflowArgParser.add_argument("-c", "--configJsonPath", - help="Configuration file for running perycode") - workflowArgParser.add_argument("-i", "--inputRWHALEPath", - help="Configuration file specifying the rwhale applications and data " - "used") - workflowArgParser.add_argument("-p", "--parallelType", + workflowArgParser.add_argument( + '-c', '--configJsonPath', help='Configuration file for running pyrecodes' + ) + workflowArgParser.add_argument( + '-i', + '--inputRWHALEPath', + help='Configuration file specifying the rwhale applications and data ' + 'used', + ) + workflowArgParser.add_argument( + '-p', + '--parallelType', default='seqRUN', - help="How parallel runs: options seqRUN, parSETUP, parRUN") - workflowArgParser.add_argument("-m", "--mpiexec", + help='How to run in parallel: options are seqRUN, parSETUP, parRUN', + ) + workflowArgParser.add_argument( + '-m', + '--mpiexec', default='mpiexec', - help="How mpi runs, e.g.
ibrun, mpirun, mpiexec', + ) + workflowArgParser.add_argument( + '-n', + '--numP', default='8', - help="If parallel, how many jobs to start with mpiexec option") + help='If parallel, how many jobs to start with mpiexec option', + ) - #Parsing the command line arguments - wfArgs = workflowArgParser.parse_args() + # Parsing the command line arguments + wfArgs = workflowArgParser.parse_args() # noqa: N816 - #Calling the main workflow method and passing the parsed arguments - numPROC = int(wfArgs.numP) + # Calling the main workflow method and passing the parsed arguments + numPROC = int(wfArgs.numP) # noqa: N816 - with open(Path(wfArgs.configJsonPath).resolve(), 'r') as f: + with open(Path(wfArgs.configJsonPath).resolve(), 'r') as f: # noqa: PTH123, PLW1514, UP015 rec_config = json.load(f) - with open(Path(wfArgs.inputRWHALEPath).resolve(), 'r') as f: - inputRWHALE = json.load(f) - - run_pyrecodes(rec_config=rec_config,\ - inputRWHALE=inputRWHALE, - parallelType = wfArgs.parallelType, - mpiExec = wfArgs.mpiexec, - numPROC = numPROC) - - \ No newline at end of file + with open(Path(wfArgs.inputRWHALEPath).resolve(), 'r') as f: # noqa: PTH123, PLW1514, UP015 + inputRWHALE = json.load(f) # noqa: N816 + + run_pyrecodes( + rec_config=rec_config, + inputRWHALE=inputRWHALE, + parallelType=wfArgs.parallelType, + mpiExec=wfArgs.mpiexec, + numPROC=numPROC, + ) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index 0bd879000..bf724a34c 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -114,7 +114,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: RET504 + return run_tag # noqa: DOC201, RET504 # Max and Min IDs if len(filterIDs) > 0: stns_requested = [] @@ -547,7 +547,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 'slopeThickness', 'gammaSoil', 'phiSoil', - 'cohesionSoil' + 'cohesionSoil', ]: if stn.get(key, None) is not None: tmp.update({key: stn.get(key)}) @@ -609,7 +609,7 @@ def create_gridded_stations( gstn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 1 - return run_tag # noqa: RET504 + return run_tag # noqa: DOC201, RET504 if np.max(gstn_df.index.values) != 2: # noqa: PLR2004 run_tag = 1 return run_tag # noqa: RET504 @@ -662,7 +662,7 @@ def get_vs30_global(lat, lon): ) vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: RET504 + return vs30 # noqa: DOC201, RET504 def get_vs30_thompson(lat, lon): @@ -694,21 +694,21 @@ def get_vs30_thompson(lat, lon): vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: RET504 + return vs30 # noqa: DOC201, RET504 def get_z1(vs30): """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter)""" # noqa: D400 z1 = np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4))) # return - return z1 # noqa: RET504 + return z1 # noqa: DOC201, RET504 def get_z25(z1): """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013)""" # noqa: D400 z25 = 0.748 + 2.218 * z1 # return - return z25 # noqa: RET504 + return z25 # noqa: DOC201, RET504 def get_z25fromVs(vs): # noqa: N802 @@ -717,7 +717,7 @@ def 
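With the arguments defined above, a typical sequential invocation might look like the following, with placeholder file names; for parRUN the script is expected to be launched under an MPI runner such as mpiexec, with mpi4py available:

python run_pyrecodes.py -c rec_config.json -i inputRWHALE.json -p seqRUN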
get_z25fromVs(vs): # noqa: N802 """ # noqa: D205, D400 z25 = (7.089 - 1.144 * np.log(vs)) * 1000 # return - return z25 # noqa: RET504 + return z25 # noqa: DOC201, RET504 def get_zTR_global(lat, lon): # noqa: N802 @@ -743,7 +743,7 @@ def get_zTR_global(lat, lon): # noqa: N802 ) zTR = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # noqa: N806 # return - return zTR # noqa: RET504 + return zTR # noqa: DOC201, RET504 def export_site_prop(stn_file, output_dir, filename): @@ -811,7 +811,7 @@ def get_zTR_ncm(lat, lon): # noqa: N802 # get the top bedrock data zTR.append(abs(cur_res['response']['results'][0]['profiles'][0]['top'])) # return - return zTR + return zTR # noqa: DOC201 def get_vsp_ncm(lat, lon, depth): @@ -850,7 +850,7 @@ def get_vsp_ncm(lat, lon, depth): if len(vsp) == 1: vsp = vsp[0] # return - return vsp + return vsp # noqa: DOC201 def compute_vs30_from_vsp(depthp, vsp): @@ -868,7 +868,7 @@ def compute_vs30_from_vsp(depthp, vsp): # Computing the Vs30 vs30p = 30.0 / np.sum(delta_t) # return - return vs30p # noqa: RET504 + return vs30p # noqa: DOC201, RET504 def get_vs30_ncm(lat, lon): @@ -895,7 +895,7 @@ def get_vs30_ncm(lat, lon): ) vs30.append(760.0) # return - return vs30 + return vs30 # noqa: DOC201 def get_soil_model_ba(param=None): @@ -925,7 +925,7 @@ def get_soil_model_ba(param=None): else: res = None - return res + return res # noqa: DOC201 def get_soil_model_ei(param=None): @@ -940,7 +940,7 @@ def get_soil_model_ei(param=None): else: res = None - return res + return res # noqa: DOC201 def get_soil_model_user(df_stn, model_fun): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py index a09550e27..3f4f87688 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py @@ -630,7 +630,7 @@ def oq_run_classical_psha( # noqa: C901 export_realizations('realizations', dstore) except: # noqa: E722 print('FetchOpenQuake: Classical PSHA failed.') # noqa: T201 - return 1 + return 1 # noqa: DOC201 elif vtag == 11: # noqa: PLR2004 try: print(f'FetchOpenQuake: running Version {oq_version}.') # noqa: T201 @@ -680,7 +680,7 @@ def oq_run_classical_psha( # noqa: C901 try: params['hazard_calculation_id'] = str(calc_ids[hc_id]) except IndexError: - raise SystemExit( # noqa: B904, DOC501 + raise SystemExit( # noqa: B904, DOC501, RUF100 'There are %d old calculations, cannot ' 'retrieve the %s' % (len(calc_ids), hc_id) ) @@ -845,7 +845,7 @@ def oq_read_uhs_classical_psha(scen_info, event_info, dir_info): mag_maf.append([0.0, float(list_IMs[0].split('~')[0]), 0.0]) # return - return ln_psa_mr, mag_maf, im_list + return ln_psa_mr, mag_maf, im_list # noqa: DOC201 class OpenQuakeHazardCalc: # noqa: D101 @@ -991,7 +991,7 @@ def run_calc(self): # noqa: C901 oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False ): - return {} + return {} # noqa: DOC201 elif 'rupture_model' not in oq.inputs: logging.warning( 'There is no rupture_model, the calculator will just ' @@ -1275,7 +1275,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 dctx = computer.dctx.roundup(cur_gs.minimum_distance) if computer.distribution is None: if computer.correlation_model: - raise ValueError( # noqa: DOC501, TRY003, TRY301 + raise ValueError( # noqa: DOC501, RUF100, TRY003, TRY301 'truncation_level=0 requires ' # noqa: EM101 'no correlation model' ) @@ -1295,7 
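The site-parameter helpers above form a small chain: Vs30 is the time-averaged shear-wave velocity over the top 30 m (30 divided by the summed travel time through each layer), z1 follows the Chiou and Youngs relation coded in get_z1, and z2.5 follows the Campbell and Bozorgnia relation in get_z25. A compact sketch of the three steps with an illustrative two-layer profile:

import numpy as np

depthp = np.array([10.0, 30.0])   # layer bottom depths (m), illustrative
vsp = np.array([200.0, 400.0])    # layer shear-wave velocities (m/s)

thickness = np.diff(depthp, prepend=0.0)
vs30 = 30.0 / np.sum(thickness / vsp)  # travel-time average over the top 30 m

z1 = np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4)))
z25 = 0.748 + 2.218 * z1
print(vs30, z1, z25)  # vs30 = 300.0 here, then the derived basin depths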
+1295,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 # of interest. # In this case, we also assume no correlation model is used. if computer.correlation_model: - raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, TRY301 + raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, RUF100, TRY301 computer.correlation_model, cur_gs ) @@ -1371,7 +1371,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 dctx = computer.dctx.roundup(cur_gs.minimum_distance) if computer.truncation_level == 0: if computer.correlation_model: - raise ValueError( # noqa: DOC501, TRY003, TRY301 + raise ValueError( # noqa: DOC501, RUF100, TRY003, TRY301 'truncation_level=0 requires ' # noqa: EM101 'no correlation model' ) @@ -1391,7 +1391,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 # of interest. # In this case, we also assume no correlation model is used. if computer.correlation_model: - raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, TRY301 + raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, RUF100, TRY301 computer.correlation_model, cur_gs ) @@ -1550,7 +1550,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 } # return - return res # noqa: RET504 + return res # noqa: DOC201, RET504 def calculator_build_events_from_sources(self): # noqa: C901 """Prefilter the composite source model and store the source_info""" # noqa: D400 @@ -1666,7 +1666,7 @@ def __str__(self): # noqa: D105 def to_imt_unit_values(vals, imt): """Exponentiate the values unless the IMT is MMI""" # noqa: D400 if str(imt) == 'MMI': - return vals + return vals # noqa: DOC201 return np.exp(vals) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py index 54bf7a752..6518421cf 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py @@ -654,7 +654,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_ManzourDavidson2016._input_check: no return period is defined.' ) - return False + return False # noqa: DOC201 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -730,8 +730,8 @@ def _opt_initialization(self): itertools.product(range(self.num_sites), range(self.num_return_periods)) ) self.prob += pulp.lpSum( - self.return_periods[j] * self.e_plus[(i, j)] - + self.return_periods[j] * self.e_minus[(i, j)] + self.return_periods[j] * self.e_plus[(i, j)] # noqa: RUF031 + + self.return_periods[j] * self.e_minus[(i, j)] # noqa: RUF031 for (i, j) in comb_sites_rps ) @@ -757,7 +757,7 @@ def _opt_initialization(self): <= self.num_scenarios ) - return True + return True # noqa: DOC201 def solve_opt(self): """target_function: compute the target function to be minimized @@ -853,7 +853,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_Wangetal2023._input_check: no return period is defined.' 
) - return False + return False # noqa: DOC201 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -916,7 +916,7 @@ def _opt_initialization(self): self.X_weighted = np.dot(self.W, self.X) self.y_weighted = np.dot(self.W, self.y) - return True + return True # noqa: DOC201 def solve_opt(self): """LASSO regression""" # noqa: D400 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index a242a8eb9..779fdb70d 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -470,17 +470,18 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0914, PLR0915 ln_im_mr, mag_maf, im_list ) gf_im_list += settlement_info['Output'] - if "Landslide" in ground_failure_info.keys(): - import landslide - if 'Landslide' in ground_failure_info['Landslide'].keys(): + if 'Landslide' in ground_failure_info.keys(): # noqa: SIM118 + import landslide # noqa: PLC0415 + + if 'Landslide' in ground_failure_info['Landslide'].keys(): # noqa: SIM118 lsld_info = ground_failure_info['Landslide']['Landslide'] - lsld_model = getattr(landslide, lsld_info['Model'])(\ - lsld_info["Parameters"], stations) + lsld_model = getattr(landslide, lsld_info['Model'])( + lsld_info['Parameters'], stations + ) ln_im_mr, mag_maf, im_list = lsld_model.run( - ln_im_mr, mag_maf, im_list - ) + ln_im_mr, mag_maf, im_list + ) gf_im_list += lsld_info['Output'] - if event_info['SaveIM'] and ln_im_mr: print('HazardSimulation: saving simulated intensity measures.') # noqa: T201 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 5f4725fa4..95112fa86 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -99,6 +99,7 @@ if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype + # from jpype import imports import jpype.imports from jpype.types import * # noqa: F403 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index 5c2ea75c3..357973d27 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -65,7 +65,7 @@ def baker_jayaram_correlation_2008(im1, im2, flag_orth=False): # noqa: FBT002, elif im1.startswith('PGA'): T1 = 0.0 # noqa: N806 else: - return 0.0 + return 0.0 # noqa: DOC201 if im2.startswith('SA'): T2 = float(im2[3:-1]) # noqa: N806 elif im2.startswith('PGA'): @@ -126,7 +126,7 @@ def bradley_correlation_2011(IM, T=None, flag_Ds=True): # noqa: FBT002, C901, N # PGA if IM == 'PGA': # noqa: RET503 if flag_Ds: - return -0.442 + return -0.442 # noqa: DOC201 else: # noqa: RET505 return -0.305 elif IM == 'PGV': @@ -252,7 +252,7 @@ def jayaram_baker_correlation_2009(im, h, flag_clustering=False): # noqa: FBT00 else: b = 40.7 - 15.0 * T rho = np.exp(-3.0 * h / b) - return rho # noqa: RET504 + return rho # noqa: DOC201, RET504 def 
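The correlation models that follow all start by recovering vibration periods from intensity-measure names: an 'SA(T)' string carries the period between the parentheses, and 'PGA' is treated as zero period. A minimal sketch of that convention; im_to_period is a hypothetical helper name, not a function from the module:

def im_to_period(im):
    """Map an IM name to a period in seconds; None when not period-based."""
    if im.startswith('SA'):
        return float(im[3:-1])  # e.g. 'SA(0.3)' -> 0.3
    if im.startswith('PGA'):
        return 0.0
    return None

print([im_to_period(x) for x in ['SA(0.3)', 'SA(1.0)', 'PGA', 'PGV']])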
load_loth_baker_correlation_2013(datapath): @@ -270,7 +270,7 @@ def load_loth_baker_correlation_2013(datapath): B2 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B2.csv', header=0) # noqa: N806 B1 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B1.csv', header=0) # noqa: N806 B3 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B3.csv', header=0) # noqa: N806 - return B1, B2, B3 + return B1, B2, B3 # noqa: DOC201 def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N803 @@ -303,7 +303,7 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N80 Ch = b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0) # noqa: N806 # Correlation coefficient rho = Ch - return rho # noqa: RET504 + return rho # noqa: DOC201, RET504 def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 @@ -373,7 +373,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 .swapaxes(1, 2) ) # return - return residuals # noqa: RET504 + return residuals # noqa: DOC201, RET504 def load_markhvida_ceferino_baker_correlation_2017(datapath): @@ -404,7 +404,7 @@ def load_markhvida_ceferino_baker_correlation_2017(datapath): index_col=None, header=0, ) - return MCB_model, MCB_pca, MCB_var + return MCB_model, MCB_pca, MCB_var # noqa: DOC201 def markhvida_ceferino_baker_correlation_2017( # noqa: C901 @@ -521,7 +521,7 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 if tmp_periods > model_Tmax: residuals = np.concatenate((residuals, Tmax_residuals), axis=1) # return - return residuals + return residuals # noqa: DOC201 def load_du_ning_correlation_2021(datapath): @@ -548,7 +548,7 @@ def load_du_ning_correlation_2021(datapath): DN_var = pd.read_csv( # noqa: N806 datapath + 'du_ning_correlation_2021_var_scale.csv', index_col=None, header=0 ) - return DN_model, DN_pca, DN_var + return DN_model, DN_pca, DN_var # noqa: DOC201 def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): @@ -657,7 +657,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): ) # return - return residuals + return residuals # noqa: DOC201 def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 @@ -686,7 +686,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 print( # noqa: T201 f'CorrelationModel.baker_bradley_correlation_2017: warning - return 0.0 for unknown {im1}' ) - return 0.0 + return 0.0 # noqa: DOC201 im_list.append(tmp_tag) period_list.append(None) if im2.startswith('SA'): diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py index 57f02c96b..2a017cbf4 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py @@ -65,7 +65,7 @@ def abrahamson_silva_ds_1999( print( # noqa: T201 "SignificantDurationModel.abrahamson_silva_ds_1999: duration_type='DS575H','DS575V','DS595H','DS595V'?" ) - return None, None + return None, None # noqa: DOC201 # modeling coefficients beta = [3.2, 3.2, 3.2, 3.2] b1 = [5.204, 4.610, 5.204, 4.610] @@ -140,7 +140,7 @@ def bommer_stafford_alarcon_ds_2009( print( # noqa: T201 "SignificantDurationModel.bommer_stafford_alarcon_ds_2009: duration_type='DS575H','DS595H'?" 
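compute_rho_loth_baker_correlation_2013 above evaluates the Loth and Baker (2013) coregionalization kernel: two exponential terms in separation distance plus a nugget that is active only at zero separation. A sketch of the kernel shape with placeholder coefficients; b1, b2, b3 below are made up, not values from the published B1/B2/B3 tables:

import numpy as np

def loth_baker_rho(h, b1, b2, b3):
    """Two exponential kernels plus a zero-distance nugget term."""
    return b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0)

h = np.array([0.0, 5.0, 20.0, 70.0])     # separation distances (km)
print(loth_baker_rho(h, 0.3, 0.5, 0.2))  # 1.0 at h = 0, decaying with distance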
) - return None, None, None, None + return None, None, None, None # noqa: DOC201 # modeling coefficients c0 = [-5.6298, -2.2393] @@ -205,7 +205,7 @@ def afshari_stewart_ds_2016( # noqa: C901 print( # noqa: T201 "SignificantDurationModel.afshari_stewart_ds_2016: mechanism='unknown','normal','reverse','strike-slip'?" ) - return None, None, None, None + return None, None, None, None # noqa: DOC201 # region map reg_map = {'california': 0, 'japan': 1, 'other': 2} reg_tag = reg_map.get(region.lower(), None) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py index 61ce5c1ed..b4cf82da8 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py @@ -240,7 +240,7 @@ def calc(self, Mw, rJB, rRup, rX, dip, zTop, vs30, vsInf, z1p0, style): # noqa: stdDev = np.sqrt(tauSq + phiSq) # noqa: N806 - return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) + return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) # noqa: DOC201 # https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/imr/attenRelImpl/ngaw2/NGAW2_Wrapper.java#L220 def getFaultFromRake(self, rake): # noqa: D102, N802, PLR6301 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index a901df9b9..cd6352725 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -1,7 +1,7 @@ -import numpy as np +import numpy as np # noqa: CPY001, INP001, I001, D100 import rasterio as rio from scipy.interpolate import interp2d -import sys, warnings, shapely, pandas, os +import sys, warnings, shapely, pandas, os # noqa: ICN001, E401 from pyproj import Transformer from pyproj import CRS from enum import Enum @@ -9,11 +9,13 @@ from scipy.spatial import ConvexHull import pandas as pd -## Helper functions -def sampleRaster(raster_file_path, raster_crs, x, y, interp_scheme = 'nearest',\ - dtype = None): - """performs 2D interpolation at (x,y) pairs. Accepted interp_scheme = 'nearest', 'linear', 'cubic', and 'quintic'""" - print(f"Sampling from the Raster File: {os.path.basename(raster_file_path)}...") + +## Helper functions # noqa: E266 +def sampleRaster( # noqa: N802 + raster_file_path, raster_crs, x, y, interp_scheme='nearest', dtype=None +): + """performs 2D interpolation at (x,y) pairs. Accepted interp_scheme = 'nearest', 'linear', 'cubic', and 'quintic'""" # noqa: D400, D401, D403 + print(f'Sampling from the Raster File: {os.path.basename(raster_file_path)}...') # noqa: T201, PTH119 invalid_value = np.nan xy_crs = CRS.from_user_input(4326) raster_crs = CRS.from_user_input(raster_crs) @@ -21,82 +23,108 @@ def sampleRaster(raster_file_path, raster_crs, x, y, interp_scheme = 'nearest',\ try: raster_data = raster_file.read() if raster_data.shape[0] > 1: - warnings.warn(f"More than one band in the file {raster_file_path}, the first band is used.") - except: - sys.exit(f"Can not read data from {raster_file_path}") + warnings.warn( # noqa: B028 + f'More than one band in the file {raster_file_path}, the first band is used.' 
+ ) + except: # noqa: E722 + sys.exit(f'Can not read data from {raster_file_path}') if xy_crs != raster_crs: # make transformer for reprojection - transformer_xy_to_data = Transformer.from_crs(xy_crs, raster_crs,\ - always_xy=True) + transformer_xy_to_data = Transformer.from_crs( + xy_crs, raster_crs, always_xy=True + ) # reproject and store x_proj, y_proj = transformer_xy_to_data.transform(x, y) x = x_proj y = y_proj n_sample = len(x) if interp_scheme == 'nearest': - sample = np.array([val[0] for val in raster_file.sample(list(zip(x,y)))]) + sample = np.array( + [val[0] for val in raster_file.sample(list(zip(x, y)))] + ) else: # create x and y ticks for grid - x_tick = np.linspace(raster_file.bounds.left, \ - raster_file.bounds.right, raster_file.width, endpoint=False) - y_tick = np.linspace(raster_file.bounds.bottom,\ - raster_file.bounds.top, raster_file.height, endpoint=False) + x_tick = np.linspace( + raster_file.bounds.left, + raster_file.bounds.right, + raster_file.width, + endpoint=False, + ) + y_tick = np.linspace( + raster_file.bounds.bottom, + raster_file.bounds.top, + raster_file.height, + endpoint=False, + ) # create interp2d function interp_function = interp2d( - x_tick, y_tick, np.flipud(raster_file.read(1)), - kind=interp_scheme, fill_value=invalid_value) + x_tick, + y_tick, + np.flipud(raster_file.read(1)), + kind=interp_scheme, + fill_value=invalid_value, + ) # get samples sample = np.transpose( - [interp_function(x[i],y[i]) for i in range(n_sample)] + [interp_function(x[i], y[i]) for i in range(n_sample)] )[0] # convert to target datatype if dtype is not None: sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) - sample[abs(sample)>1e10] = invalid_value - return sample + sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 + return sample # noqa: DOC201 -## Helper functions -def sampleVector(vector_file_path, vector_crs, x, y, dtype = None): - """performs spatial join of vector_file with xy'""" - print(f"Sampling from the Vector File: {os.path.basename(vector_file_path)}...") - invalid_value = np.nan + +## Helper functions # noqa: E266 +def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG001, N802 + """performs spatial join of vector_file with xy'""" # noqa: D400, D401, D403 + print(f'Sampling from the Vector File: {os.path.basename(vector_file_path)}...') # noqa: T201, PTH119 + invalid_value = np.nan # noqa: F841 xy_crs = CRS.from_user_input(4326) vector_gdf = gpd.read_file(vector_file_path) if vector_gdf.crs != vector_crs: - sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models") + sys.exit( + f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models" + ) if xy_crs != vector_crs: # make transformer for reprojection - transformer_xy_to_data = Transformer.from_crs(xy_crs, vector_crs,\ - always_xy=True) + transformer_xy_to_data = Transformer.from_crs( + xy_crs, vector_crs, always_xy=True + ) # reproject and store x_proj, y_proj = transformer_xy_to_data.transform(x, y) x = x_proj y = y_proj - # Create a convex hull containing all sites + # Create a convex hull containing all sites sites = np.array([x, y]).transpose() try: hull = ConvexHull(sites) vertices = hull.vertices vertices = sites[np.append(vertices, vertices[0])] centroid = np.mean(vertices, axis=0) - vertices = vertices + 0.05 * (vertices - centroid) - RoI 
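sampleRaster above reads band 1, reprojects the query points into the raster CRS when needed, and either calls rasterio's sample for nearest-neighbour lookups or builds a SciPy interpolant for smoother schemes. One caveat: scipy.interpolate.interp2d is deprecated and removed in recent SciPy releases, so RegularGridInterpolator is the forward-compatible substitute for the non-nearest schemes. A sketch of the nearest-neighbour path, assuming a single-band GeoTIFF at a placeholder path:

import numpy as np
import rasterio as rio
from pyproj import CRS, Transformer

raster_path = 'slope.tif'  # placeholder path
lon = np.array([-122.3, -121.9])
lat = np.array([37.9, 37.4])

with rio.open(raster_path) as src:
    xy_crs = CRS.from_user_input(4326)
    raster_crs = CRS.from_user_input(src.crs)
    if raster_crs != xy_crs:
        t = Transformer.from_crs(xy_crs, raster_crs, always_xy=True)
        lon, lat = t.transform(lon, lat)
    samples = np.array([v[0] for v in src.sample(zip(lon, lat))])  # band 1 values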
= shapely.geometry.Polygon(vertices) - except: + vertices = vertices + 0.05 * (vertices - centroid) # noqa: PLR6104 + RoI = shapely.geometry.Polygon(vertices) # noqa: N806 + except: # noqa: E722 centroid = shapely.geometry.Point(np.mean(x), np.mean(y)) points = [shapely.geometry.Point(x[i], y[i]) for i in range(len(x))] if len(points) == 1: - distances = [0.1] # Degree + distances = [0.1] # Degree else: distances = [point.distance(centroid) for point in points] - max_distance = max(distances)*1.2 + max_distance = max(distances) * 1.2 angles = np.linspace(0, 2 * np.pi, 36) - circle_points = [(centroid.x + max_distance * np.cos(angle), \ - centroid.y + max_distance * np.sin(angle)) for angle in angles] - RoI = shapely.geometry.Polygon(circle_points) - data = dict() + circle_points = [ + ( + centroid.x + max_distance * np.cos(angle), + centroid.y + max_distance * np.sin(angle), + ) + for angle in angles + ] + RoI = shapely.geometry.Polygon(circle_points) # noqa: N806 + data = dict() # noqa: C408 for col in vector_gdf.columns: - data.update({col:[]}) + data.update({col: []}) for row_index in vector_gdf.index: new_geom = RoI.intersection(vector_gdf.loc[row_index, 'geometry']) if new_geom.is_empty: @@ -107,158 +135,188 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype = None): data[col].append(vector_gdf.loc[row_index, col]) data['geometry'].append(new_geom) del vector_gdf - gdf_roi = gpd.GeoDataFrame(data, geometry="geometry", crs=4326) - geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] + gdf_roi = gpd.GeoDataFrame(data, geometry='geometry', crs=4326) + geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] # noqa: FURB140 gdf_sites = gpd.GeoDataFrame(geometry=geometry, crs=4326).reset_index() - merged = gpd.GeoDataFrame.sjoin(gdf_roi, gdf_sites, how = 'inner', predicate = 'contains') + merged = gpd.GeoDataFrame.sjoin( + gdf_roi, gdf_sites, how='inner', predicate='contains' + ) merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) - gdf_sites = pandas.merge(gdf_sites, merged, on = 'index', how = 'left') - gdf_sites.drop(columns=['geometry', 'index'], inplace=True) - return gdf_sites + gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') + gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 + return gdf_sites # noqa: DOC201 -def find_additional_output_req(liq_info, current_step): + +def find_additional_output_req(liq_info, current_step): # noqa: D103 additional_output_keys = [] if current_step == 'Triggering': - trigging_parameters = liq_info['Triggering']\ - ['Parameters'].keys() - triger_dist_water = liq_info['Triggering']['Parameters'].get('DistWater', None) + trigging_parameters = liq_info['Triggering']['Parameters'].keys() # noqa: F841 + triger_dist_water = liq_info['Triggering']['Parameters'].get( + 'DistWater', None + ) if triger_dist_water is None: return additional_output_keys - lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None) - if 'LateralSpreading' in liq_info.keys(): - lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None) - if (liq_info['LateralSpreading']['Model'] == 'Hazus2020')\ - and (lat_dist_water==triger_dist_water): + lat_dist_water = liq_info['LateralSpreading']['Parameters'].get( + 'DistWater', None + ) + if 'LateralSpreading' in liq_info.keys(): # noqa: SIM118 + lat_dist_water = liq_info['LateralSpreading']['Parameters'].get( + 'DistWater', None + ) + if (liq_info['LateralSpreading']['Model'] == 
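infer_from_geologic_map above joins station points against the geologic map, then broadcasts each PTYPE unit to the median friction angle and cohesion from the bundled Wills et al. (2015) table. The per-unit broadcast can be sketched in isolation with a toy table; the numbers below are made up, not values from the shipped CSV:

import numpy as np
import pandas as pd

props = pd.DataFrame({
    'Unit Abbreviation': ['Qal1', 'Tsh'],
    'Friction Angle - Median (degrees)': [30.0, 25.0],
    'Cohesion - Median (kPa)': [20.0, 35.0],
})
units = np.array(['Qal1', 'Tsh', 'Qal1'])  # sampled PTYPE per station

phi = np.empty(len(units))
coh = np.empty(len(units))
for u in np.unique(units):
    row = props.index[props['Unit Abbreviation'] == u][0]
    phi[units == u] = props.at[row, 'Friction Angle - Median (degrees)']
    coh[units == u] = props.at[row, 'Cohesion - Median (kPa)']
print(phi, coh)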
'Hazus2020') and ( + lat_dist_water == triger_dist_water + ): additional_output_keys.append('dist_to_water') return additional_output_keys -def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station): - gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype = None) + +def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station): # noqa: D103 + gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype=None) gdf_units = gdf_units['PTYPE'] gdf_units = gdf_units.fillna('water') - default_geo_prop_fpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'database',\ - 'groundfailure', 'Wills_etal_2015_CA_Geologic_Properties.csv') + default_geo_prop_fpath = os.path.join( # noqa: PTH118 + os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120 + 'database', + 'groundfailure', + 'Wills_etal_2015_CA_Geologic_Properties.csv', + ) default_geo_prop = pd.read_csv(default_geo_prop_fpath) unique_geo_unit = np.unique(gdf_units) phi_mean = np.empty_like(gdf_units) coh_mean = np.empty_like(gdf_units) for each in unique_geo_unit: - rows_with_geo_unit = np.where(gdf_units.values==each)[0] - rows_for_param = np.where(default_geo_prop['Unit Abbreviation'].values==each)[0][0] - phi_mean[rows_with_geo_unit] = \ - default_geo_prop['Friction Angle - Median (degrees)'][rows_for_param] - coh_mean[rows_with_geo_unit] = \ - default_geo_prop['Cohesion - Median (kPa)'][rows_for_param] + rows_with_geo_unit = np.where(gdf_units.values == each)[0] # noqa: PD011 + rows_for_param = np.where( + default_geo_prop['Unit Abbreviation'].values == each # noqa: PD011 + )[0][0] + phi_mean[rows_with_geo_unit] = default_geo_prop[ + 'Friction Angle - Median (degrees)' + ][rows_for_param] + coh_mean[rows_with_geo_unit] = default_geo_prop['Cohesion - Median (kPa)'][ + rows_for_param + ] return phi_mean, coh_mean + def erf2(x): - """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" + """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" # noqa: D400, D401, D403 # constants - a1 = 0.254829592 + a1 = 0.254829592 a2 = -0.284496736 - a3 = 1.421413741 + a3 = 1.421413741 a4 = -1.453152027 - a5 = 1.061405429 - p = 0.3275911 + a5 = 1.061405429 + p = 0.3275911 # Save the sign of x signs = np.sign(x) x = np.abs(x) # A & S 7.1.26 - t = 1.0/(1.0 + p*x) - y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2) - return signs*y + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) + return signs * y # noqa: DOC201 + def norm2_cdf(x, loc, scale): """ modified implementation of norm.cdf function from numba_stats, using self-implemented erf function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = (x - loc)/scale - return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) + """ # noqa: D205, D400, D401 + inter = (x - loc) / scale + return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) # noqa: DOC201 + def erf2_2d(x): - """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" + """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" # noqa: D400, D401, D403 # constants - a1 = 0.254829592 + a1 = 0.254829592 a2 = -0.284496736 - a3 = 1.421413741 + a3 = 1.421413741 a4 = -1.453152027 - a5 = 1.061405429 - p = 0.3275911 + a5 = 1.061405429 + p = 0.3275911 # Save the sign of x signs = np.sign(x) x = np.abs(x) # A & S 7.1.26 - t = 1.0/(1.0 + p*x) - y 
= 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2) - return signs*y + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) + return signs * y # noqa: DOC201 + def norm2_cdf_2d(x, loc, scale): """ modified implementation of norm.cdf function from numba_stats, using self-implemented erf function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = (x - loc)/scale - return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) + """ # noqa: D205, D400, D401 + inter = (x - loc) / scale + return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) # noqa: DOC201 + -def nb_round(x, decimals): +def nb_round(x, decimals): # noqa: D103 out = np.empty_like(x) - return np.round_(x, decimals, out) + return np.round_(x, decimals, out) # noqa: NPY003, NPY201 + -def erfinv_coeff(order=20): +def erfinv_coeff(order=20): # noqa: D103 # initialize - c = np.empty(order+1) + c = np.empty(order + 1) # starting value c[0] = 1 - for i in range(1,order+1): - c[i] = sum([c[j]*c[i-1-j]/(j+1)/(2*j+1) for j in range(i)]) + for i in range(1, order + 1): + c[i] = sum([c[j] * c[i - 1 - j] / (j + 1) / (2 * j + 1) for j in range(i)]) # noqa: C419 # return return c + def erfinv(x, order=20): - """returns inverse erf(x)""" + """returns inverse erf(x)""" # noqa: D400, D401, D403 # get coefficients c = erfinv_coeff(order) # initialize - root_pi_over_2 = np.sqrt(np.pi)/2 + root_pi_over_2 = np.sqrt(np.pi) / 2 y = np.zeros(x.shape) for i in range(order): - y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1) + y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y + return y # noqa: DOC201 + def norm2_ppf(p, loc, scale): """ modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = np.sqrt(2) * erfinv(2*p-1,order=20) - return scale * inter + loc + """ # noqa: D205, D400, D401 + inter = np.sqrt(2) * erfinv(2 * p - 1, order=20) + return scale * inter + loc # noqa: DOC201 + def erfinv_2d(x, order=20): - """returns inverse erf(x)""" + """returns inverse erf(x)""" # noqa: D400, D401, D403 # get coefficients c = erfinv_coeff(order) # initialize - root_pi_over_2 = np.sqrt(np.pi)/2 + root_pi_over_2 = np.sqrt(np.pi) / 2 y = np.zeros(x.shape) for i in range(order): - y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1) + y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y + return y # noqa: DOC201 + def norm2_ppf_2d(p, loc, scale): """ modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = np.sqrt(2) * erfinv_2d(2*p-1,order=20) - return scale * inter + loc + """ # noqa: D205, D400, D401 + inter = np.sqrt(2) * erfinv_2d(2 * p - 1, order=20) + return scale * inter + loc # noqa: DOC201 + -class Landslide: +class Landslide: # noqa: D101 def __init__(self) -> None: pass + # ----------------------------------------------------------- class BrayMacedo2019(Landslide): """ @@ -280,7 +338,7 @@ class BrayMacedo2019(Landslide): [g] peak ground acceleration mag: float, np.ndarray or list moment magnitude - + Geotechnical/geologic: slope: float, np.ndarray or list [deg] slope angle @@ -292,7 +350,7 @@ class BrayMacedo2019(Landslide): [deg] friction angle of soil coh_soil: float, np.ndarray or list [kPa] cohesion of soil - + Fixed: Returns @@ -301,190 +359,280 
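The erf2 helpers above implement the Abramowitz & Stegun 7.1.26 rational approximation, whose maximum absolute error is about 1.5e-7, so norm2_cdf and norm2_ppf are accurate well beyond what the displacement model needs. A quick sanity check against math.erf, assuming the definitions above are importable:

    import math
    import numpy as np

    x = np.linspace(-3.0, 3.0, 121)
    exact_erf = np.array([math.erf(v) for v in x])
    assert np.max(np.abs(erf2(x) - exact_erf)) < 2e-7

    exact_cdf = 0.5 * (1.0 + np.array([math.erf(v / math.sqrt(2)) for v in x]))
    assert np.max(np.abs(norm2_cdf(x, 0.0, 1.0) - exact_cdf)) < 2e-7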
@@ class BrayMacedo2019(Landslide):
         [m] permanent ground deformation
     sigma_pgdef : float, np.ndarray
         aleatory variability for ln(pgdef)
-
+
     References
     ----------
     .. [1] Bray, J.D., and Macedo, J., 2019, Procedure for Estimating
     Shear-Induced Seismic Slope Displacement for Shallow Crustal Earthquakes,
     Journal of Geotechnical and Geoenvironmental Engineering, vol. 145,
     pp. 12, 04019106.
-
-    """
+
+    """  # noqa: D205, D400
+
     def __init__(self, parameters, stations) -> None:
         self.stations = stations
         self.parameters = parameters
-        self.slope = None #(km)
-        self.t_slope = None #(km)
-        self.gamma_soil = None #(km)
-        self.phi_soil = None #(m)
-        self.coh_soil = None # (mm)
+        self.slope = None  # (deg)
+        self.t_slope = None  # (m)
+        self.gamma_soil = None  # (kN/m^3)
+        self.phi_soil = None  # (deg)
+        self.coh_soil = None  # (kPa)
         self.interpolate_spatial_parameters(parameters)
-
-    def interpolate_spatial_parameters(self, parameters):
+
+    def interpolate_spatial_parameters(self, parameters):  # noqa: C901, D102
         # site coordinate in CRS 4326
         lat_station = [site['lat'] for site in self.stations]
         lon_station = [site['lon'] for site in self.stations]
-        # slope
-        if parameters["Slope"] == "Defined (\"slope\") in Site File (.csv)":
+        # slope
+        if parameters['Slope'] == 'Defined ("slope") in Site File (.csv)':
            self.slope = np.array([site['slope'] for site in self.stations])
        else:
-            self.slope = sampleRaster(parameters["Slope"], parameters["inputCRS"],\
-                lon_station, lat_station)
+            self.slope = sampleRaster(
+                parameters['Slope'], parameters['inputCRS'], lon_station, lat_station
+            )
        # t_slope
-        if parameters["SlopeThickness"] == "Defined (\"slopeThickness\") in Site File (.csv)":
-            self.t_slope = np.array([site['slopeThickness'] for site in self.stations])
-        elif parameters["SlopeThickness"] == "Use constant value (m)":
-            self.t_slope = np.array([parameters["SlopeThicknessValue"]]*len(self.stations))
+        if (
+            parameters['SlopeThickness']
+            == 'Defined ("slopeThickness") in Site File (.csv)'
+        ):
+            self.t_slope = np.array(
+                [site['slopeThickness'] for site in self.stations]
+            )
+        elif parameters['SlopeThickness'] == 'Use constant value (m)':
+            self.t_slope = np.array(
+                [parameters['SlopeThicknessValue']] * len(self.stations)
+            )
        else:
-            self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\
-                lon_station, lat_station)
+            self.t_slope = sampleRaster(
+                parameters['SlopeThickness'],
+                parameters['inputCRS'],
+                lon_station,
+                lat_station,
+            )
        # gamma_soil
-        if parameters["GammaSoil"] == "Defined (\"gammaSoil\") in Site File (.csv)":
+        if parameters['GammaSoil'] == 'Defined ("gammaSoil") in Site File (.csv)':
            self.gamma_soil = np.array([site['gammaSoil'] for site in self.stations])
-        elif parameters["GammaSoil"] == "Use constant value (kN/m^3)":
-            self.gamma_soil = np.array([parameters["GammaSoilValue"]]*len(self.stations))
+        elif parameters['GammaSoil'] == 'Use constant value (kN/m^3)':
+            self.gamma_soil = np.array(
+                [parameters['GammaSoilValue']] * len(self.stations)
+            )
        else:
-            self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\
-                lon_station, lat_station)
+            self.gamma_soil = sampleRaster(
+                parameters['GammaSoil'],
+                parameters['inputCRS'],
+                lon_station,
+                lat_station,
+            )
        # phi_soil
-        if parameters["PhiSoil"] == "Defined (\"phiSoil\") in Site File (.csv)":
+        if parameters['PhiSoil'] == 'Defined ("phiSoil") in Site File (.csv)':
            self.phi_soil = np.array([site['phiSoil'] for site in self.stations])
-        elif parameters["PhiSoil"] == "Use constant value (deg)":
-
self.phi_soil = np.array([parameters["PhiSoilValue"]]*len(self.stations)) - elif parameters["PhiSoil"] == "Infer from Geologic Map (Bain et al. 2022)": - if parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 2022)": - self.phi_soil, self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ - parameters['inputCRS'], lon_station, lat_station) + elif parameters['PhiSoil'] == 'Use constant value (deg)': + self.phi_soil = np.array( + [parameters['PhiSoilValue']] * len(self.stations) + ) + elif parameters['PhiSoil'] == 'Infer from Geologic Map (Bain et al. 2022)': + if ( + parameters['CohesionSoil'] + == 'Infer from Geologic Map (Bain et al. 2022)' + ): + self.phi_soil, self.coh_soil = infer_from_geologic_map( + parameters['GeologicMap'], + parameters['inputCRS'], + lon_station, + lat_station, + ) else: - self.phi_soil, _ = infer_from_geologic_map(parameters["GeologicMap"],\ - parameters['inputCRS'], lon_station, lat_station) + self.phi_soil, _ = infer_from_geologic_map( + parameters['GeologicMap'], + parameters['inputCRS'], + lon_station, + lat_station, + ) else: - self.phi_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ - lon_station, lat_station) + self.phi_soil = sampleRaster( + parameters['CohesionSoil'], + parameters['inputCRS'], + lon_station, + lat_station, + ) # coh_soil if self.coh_soil is None: - if parameters["CohesionSoil"] == "Defined (\"cohesionSoil\") in Site File (.csv)": - self.coh_soil = np.array([site['cohesionSoil'] for site in self.stations]) - elif parameters["CohesionSoil"] == "Use constant value (kPa)": - self.coh_soil = np.array([parameters["CohesionSoilValue"]]*len(self.stations)) - elif parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 2022)": - self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ - parameters['inputCRS'], lon_station, lat_station) + if ( + parameters['CohesionSoil'] + == 'Defined ("cohesionSoil") in Site File (.csv)' + ): + self.coh_soil = np.array( + [site['cohesionSoil'] for site in self.stations] + ) + elif parameters['CohesionSoil'] == 'Use constant value (kPa)': + self.coh_soil = np.array( + [parameters['CohesionSoilValue']] * len(self.stations) + ) + elif ( + parameters['CohesionSoil'] + == 'Infer from Geologic Map (Bain et al. 
2022)' + ): + self.coh_soil = infer_from_geologic_map( + parameters['GeologicMap'], + parameters['inputCRS'], + lon_station, + lat_station, + ) else: - self.coh_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ - lon_station, lat_station) + self.coh_soil = sampleRaster( + parameters['CohesionSoil'], + parameters['inputCRS'], + lon_station, + lat_station, + ) - print("Initiation finished") + print('Initiation finished') # noqa: T201 - def run(self, ln_im_data, eq_data, im_list, output_keys=['lsd_PGD_h'], additional_output_keys = []): - if ('PGA' in im_list): + def run( # noqa: D102 + self, + ln_im_data, + eq_data, + im_list, + output_keys=['lsd_PGD_h'], # noqa: B006 + additional_output_keys=[], # noqa: B006, ARG002 + ): + if 'PGA' in im_list: num_stations = len(self.stations) num_scenarios = len(eq_data) - PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] + PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] # noqa: N806, RUF015 for scenario_id in range(num_scenarios): num_rlzs = ln_im_data[scenario_id].shape[2] - im_data_scen = np.zeros([num_stations,\ - len(im_list)+len(output_keys), num_rlzs]) - im_data_scen[:,0:len(im_list),:] = ln_im_data[scenario_id] + im_data_scen = np.zeros( + [num_stations, len(im_list) + len(output_keys), num_rlzs] + ) + im_data_scen[:, 0 : len(im_list), :] = ln_im_data[scenario_id] for rlz_id in range(num_rlzs): - pga = np.exp(ln_im_data[scenario_id][:,PGA_col_id,rlz_id]) + pga = np.exp(ln_im_data[scenario_id][:, PGA_col_id, rlz_id]) mag = float(eq_data[scenario_id][0]) - model_output = self.model(pga, mag, self.slope, self.t_slope, - self.gamma_soil, self.phi_soil, - self.coh_soil) + model_output = self.model( + pga, + mag, + self.slope, + self.t_slope, + self.gamma_soil, + self.phi_soil, + self.coh_soil, + ) for i, key in enumerate(output_keys): - im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key] + im_data_scen[:, len(im_list) + i, rlz_id] = model_output[key] ln_im_data[scenario_id] = im_data_scen - im_list = im_list + output_keys + im_list = im_list + output_keys # noqa: PLR6104 else: - sys.exit(f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed.") + sys.exit( + f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed." 
# noqa: F541 + ) # print(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed."\ # , file=sys.stderr) # sys.stderr.write("test") # sys.exit(-1) - return ln_im_data, eq_data, im_list, - - def model( + return ( + ln_im_data, + eq_data, + im_list, + ) + + def model( # noqa: PLR6301 self, - pga, mag, # upstream PBEE RV - slope, t_slope, gamma_soil, phi_soil, coh_soil, # geotechnical/geologic - return_inter_params=False # to get intermediate params + pga, + mag, # upstream PBEE RV + slope, + t_slope, + gamma_soil, + phi_soil, + coh_soil, # geotechnical/geologic + return_inter_params=False, # to get intermediate params # noqa: FBT002, ARG002 ): - """Model""" - - # get dimensions + """Model""" # noqa: D202, D400 + + # get dimensions ndim = pga.ndim if ndim == 1: n_site = len(pga) n_sample = 1 - shape = (n_site) + shape = n_site else: shape = pga.shape n_site = shape[0] n_sample = shape[1] - + # initialize pgdef = np.zeros(shape) ky = np.zeros(shape) prob_d_eq_0 = np.zeros(shape) ln_pgdef_trunc = np.zeros(shape) nonzero_median_cdf = np.zeros(shape) - + # convert from deg to rad - slope_rad = (slope*np.pi/180).astype(np.float32) - phi_soil_rad = (phi_soil*np.pi/180).astype(np.float32) + slope_rad = (slope * np.pi / 180).astype(np.float32) + phi_soil_rad = (phi_soil * np.pi / 180).astype(np.float32) coh_soil = coh_soil.astype(np.float32) - + # yield acceleration - ky = np.tan(phi_soil_rad-slope_rad) + \ - coh_soil/( - gamma_soil * t_slope * np.cos(slope_rad)**2 * \ - (1+np.tan(phi_soil_rad)*np.tan(slope_rad))) - ky = np.maximum(ky,0.01) # to avoid ky = 0 + ky = np.tan(phi_soil_rad - slope_rad) + coh_soil / ( + gamma_soil + * t_slope + * np.cos(slope_rad) ** 2 + * (1 + np.tan(phi_soil_rad) * np.tan(slope_rad)) + ) + ky = np.maximum(ky, 0.01) # to avoid ky = 0 # aleatory sigma_val = 0.72 # deformation, eq 3b - ln_pgdef_trunc = \ - -4.684 + \ - -2.482*np.log(ky) + \ - -0.244*(np.log(ky))**2 + \ - 0.344*np.log(ky)*np.log(pga) + \ - 2.649*np.log(pga) + \ - -0.090*(np.log(pga))**2 + \ - 0.603*mag # cm + ln_pgdef_trunc = ( + -4.684 + + -2.482 * np.log(ky) + + -0.244 * (np.log(ky)) ** 2 + + 0.344 * np.log(ky) * np.log(pga) + + 2.649 * np.log(pga) + + -0.090 * (np.log(pga)) ** 2 + + 0.603 * mag + ) # cm nonzero_ln_pgdef = ln_pgdef_trunc.copy() - + # probability of zero displacement, eq. 
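For orientation, the model() body above computes an infinite-slope yield acceleration ky and then the Bray & Macedo (2019) eq. 3b median ln displacement. A worked single-site example with illustrative inputs (not values from the repository):

    import numpy as np

    slope_rad = np.radians(20.0)      # slope angle
    phi_rad = np.radians(30.0)        # soil friction angle
    coh, gamma, t = 20.0, 18.0, 3.0   # kPa, kN/m^3, m
    pga, mag = 0.4, 7.0               # g, Mw

    ky = np.tan(phi_rad - slope_rad) + coh / (
        gamma * t * np.cos(slope_rad) ** 2
        * (1 + np.tan(phi_rad) * np.tan(slope_rad))
    )
    ky = max(ky, 0.01)  # same floor as the code above

    ln_d = (-4.684 - 2.482 * np.log(ky) - 0.244 * np.log(ky) ** 2
            + 0.344 * np.log(ky) * np.log(pga) + 2.649 * np.log(pga)
            - 0.090 * np.log(pga) ** 2 + 0.603 * mag)  # ln(cm), eq. 3b
    print(f'ky = {ky:.3f} (fraction of g), median D = {np.exp(ln_d):.2f} cm')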
2 with Ts=0 if ndim == 1: prob_d_eq_0 = 1 - norm2_cdf( - -2.480 + \ - -2.970*np.log(ky) + \ - -0.120*(np.log(ky))**2 + \ - 2.780*np.log(pga), - 0, 1) + -2.480 + + -2.970 * np.log(ky) + + -0.120 * (np.log(ky)) ** 2 + + 2.780 * np.log(pga), + 0, + 1, + ) else: prob_d_eq_0 = 1 - norm2_cdf_2d( - -2.480 + \ - -2.970*np.log(ky) + \ - -0.120*(np.log(ky))**2 + \ - 2.780*np.log(pga), - 0, 1) + -2.480 + + -2.970 * np.log(ky) + + -0.120 * (np.log(ky)) ** 2 + + 2.780 * np.log(pga), + 0, + 1, + ) prob_d_eq_0 = nb_round(prob_d_eq_0, decimals=15) - + # apply non-zero displacement correction/condition, eq 11 - nonzero_median_cdf = 1 - .5/(1-prob_d_eq_0) - + nonzero_median_cdf = 1 - 0.5 / (1 - prob_d_eq_0) + # loop through numper of samples if ndim == 1: - nonzero_ln_pgdef[nonzero_median_cdf>0] = ln_pgdef_trunc[nonzero_median_cdf>0] + \ - sigma_val*norm2_ppf(nonzero_median_cdf[nonzero_median_cdf>0], 0.0, 1.0) + nonzero_ln_pgdef[nonzero_median_cdf > 0] = ln_pgdef_trunc[ + nonzero_median_cdf > 0 + ] + sigma_val * norm2_ppf( + nonzero_median_cdf[nonzero_median_cdf > 0], 0.0, 1.0 + ) else: for i in range(n_sample): - cond = nonzero_median_cdf[:,i]>0 - nonzero_ln_pgdef[cond,i] = ln_pgdef_trunc[cond,i] + \ - sigma_val*norm2_ppf(nonzero_median_cdf[cond,i], 0.0, 1.0) - + cond = nonzero_median_cdf[:, i] > 0 + nonzero_ln_pgdef[cond, i] = ln_pgdef_trunc[ + cond, i + ] + sigma_val * norm2_ppf(nonzero_median_cdf[cond, i], 0.0, 1.0) + # rest of actions - pgdef = np.exp(nonzero_ln_pgdef)/100 # also convert from cm to m - pgdef = np.maximum(pgdef,1e-5) # limit to - output = {'lsd_PGD_h':pgdef} - return output \ No newline at end of file + pgdef = np.exp(nonzero_ln_pgdef) / 100 # also convert from cm to m + pgdef = np.maximum(pgdef, 1e-5) # limit to + output = {'lsd_PGD_h': pgdef} + return output # noqa: RET504, DOC201 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py index 7f0fe1dfc..5eda3b1cc 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py @@ -82,7 +82,7 @@ def sampleRaster( # noqa: N802 sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 - return sample + return sample # noqa: DOC201 # Helper functions @@ -96,9 +96,13 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 try: user_crs_input = CRS.from_user_input(vector_crs).to_epsg() if vector_gdf.crs.to_epsg() != user_crs_input: - sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models") - except: - print("The input CRS ({xy_crs}) defined for liquefaction triggering models is invalid. The CRS of vector files are used") + sys.exit( + f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models" + ) + except: # noqa: E722 + print( # noqa: T201 + 'The input CRS ({xy_crs}) defined for liquefaction triggering models is invalid. 
The CRS of vector files are used' # noqa: RUF027 + ) # if vector_gdf.crs != vector_crs: # sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models") @@ -159,7 +163,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 - return gdf_sites + return gdf_sites # noqa: DOC201 def find_additional_output_req(liq_info, current_step): # noqa: D103 @@ -451,7 +455,7 @@ def model(self, pgv, pga, mag): # liq_susc[prob_liq==zero_prob_liq] = 'none' - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 # ----------------------------------------------------------- @@ -654,7 +658,7 @@ def model( pga_mag = pga / (10**2.24 / mag**2.56) prob_liq[pga_mag < 0.1] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 # ----------------------------------------------------------- @@ -817,7 +821,7 @@ def model(self, pgv, pga, mag): # for precip > 1700 mm, set prob to "0" prob_liq[self.precip > 1700] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 # Lateral Spreading: @@ -983,7 +987,7 @@ def model( # output['ratio'] = ratio # return - return output # noqa: RET504 + return output # noqa: DOC201, RET504 # Settlement: @@ -1056,7 +1060,7 @@ def model( pass # return - return output + return output # noqa: DOC201 def run(self, ln_im_data, eq_data, im_list): # noqa: D102 output_keys = ['liq_PGD_v'] diff --git a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py index f521ec680..806cad47e 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py +++ b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py @@ -448,7 +448,7 @@ def interp_wind_by_height(pws_ip, height_simu, height_ref): ) # return - return pws_op + return pws_op # noqa: DOC201 def gust_factor_ESDU(gd_c, gd_t): # noqa: N802 @@ -475,7 +475,7 @@ def gust_factor_ESDU(gd_c, gd_t): # noqa: N802 gd_c, gd, gf, left=gf[0], right=gf[-1] ) # return - return gf_t # noqa: RET504 + return gf_t # noqa: DOC201, RET504 def export_pws(stations, pws, output_dir, filename='EventGrid.csv'): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py index 8abcf54dd..3635b4fb6 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py @@ -156,8 +156,8 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print('CreateScenario: error - no storm name or year is provided.') # noqa: T201 # Searching the storm try: - df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] - df_chs = df_chs[df_chs[('SEASON', 'Year')] == storm_year] + df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] # noqa: RUF031 + df_chs = df_chs[df_chs[('SEASON', 
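The CreateScenario hunks beginning above index the IBTrACS-style table with two-level column keys such as ('NAME', ' ') and ('USA_LAT', 'degrees_north'); that layout is what pandas produces when a CSV carrying a name row plus a units row is read with a two-row header. A small sketch of the pattern (the file name is hypothetical):

    import pandas as pd

    # Name row + units row -> MultiIndex columns addressed by tuples.
    df_hs = pd.read_csv('ibtracs_sample.csv', header=[0, 1])
    storm = df_hs[df_hs[('NAME', ' ')] == 'KATRINA']
    lats = storm[('USA_LAT', 'degrees_north')]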
'Year')] == storm_year] # noqa: RUF031 except: # noqa: E722 print('CreateScenario: error - the storm is not found.') # noqa: T201 if len(df_chs.values) == 0: @@ -166,10 +166,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq # Collecting storm properties track_lat = [] track_lon = [] - for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011 + for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011 + for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON @@ -177,10 +177,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print( # noqa: T201 'CreateScenario: warning - the USA_LAT and USA_LON are not available, switching to LAT and LON.' ) - for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011 + for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011 + for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 if len(track_lat) == 0: @@ -197,7 +197,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq terrain_data = [] # Storm characteristics at the landfall dist2land = [] - for x in df_chs[('DIST2LAND', 'km')]: + for x in df_chs[('DIST2LAND', 'km')]: # noqa: RUF031 if x != ' ': dist2land.append(x) # noqa: PERF401 if len(track_lat) == 0: @@ -237,14 +237,14 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq track_simu = track_lat # Reading data try: - landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) - landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) + landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 + landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 except: # noqa: E722 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON - landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) - landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) + landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 + landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 try: - landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) + landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) # noqa: RUF031 except: # noqa: E722 print('CreateScenario: error - no landing angle is found.') # noqa: T201 if landfall_ang > 180.0: # noqa: PLR2004 @@ -254,7 +254,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq - np.min( [ float(x) - for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011 + for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011, RUF031 .iloc[tmploc - 5 :] .values.tolist() if x != ' ' @@ -262,11 +262,11 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq ) ) landfall_spd = ( - float(df_chs[('STORM_SPEED', 'kts')].iloc[tmploc]) * 0.51444 + float(df_chs[('STORM_SPEED', 
'kts')].iloc[tmploc]) * 0.51444 # noqa: RUF031 ) # convert knots/s to km/s try: landfall_rad = ( - float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 + float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found @@ -274,7 +274,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq try: # If the default option (USA_RMW) is not available, switching to REUNION_RMW landfall_rad = ( - float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 + float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py index a03a64c08..caae970e4 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py @@ -68,7 +68,7 @@ def create_stations(input_file, output_file, min_id, max_id): stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: RET504 + return run_tag # noqa: DOC201, RET504 # Max and Min IDs stn_ids_min = np.min(stn_df.index.values) stn_ids_max = np.max(stn_df.index.values) diff --git a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py index d74a470a2..57c128fc1 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py @@ -168,7 +168,7 @@ def __interp_z0(self, lat, lon): if not z0: z0 = 0.01 # return - return z0 + return z0 # noqa: DOC201 def add_reference_terrain(self, terrain_info): """add_reference_terrainL specifying reference z0 values for a set of polygons @@ -595,4 +595,4 @@ def compute_wind_field(self): # noqa: PLR0914 def get_station_data(self): """get_station_data: returning station data""" # noqa: D400 # return station dictionary - return self.station + return self.station # noqa: DOC201 diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py index ed6a60e11..78faad354 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py +++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py @@ -374,7 +374,7 @@ def _load_h5_plom(self, filename): if cur_var in self.dbserver.get_item_adds() and ATTR_MAP[cur_var]: # noqa: F405 # read in cur_data = store[cur_var] - cur_dshape = tuple( + cur_dshape = tuple( # noqa: C409 [x[0] for x in store['/DS_' + cur_var[1:]].values.tolist()] # noqa: PD011 ) if cur_dshape == (1,): @@ -416,7 +416,7 @@ def _load_h5_data_X(self, filename): # noqa: N802 item_name='X0', col_name=list(self.X0.columns), item=self.X0 ) - return self.X0.to_numpy() + return self.X0.to_numpy() # noqa: DOC201 except: # noqa: E722 return None @@ -491,7 +491,7 @@ def load_h5(self, filename): ) if '/X0' in self.dbserver.get_name_list(): self.X0 = self.dbserver.get_item('X0', table_like=True) - return self.X0.to_numpy() + return self.X0.to_numpy() # noqa: DOC201 else: # noqa: RET505 self.logfile.write_msg( msg='PLoM.load_h5: the original X0 data not found in the loaded data.', @@ -598,7 +598,7 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST): # noqa: C901, 
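On the conversion constants in the CreateScenario landfall block earlier in this hunk series: 0.51444 is the knots-to-m/s factor (1 kt = 0.514444 m/s), so the inline "knots/s to km/s" comment overstates the units, and 1.60934 is the statute-mile-to-km factor; if the USA_RMW and REUNION_RMW columns really carry nautical miles, the exact factor would be 1.852. A constants sketch, offered as a suggestion rather than the repository's choice:

    KNOT_TO_MPS = 0.514444        # 1 knot = 0.514444 m/s
    STATUTE_MILE_TO_KM = 1.60934  # the factor currently applied to 'nmile' columns
    NAUTICAL_MILE_TO_KM = 1.852   # exact nautical-mile factor

    landfall_spd = 15.0 * KNOT_TO_MPS          # e.g. 15 kt translation speed, m/s
    landfall_rad = 25.0 * NAUTICAL_MILE_TO_KM  # e.g. 25 nmi radius of max wind, km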
N802, F405 msg_type='WARNING', msg_level=0, ) - return False + return False # noqa: DOC201 map_order = [FULL_TASK_LIST.index(x) for x in self.cur_task_list] # noqa: F405 if map_order != sorted(map_order): self.logfile.write_msg( @@ -961,7 +961,7 @@ def DataNormalization(self, X): # noqa: N802, N803, PLR6301 X_scaled, alpha, x_min = plom.scaling(X) # noqa: N806 x_mean = plom.mean(X_scaled) - return X_scaled, alpha, x_min, x_mean + return X_scaled, alpha, x_min, x_mean # noqa: DOC201 def RunPCA(self, X_origin, epsilon_pca): # noqa: N802, N803, D102 # ...PCA... @@ -995,7 +995,7 @@ def RunKDE(self, X, epsilon_kde): # noqa: N802, N803, PLR6301 (s_v, c_v, hat_s_v) = plom.parameters_kde(X) K, b = plom.K(X, epsilon_kde) # noqa: N806 - return s_v, c_v, hat_s_v, K, b + return s_v, c_v, hat_s_v, K, b # noqa: DOC201 def DiffMaps(self, H, K, b, tol=0.1): # noqa: N802, N803, D102 # ..diff maps basis... diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py index ac55ef66f..8988315f9 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py +++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py @@ -102,7 +102,7 @@ def kernel(x, y, epsilon): """ # noqa: D205, D400 dist = np.linalg.norm(x - y) ** 2 k = np.exp(-dist / (4 * epsilon)) - return k # noqa: RET504 + return k # noqa: DOC201, RET504 def K(eta, epsilon): # noqa: N802 @@ -124,7 +124,7 @@ def K(eta, epsilon): # noqa: N802 K[i, j] = 1 row_sum = row_sum + 1 # noqa: PLR6104 b[i, i] = row_sum - return K, b + return K, b # noqa: DOC201 def g(K, b): # noqa: N803 @@ -142,7 +142,7 @@ def g(K, b): # noqa: N803 norm = np.diagonal(np.transpose(g).dot(b).dot(g)) sqrt_norm = np.sqrt(1 / norm) g = np.multiply(g, sqrt_norm) - return g, eigenvalues + return g, eigenvalues # noqa: DOC201 def m(eigenvalues, tol=0.1): @@ -153,7 +153,7 @@ def m(eigenvalues, tol=0.1): m = 0 while i < len(eigenvalues) and m == 0: if eigenvalues[i] <= eigenvalues[1] * tol: - return i + 1 + return i + 1 # noqa: DOC201 i = i + 1 # noqa: PLR6104 if m == 0: return max(round(len(eigenvalues) / 10), 3) @@ -170,7 +170,7 @@ def mean(x): x_mean = np.zeros((dim, 1)) for i in range(dim): x_mean[i] = np.mean(x[i, :]) - return x_mean + return x_mean # noqa: DOC201 def covariance(x): @@ -187,7 +187,7 @@ def covariance(x): C = C + (np.resize(x[:, i], x_mean.shape) - x_mean).dot( # noqa: N806, PLR6104 np.transpose(np.resize(x[:, i], x_mean.shape) - x_mean) ) - return C / (N - 1) + return C / (N - 1) # noqa: DOC201 def PCA(x, tol): # noqa: N802 @@ -226,7 +226,7 @@ def PCA(x, tol): # noqa: N802 1 / (mu) ) # no need to do the sqrt because we use the singularvalues eta = mu_sqrt_inv.dot(np.transpose(phi)).dot(x - x_mean) - return ( + return ( # noqa: DOC201 eta, mu, phi, @@ -243,7 +243,7 @@ def parameters_kde(eta): s_v = (4 / (N * (2 + nu))) ** (1 / (nu + 4)) # (4/(N*(2+nu)))**(1/(nu+4)) hat_s_v = s_v / sqrt(s_v**2 + ((N - 1) / N)) c_v = 1 / (sqrt(2 * pi) * hat_s_v) ** nu - return s_v, c_v, hat_s_v + return s_v, c_v, hat_s_v # noqa: DOC201 def kde(y, eta, s_v=None, c_v=None, hat_s_v=None): @@ -254,7 +254,7 @@ def kde(y, eta, s_v=None, c_v=None, hat_s_v=None): N = eta.shape[1] # noqa: N806 if s_v == None or c_v == None or hat_s_v == None: # noqa: E711 s_v, c_v, hat_s_v = parameters_kde(eta) - return c_v * rhoctypes( + return c_v * rhoctypes( # noqa: DOC201 np.resize(y, (y.shape[0] * y.shape[1], 1)), np.resize(np.transpose(eta), (nu * N, 1)), nu, @@ -288,7 +288,7 @@ def PCA2(C_h_hat_eta, beta, tol): # noqa: N802, N803 lambda_c = 
lambda_c[0:nu_c] psi = psi[:, 0:nu_c] b_c = np.transpose(psi).dot(beta) - return b_c, psi + return b_c, psi # noqa: DOC201 def h_c(eta, g_c, phi, mu, psi, x_mean): # noqa: D103 diff --git a/modules/performUQ/SimCenterUQ/PLoM/general.py b/modules/performUQ/SimCenterUQ/PLoM/general.py index e59f44039..b0d76ac97 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/general.py +++ b/modules/performUQ/SimCenterUQ/PLoM/general.py @@ -149,13 +149,13 @@ def _create_export_dir(self): dir_export = os.path.join(self.db_dir, 'DataOut') # noqa: PTH118 try: os.makedirs(dir_export, exist_ok=True) # noqa: PTH103 - return dir_export # noqa: TRY300 + return dir_export # noqa: DOC201, TRY300 except: # noqa: E722 return None def get_item_adds(self): """Returning the full list of data items""" # noqa: D400, D401 - return self._item_adds + return self._item_adds # noqa: DOC201 def add_item( self, @@ -190,7 +190,7 @@ def add_item( store.close() # noqa: RET503 else: # Not supported data_type - return False + return False # noqa: DOC201 def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: FBT002 """Getting a specific data item""" # noqa: D400, D401 @@ -199,7 +199,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: store = pd.HDFStore(self.db_path, 'r') try: item = store.get(item_name) - item_shape = tuple( + item_shape = tuple( # noqa: C409 [ x[0] for x in self.get_item_shape( # noqa: PD011 @@ -214,7 +214,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: finally: store.close() - return item + return item # noqa: DOC201 elif data_type == 'ConstraintsFile': store = pd.HDFStore(self.db_path, 'r') try: @@ -247,7 +247,7 @@ def get_item_shape(self, item_name=None): item_shape = None store.close() - return item_shape + return item_shape # noqa: DOC201 def get_name_list(self): """Returning the keys of the database""" # noqa: D400, D401 @@ -257,7 +257,7 @@ def get_name_list(self): except: # noqa: E722 name_list = [] store.close() - return name_list + return name_list # noqa: DOC201 def export(self, data_name=None, filename=None, file_format='csv'): """Exporting the specific data item @@ -266,7 +266,7 @@ def export(self, data_name=None, filename=None, file_format='csv'): """ # noqa: D205, D400, D401 d = self.get_item(item_name=data_name[1:], table_like=True) if d is None: - return 1 + return 1 # noqa: DOC201 if filename is None: filename = os.path.join( # noqa: PTH118 self.dir_export, str(data_name).replace('/', '') + '.' 
+ file_format @@ -311,7 +311,7 @@ def refresh_status(self): # previous task not completed -> this task also needs to rerun self.status = False - return self.status + return self.status # noqa: DOC201 # self-check if Counter(self.avail_var_list) == Counter(self.full_var_list) and len( @@ -355,7 +355,7 @@ def refresh_status(self): if not cur_task.status: self.status = False - return self.status + return self.status # noqa: DOC201 while cur_task.next_task: cur_task = cur_task.next_task if not cur_task.status: diff --git a/modules/performUQ/SimCenterUQ/runPLoM.py b/modules/performUQ/SimCenterUQ/runPLoM.py index 9bc4130fe..368a55fe3 100644 --- a/modules/performUQ/SimCenterUQ/runPLoM.py +++ b/modules/performUQ/SimCenterUQ/runPLoM.py @@ -428,7 +428,7 @@ def _create_variables(self, training_data): # check if training data source from simulation if training_data == 'Sampling and Simulation': - return x_dim, y_dim, rv_name, g_name + return x_dim, y_dim, rv_name, g_name # noqa: DOC201 # read X and Y variable names for rv in job_config['randomVariables']: @@ -562,7 +562,7 @@ def _parse_plom_parameters(self, surrogateInfo): # noqa: C901, N803 run_flag = 1 # return - return run_flag + return run_flag # noqa: DOC201 def _set_up_parallel(self): """_set_up_parallel: set up modules and variables for parallel jobs @@ -592,7 +592,7 @@ def _set_up_parallel(self): run_flag = 1 # return - return run_flag + return run_flag # noqa: DOC201 def _load_variables(self, do_sampling, do_simulation): # noqa: C901 """_load_variables: load variables @@ -666,7 +666,7 @@ def _load_variables(self, do_sampling, do_simulation): # noqa: C901 # run_flag = 1 # return - return run_flag + return run_flag # noqa: DOC201 # KZ, 07/24: loading user-defined hyper-parameter files def _load_hyperparameter(self): diff --git a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py index 2a455bcdb..76a95dcef 100644 --- a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py +++ b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py @@ -142,4 +142,4 @@ def log_likelihood( loglike += ll else: loglike += -np.inf - return loglike + return loglike # noqa: DOC201 diff --git a/modules/performUQ/UCSD_UQ/mwg_sampler.py b/modules/performUQ/UCSD_UQ/mwg_sampler.py index 8e4cfdcd0..d5073bbe6 100644 --- a/modules/performUQ/UCSD_UQ/mwg_sampler.py +++ b/modules/performUQ/UCSD_UQ/mwg_sampler.py @@ -328,7 +328,7 @@ def tune(scale, acc_rate): >0.95 x 10 """ # noqa: D205, D400 if acc_rate < 0.01: # noqa: PLR2004 - return scale * 0.01 + return scale * 0.01 # noqa: DOC201 elif acc_rate < 0.05: # noqa: RET505, PLR2004 return scale * 0.1 elif acc_rate < 0.2: # noqa: PLR2004 diff --git a/modules/performUQ/UCSD_UQ/runFEM.py b/modules/performUQ/UCSD_UQ/runFEM.py index d2c92799e..126e77560 100644 --- a/modules/performUQ/UCSD_UQ/runFEM.py +++ b/modules/performUQ/UCSD_UQ/runFEM.py @@ -101,4 +101,4 @@ def runFEM( # noqa: N802 preds = np.atleast_2d([-np.inf] * sum(edpLengthsList)).reshape((1, -1)) ll = -np.inf - return (ll, preds) + return (ll, preds) # noqa: DOC201 diff --git a/modules/performUQ/UCSD_UQ/runTMCMC.py b/modules/performUQ/UCSD_UQ/runTMCMC.py index c689e5fc4..7a9bed11f 100644 --- a/modules/performUQ/UCSD_UQ/runTMCMC.py +++ b/modules/performUQ/UCSD_UQ/runTMCMC.py @@ -561,4 +561,4 @@ def run_TMCMC( # noqa: N802, PLR0913, PLR0917 f'\n\tShutdown mpi4py executor pool for runType: {run_type}' ) - return mytrace, total_log_evidence + return mytrace, total_log_evidence # noqa: DOC201 diff --git 
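In the PLoM_library hunks above, kernel() is the Gaussian diffusion-maps kernel k(x, y) = exp(-||x - y||^2 / (4 * epsilon)), K() assembles the pairwise kernel matrix, and b holds the row sums on its diagonal. A vectorized sketch of the same construction (illustrative, not the repository implementation):

    import numpy as np

    def kernel_matrices(eta, epsilon):
        # eta: (nu, N) array holding N samples; returns K and b = diag(row sums).
        sq_dist = np.sum((eta[:, :, None] - eta[:, None, :]) ** 2, axis=0)
        K = np.exp(-sq_dist / (4.0 * epsilon))
        return K, np.diag(K.sum(axis=1))

    eta = np.random.default_rng(0).normal(size=(3, 5))
    K, b = kernel_matrices(eta, epsilon=1.0)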
a/modules/performUQ/common/ERAClasses/ERACond.py b/modules/performUQ/common/ERAClasses/ERACond.py index 2f47d13e4..db22ed72d 100644 --- a/modules/performUQ/common/ERAClasses/ERACond.py +++ b/modules/performUQ/common/ERAClasses/ERACond.py @@ -146,7 +146,7 @@ class description. if type(param) == types.LambdaType: # noqa: E721 self.Param = param else: - raise RuntimeError('The input param must be a lambda function.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The input param must be a lambda function.') # noqa: DOC501, EM101, RUF100, TRY003 self.modParam = param @@ -388,7 +388,7 @@ def equation(param): for i in range(len(Par)): Par[i] = np.squeeze(Par[i]) - return Par + return Par # noqa: DOC201 # %% def condCDF(self, x, cond): # noqa: C901, N802 @@ -442,7 +442,7 @@ def condCDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': CDF = stats.weibull_min.cdf(x, c=par[1], scale=par[0]) # noqa: N806 - return CDF + return CDF # noqa: DOC201 # %% def condiCDF(self, y, cond): # noqa: C901, N802 @@ -496,7 +496,7 @@ def condiCDF(self, y, cond): # noqa: C901, N802 elif self.Name == 'weibull': iCDF = stats.weibull_min.ppf(y, c=par[1], scale=par[0]) # noqa: N806 - return iCDF + return iCDF # noqa: DOC201 # %% def condPDF(self, x, cond): # noqa: C901, N802 @@ -550,7 +550,7 @@ def condPDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': PDF = stats.weibull_min.pdf(x, c=par[1], scale=par[0]) # noqa: N806 - return PDF + return PDF # noqa: DOC201 # %% def condRandom(self, cond): # noqa: C901, N802 @@ -602,4 +602,4 @@ def condRandom(self, cond): # noqa: C901, N802 elif self.Name == 'weibull': Random = stats.weibull_min.rvs(c=par[1], scale=par[0]) # noqa: N806 - return Random + return Random # noqa: DOC201 diff --git a/modules/performUQ/common/ERAClasses/ERADist.py b/modules/performUQ/common/ERAClasses/ERADist.py index 96d78b23d..b824c9c6f 100644 --- a/modules/performUQ/common/ERAClasses/ERADist.py +++ b/modules/performUQ/common/ERAClasses/ERADist.py @@ -174,7 +174,7 @@ class description. self.Par = {'n': int(val[0]), 'p': val[1]} self.Dist = stats.binom(n=self.Par['n'], p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Binomial distribution is not defined for your parameters.' # noqa: EM101 ) @@ -183,7 +183,7 @@ class description. self.Par = {'k': np.around(val[0], 0)} self.Dist = stats.chi2(df=self.Par['k']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Chi-Squared distribution is not defined for your parameters.' # noqa: EM101 ) @@ -192,7 +192,7 @@ class description. self.Par = {'lambda': val[0]} self.Dist = stats.expon(scale=1 / self.Par['lambda']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Exponential distribution is not defined for your parameters.' # noqa: EM101 ) @@ -205,7 +205,7 @@ class description. loc=self.Par['a_n'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Frechet distribution is not defined for your parameters.' # noqa: EM101 ) @@ -216,7 +216,7 @@ class description. a=self.Par['k'], scale=1 / self.Par['lambda'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Gamma distribution is not defined for your parameters.' # noqa: EM101 ) @@ -226,7 +226,7 @@ class description. 
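The ERACond methods patched above evaluate conditional distributions through scipy.stats calls with parameters computed from the conditioning values; the Weibull branch, for instance, uses weibull_min with c=par[1] (shape) and scale=par[0] (scale). A round-trip sanity check of that convention:

    import numpy as np
    from scipy import stats

    par = (2.0, 1.5)  # (scale a_n, shape k), the order used above
    x = np.linspace(0.1, 5.0, 50)
    cdf = stats.weibull_min.cdf(x, c=par[1], scale=par[0])
    assert np.allclose(stats.weibull_min.ppf(cdf, c=par[1], scale=par[0]), x)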
self.Par = {'p': val} self.Dist = stats.geom(p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Geometric distribution is not defined for your parameters.' # noqa: EM101 ) @@ -239,7 +239,7 @@ class description. loc=self.Par['epsilon'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Generalized Extreme Value gistribution is not defined for your parameters.' # noqa: EM101 ) @@ -252,7 +252,7 @@ class description. loc=-self.Par['epsilon'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Generalized Extreme Value distribution is not defined for your parameters.' # noqa: EM101 ) @@ -263,7 +263,7 @@ class description. scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Gumbel distribution is not defined for your parameters.' # noqa: EM101 ) @@ -274,7 +274,7 @@ class description. scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Gumbel distribution is not defined for your parameters.' # noqa: EM101 ) @@ -285,7 +285,7 @@ class description. s=self.Par['sig_lnx'], scale=np.exp(self.Par['mu_lnx']) ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Lognormal distribution is not defined for your parameters.' # noqa: EM101 ) @@ -299,7 +299,7 @@ class description. self.Par = {'k': val[0], 'p': val[1]} self.Dist = stats.nbinom(n=self.Par['k'], p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Negative Binomial distribution is not defined for your parameters.' # noqa: EM101 ) @@ -310,7 +310,7 @@ class description. loc=self.Par['mu'], scale=self.Par['sigma'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Normal distribution is not defined for your parameters.' # noqa: EM101 ) @@ -323,7 +323,7 @@ class description. loc=self.Par['x_m'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Pareto distribution is not defined for your parameters.' # noqa: EM101 ) @@ -334,7 +334,7 @@ class description. self.Par = {'lambda': val[0]} self.Dist = stats.poisson(mu=self.Par['lambda']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Poisson distribution is not defined for your parameters.' # noqa: EM101 ) @@ -343,7 +343,7 @@ class description. self.Par = {'v': val[0], 't': val[1]} self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Poisson distribution is not defined for your parameters.' # noqa: EM101 ) @@ -353,7 +353,7 @@ class description. self.Par = {'alpha': alpha} self.Dist = stats.rayleigh(scale=self.Par['alpha']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Rayleigh distribution is not defined for your parameters.' # noqa: EM101 ) @@ -365,11 +365,11 @@ class description. 
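The PAR branches above freeze scipy.stats distributions under ERA's parameter conventions. Two examples mirroring the keyword patterns visible in the diff; the gumbel_r and lognorm constructor names are inferred from those keywords rather than shown explicitly:

    import numpy as np
    from scipy import stats

    gumbel = stats.gumbel_r(loc=3.0, scale=0.8)        # loc=b_n, scale=a_n
    lognorm = stats.lognorm(s=0.4, scale=np.exp(1.2))  # s=sig_lnx, scale=exp(mu_lnx)

    # With this convention the lognormal median is exp(mu_lnx):
    assert np.isclose(lognorm.median(), np.exp(1.2))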
elif name.lower() == 'truncatednormal': if val[2] >= val[3]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The upper bound a must be larger than the lower bound b.' # noqa: EM101 ) if val[1] < 0: - raise RuntimeError('sigma must be larger than 0.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('sigma must be larger than 0.') # noqa: DOC501, EM101, RUF100, TRY003 self.Par = { 'mu_n': val[0], 'sig_n': val[1], @@ -390,7 +390,7 @@ class description. scale=self.Par['upper'] - self.Par['lower'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Uniform distribution is not defined for your parameters.' # noqa: EM101 ) @@ -401,12 +401,12 @@ class description. c=self.Par['k'], scale=self.Par['a_n'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Weibull distribution is not defined for your parameters.' # noqa: EM101 ) else: - raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501 + raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501, RUF100 # ---------------------------------------------------------------------------- # if the distribution is defined by its moments @@ -414,11 +414,11 @@ class description. val = np.array(val, ndmin=1, dtype=float) if val.size > 1 and val[1] < 0: - raise RuntimeError('The standard deviation must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The standard deviation must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 if name.lower() == 'beta': if val[3] <= val[2]: - raise RuntimeError('Please select an other support [a,b].') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select an other support [a,b].') # noqa: DOC501, EM101, RUF100, TRY003 r = ( ((val[3] - val[0]) * (val[0] - val[2]) / val[1] ** 2 - 1) * (val[0] - val[2]) @@ -427,7 +427,7 @@ class description. s = r * (val[3] - val[0]) / (val[0] - val[2]) # Evaluate if distribution can be defined on the parameters if r <= 0 and s <= 0: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 self.Par = {'r': r, 's': s, 'a': val[2], 'b': val[3]} self.Dist = stats.beta( a=self.Par['r'], @@ -444,30 +444,30 @@ class description. 
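The MOM branch for the beta distribution above recovers the shape pair (r, s) from a target mean, standard deviation, and support [a, b]. A round-trip check of those equations with illustrative targets:

    import numpy as np
    from scipy import stats

    m, s, a, b = 0.4, 0.15, 0.0, 1.0
    r = ((b - m) * (m - a) / s**2 - 1) * (m - a) / (b - a)
    s_par = r * (b - m) / (m - a)
    dist = stats.beta(a=r, b=s_par, loc=a, scale=b - a)
    assert np.isclose(dist.mean(), m) and np.isclose(dist.std(), s)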
if n % 1 <= 10 ** (-4): n = int(n) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 if p >= 0 and p <= 1 and n > 0: self.Par = {'n': n, 'p': p} self.Dist = stats.binom(n=self.Par['n'], p=self.Par['p']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'chisquare': if val[0] > 0 and val[0] < np.inf and val[0] % 1 <= 10 ** (-4): self.Par = {'k': np.around(val[0], 0)} self.Dist = stats.chi2(df=self.Par['k']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'exponential': try: lam = 1 / val[0] except ZeroDivisionError: - raise RuntimeError('The first moment cannot be zero!') # noqa: B904, DOC501, EM101, TRY003 + raise RuntimeError('The first moment cannot be zero!') # noqa: B904, DOC501, EM101, RUF100, TRY003 if lam >= 0: self.Par = {'lambda': lam} self.Dist = stats.expon(scale=1 / self.Par['lambda']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'frechet': par0 = 2.0001 @@ -487,7 +487,7 @@ def equation(par): k = sol[0][0] a_n = val[0] / special.gamma(1 - 1 / k) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'fsolve could not converge to a solution, therefore' # noqa: EM101 'the parameters of the Frechet distribution could not be determined.' 
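The Frechet MOM branch above solves for the shape k with fsolve and then sets a_n = mean / gamma(1 - 1/k). The equation body is not visible in this hunk, but a residual consistent with the visible pieces matches the coefficient of variation, which for a Frechet variable is sqrt(gamma(1 - 2/k) / gamma(1 - 1/k)^2 - 1) and is defined only for k > 2, hence the starting point just above 2. A sketch under that assumption:

    import numpy as np
    from scipy import optimize, special

    mean, std = 5.0, 2.0  # illustrative target moments

    def residual(k):
        cov = np.sqrt(
            special.gamma(1 - 2 / k) / special.gamma(1 - 1 / k) ** 2 - 1
        )
        return cov - std / mean

    sol = optimize.fsolve(residual, x0=2.0001, full_output=True)
    if sol[2] == 1:  # ier == 1 means fsolve converged
        k = sol[0][0]
        a_n = mean / special.gamma(1 - 1 / k)  # as in the code above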
) @@ -499,7 +499,7 @@ def equation(par): loc=self.Par['a_n'], ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'gamma': # Solve system of equations for the parameters @@ -512,7 +512,7 @@ def equation(par): a=self.Par['k'], scale=1 / self.Par['lambda'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'geometric': # Solve Equation for the parameter based on the first moment @@ -521,7 +521,7 @@ def equation(par): self.Par = {'p': p} self.Dist = stats.geom(p=self.Par['p']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'gev': beta = val[2] @@ -530,7 +530,7 @@ def equation(par): alpha = val[1] * np.sqrt(6) / np.pi # scale parameter epsilon = val[2] - np.euler_gamma * alpha # location parameter elif beta >= 0.5: # noqa: PLR2004 - raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, RUF100, TRY003 else: alpha = ( abs(beta) @@ -555,7 +555,7 @@ def equation(par): alpha = val[1] * np.sqrt(6) / np.pi # scale parameter epsilon = val[2] + np.euler_gamma * alpha # location parameter elif beta >= 0.5: # noqa: PLR2004 - raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, RUF100, TRY003 else: alpha = ( abs(beta) @@ -583,7 +583,7 @@ def equation(par): scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'gumbelmin': # solve two equations for the parameters of the distribution @@ -595,11 +595,11 @@ def equation(par): scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'lognormal': if val[0] <= 0: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Please select other moments, the first moment must be greater than zero.' 
# noqa: EM101 ) # solve two equations for the parameters of the distribution @@ -621,9 +621,9 @@ def equation(par): self.Par = {'k': k, 'p': p} self.Dist = stats.nbinom(n=self.Par['k'], p=self.Par['p']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif (name.lower() == 'normal') or (name.lower() == 'gaussian'): self.Par = {'mu': val[0], 'sigma': val[1]} @@ -640,7 +640,7 @@ def equation(par): loc=self.Par['x_m'], ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'poisson': n = len(val) @@ -649,17 +649,17 @@ def equation(par): self.Par = {'lambda': val[0]} self.Dist = stats.poisson(mu=self.Par['lambda']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 if n == 2: # noqa: PLR2004 if val[0] > 0 and val[1] > 0: v = val[0] / val[1] if val[1] <= 0: - raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, RUF100, TRY003 self.Par = {'v': v, 't': val[1]} self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'rayleigh': alpha = val[0] / np.sqrt(np.pi / 2) @@ -667,7 +667,7 @@ def equation(par): self.Par = {'alpha': alpha} self.Dist = stats.rayleigh(scale=self.Par['alpha']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif (name.lower() == 'standardnormal') or ( name.lower() == 'standardgaussian' @@ -677,11 +677,11 @@ def equation(par): elif name.lower() == 'truncatednormal': if val[2] >= val[3]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The upper bound a must be larger than the lower bound b.' # noqa: EM101 ) if val[0] <= val[2] or val[0] >= val[3]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The mean of the distribution must be within the interval [a,b].' # noqa: EM101 ) @@ -727,7 +727,7 @@ def equation(par): b=b_mod, ) else: - raise RuntimeError('fsolve did not converge.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('fsolve did not converge.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'uniform': # compute parameters @@ -756,7 +756,7 @@ def equation(par): k = sol[0][0] a_n = val[0] / special.gamma(1 + 1 / k) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'fsolve could not converge to a solution, therefore' # noqa: EM101 'the parameters of the Weibull distribution could not be determined.' 
) @@ -766,17 +766,17 @@ def equation(par): c=self.Par['k'], scale=self.Par['a_n'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 else: - raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501 + raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501, RUF100 # ---------------------------------------------------------------------------- # if the distribution is to be fitted to a data vector elif opt.upper() == 'DATA': if name.lower() == 'beta': if val[2] <= val[1]: - raise RuntimeError('Please select a different support [a,b].') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select a different support [a,b].') # noqa: DOC501, EM101, RUF100, TRY003 if min(val[0]) >= val[1] and max(val[0]) <= val[2]: pars = stats.beta.fit( val[0], floc=val[1], fscale=val[2] - val[1] @@ -789,7 +789,7 @@ def equation(par): scale=self.Par['b'] - self.Par['a'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be in the support range [a,b].' # noqa: EM101 ) @@ -798,12 +798,12 @@ def equation(par): if val[1] % 1 <= 10 ** (-4) and val[1] > 0: val[1] = int(val[1]) else: - raise RuntimeError('n must be a positive integer.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('n must be a positive integer.') # noqa: DOC501, EM101, RUF100, TRY003 X = np.array(val[0]) # noqa: N806 if all((X) % 1 <= 10 ** (-4)) and all(X >= 0) and all(val[1] >= X): X = np.around(X, 0) # noqa: N806 else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be integers in the range [0,n].' # noqa: EM101 ) val[0] = np.mean(val[0]) / val[1] @@ -816,7 +816,7 @@ def equation(par): self.Par = {'k': np.around(pars[0], 0)} self.Dist = stats.chi2(df=self.Par['k']) else: - raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'exponential': if min(val) >= 0: @@ -824,11 +824,11 @@ def equation(par): self.Par = {'lambda': 1 / pars[1]} self.Dist = stats.expon(scale=1 / self.Par['lambda']) else: - raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'frechet': if min(val) < 0: - raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 def equation(par): return -np.sum( @@ -852,7 +852,7 @@ def equation(par): loc=self.Par['a_n'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Maximum likelihood estimation did not converge.' # noqa: EM101 ) @@ -868,7 +868,7 @@ def equation(par): self.Par = {'p': 1 / np.mean(val)} self.Dist = stats.geom(p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be integers larger than 0.' 
# noqa: EM101 ) @@ -916,7 +916,7 @@ def equation(par): p = np.mean(val) / (np.mean(val) + np.var(val)) k = np.mean(val) * p if k == 0: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'No suitable parameters can be estimated from the given data.' # noqa: EM101 ) k = round( @@ -954,11 +954,11 @@ def equation(par): loc=self.Par['x_m'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Maximum likelihood estimation did not converge.' # noqa: EM101 ) else: - raise RuntimeError('The given data must be positive.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given data must be positive.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'poisson': n = len(val) @@ -966,20 +966,20 @@ def equation(par): X = val[0] # noqa: N806 t = val[1] if t <= 0: - raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, RUF100, TRY003 if all(X >= 0) and all(X % 1 == 0): v = np.mean(X) / t self.Par = {'v': v, 't': t} self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be non-negative integers.' # noqa: EM101 ) elif all(val >= 0) and all(val % 1 == 0): self.Par = {'lambda': np.mean(val)} self.Dist = stats.poisson(mu=self.Par['lambda']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be non-negative integers.' # noqa: EM101 ) @@ -991,11 +991,11 @@ def equation(par): elif name.lower() == 'truncatednormal': X = val[0] # noqa: N806 if val[1] >= val[2]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The upper bound a must be larger than the lower bound b.' # noqa: EM101 ) if not (all(val[1] <= X) and all(val[2] >= X)): - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be in the range [a,b].' # noqa: EM101 ) @@ -1029,7 +1029,7 @@ def equation(par): b=b_mod, ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Maximum likelihood estimation did not converge.' 
# noqa: EM101 ) @@ -1046,16 +1046,16 @@ def equation(par): self.Dist = stats.weibull_min(c=self.Par['k'], scale=self.Par['a_n']) else: - raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501 + raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501, RUF100 else: - raise RuntimeError('Unknown option :' + opt) # noqa: DOC501 + raise RuntimeError('Unknown option :' + opt) # noqa: DOC501, RUF100 # %% def mean(self): """Returns the mean of the distribution.""" # noqa: D401 if self.Name == 'gevmin': - return -self.Dist.mean() + return -self.Dist.mean() # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 return self.Dist.mean() + self.Par['k'] @@ -1066,13 +1066,13 @@ def mean(self): # %% def std(self): """Returns the standard deviation of the distribution.""" # noqa: D401 - return self.Dist.std() + return self.Dist.std() # noqa: DOC201 # %% def pdf(self, x): """Returns the PDF value.""" # noqa: D401 if self.Name == 'binomial' or self.Name == 'geometric': # noqa: PLR1714 - return self.Dist.pmf(x) + return self.Dist.pmf(x) # noqa: DOC201 elif self.Name == 'gevmin': # noqa: RET505 return self.Dist.pdf(-x) @@ -1090,7 +1090,7 @@ def pdf(self, x): def cdf(self, x): """Returns the CDF value.""" # noqa: D401 if self.Name == 'gevmin': - return 1 - self.Dist.cdf(-x) # <-- this is not a proper cdf ! + return 1 - self.Dist.cdf(-x) # <-- this is not a proper cdf ! # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 return self.Dist.cdf(x - self.Par['k']) @@ -1104,7 +1104,7 @@ def random(self, size=None): object. """ # noqa: D205, D401 if self.Name == 'gevmin': - return self.Dist.rvs(size=size) * (-1) + return self.Dist.rvs(size=size) * (-1) # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 samples = self.Dist.rvs(size=size) + self.Par['k'] @@ -1118,7 +1118,7 @@ def random(self, size=None): def icdf(self, y): """Returns the value of the inverse CDF.""" # noqa: D401 if self.Name == 'gevmin': - return -self.Dist.ppf(1 - y) + return -self.Dist.ppf(1 - y) # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 return self.Dist.ppf(y) + self.Par['k'] @@ -1171,7 +1171,7 @@ def gevfit_alt(y): 'The shape parameter of the GEV is not in the range where PWM asymptotic results are valid.' ) - return par + return par # noqa: DOC201 # ------------------------------------------------------------------------------ @@ -1217,4 +1217,4 @@ def gevpwm(y): 'fsolve could not converge to a solution for the PWM estimate.' ) - return par + return par # noqa: DOC201 diff --git a/modules/performUQ/common/ERAClasses/ERANataf.py b/modules/performUQ/common/ERAClasses/ERANataf.py index 16f163331..bd5b23051 100644 --- a/modules/performUQ/common/ERAClasses/ERANataf.py +++ b/modules/performUQ/common/ERAClasses/ERANataf.py @@ -104,17 +104,17 @@ class description. try: np.linalg.cholesky(self.Rho_X) except np.linalg.LinAlgError: - raise RuntimeError( # noqa: B904, DOC501, TRY003 + raise RuntimeError( # noqa: B904, DOC501, RUF100, TRY003 'The given correlation matrix is not positive definite' # noqa: EM101 '--> Nataf transformation is not applicable.' ) if not np.all(self.Rho_X - self.Rho_X.T == 0): - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given correlation matrix is not symmetric ' # noqa: EM101 '--> Nataf transformation is not applicable.' 
) if not np.all(np.diag(self.Rho_X) == 1): - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Not all diagonal entries of the given correlation matrix are equal to one ' # noqa: EM101 '--> Nataf transformation is not applicable.' ) @@ -246,7 +246,7 @@ def fun(rho0): self.Rho_Z[i, j] = sol[0] self.Rho_Z[j, i] = self.Rho_Z[i, j] else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'brentq and fsolve coul' # noqa: EM101 'd not converge to a ' 'solution of the Nataf ' @@ -255,7 +255,7 @@ def fun(rho0): try: self.A = np.linalg.cholesky(self.Rho_Z) except np.linalg.LinAlgError: - raise RuntimeError( # noqa: B904, DOC501, TRY003 + raise RuntimeError( # noqa: B904, DOC501, RUF100, TRY003 'Transformed correlation matrix is not positive' # noqa: EM101 ' definite --> Nataf transformation is not ' 'applicable.' @@ -298,12 +298,12 @@ def X2U(self, X, Jacobian=False): # noqa: FBT002, N802, N803 # check of the dimensions of input X if X.ndim > 2: # noqa: PLR2004 - raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, RUF100, TRY003 if np.shape(X)[1] == 1 and n_dim != 1: # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -318,7 +318,7 @@ def X2U(self, X, Jacobian=False): # noqa: FBT002, N802, N803 for i in range(n_dim): diag[i, i] = self.Marginals[i].pdf(X[0, i]) / stats.norm.pdf(Z[i, 0]) Jac = np.linalg.solve(self.A, diag) # noqa: N806 - return np.squeeze(U), Jac + return np.squeeze(U), Jac # noqa: DOC201 else: # noqa: RET505 return np.squeeze(U) @@ -342,7 +342,7 @@ def U2X(self, U, Jacobian=False): # noqa: FBT002, N802, N803 # in case that only one point U is given, he can be defined either as row or column vector U = U.T # noqa: N806 if np.shape(U)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'U must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' 
) @@ -359,7 +359,7 @@ def U2X(self, U, Jacobian=False): # noqa: FBT002, N802, N803 for i in range(n_dim): diag[i, i] = stats.norm.pdf(Z[i, 0]) / self.Marginals[i].pdf(X[0, i]) Jac = np.dot(diag, self.A) # noqa: N806 - return np.squeeze(X), Jac + return np.squeeze(X), Jac # noqa: DOC201 else: # noqa: RET505 return np.squeeze(X) @@ -376,7 +376,7 @@ def random(self, n=1): for i in range(n_dim): jr[:, i] = self.Marginals[i].icdf(stats.norm.cdf(Z[i, :])) - return np.squeeze(jr) + return np.squeeze(jr) # noqa: DOC201 # %% def pdf(self, X): # noqa: C901, N803 @@ -402,12 +402,12 @@ def pdf(self, X): # noqa: C901, N803 # check of the dimensions of input X if X.ndim > 2: # noqa: PLR2004 - raise RuntimeError('X must have not more than two dimensions.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('X must have not more than two dimensions.') # noqa: DOC501, EM101, RUF100, TRY003 if np.shape(X)[1] == 1 and n_dim != 1: # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -437,7 +437,7 @@ def pdf(self, X): # noqa: C901, N803 jointpdf[i] = 0 if np.size(jointpdf) == 1: - return jointpdf[0] + return jointpdf[0] # noqa: DOC201 else: # noqa: RET505 return jointpdf @@ -461,7 +461,7 @@ def cdf(self, X): # noqa: N803 # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -474,7 +474,7 @@ def cdf(self, X): # noqa: N803 U, mean=mu, cov=np.matrix(self.Rho_Z) ) - return jointcdf # noqa: RET504 + return jointcdf # noqa: DOC201, RET504 # %% @staticmethod diff --git a/modules/performUQ/common/ERAClasses/ERARosen.py b/modules/performUQ/common/ERAClasses/ERARosen.py index 5625df024..52d71e468 100644 --- a/modules/performUQ/common/ERAClasses/ERARosen.py +++ b/modules/performUQ/common/ERAClasses/ERARosen.py @@ -105,7 +105,7 @@ class description. if isinstance(dist[i], ERACond): n_parents[i] = dist[i].Param.__code__.co_argcount elif not isinstance(dist[i], ERADist): - raise RuntimeError( # noqa: DOC501, TRY003, TRY004 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003, TRY004 'The objects in dist must be either ERADist or ERACond objects.' # noqa: EM101 ) @@ -118,7 +118,7 @@ class description. for i in range(n_dist + 1): # noqa: B007 adj_prod = np.matmul(adj_prod, adj_mat) if sum(np.diag(adj_prod)) != 0: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The graph defining the dependence between the different ' # noqa: EM101 'distributions must be directed and acyclical.' ) @@ -141,7 +141,7 @@ class description. self.Order = [layers[0], np.concatenate(layers[1:])] self.Layers = layers else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The defined joint distribution consists only of independent distributions.' # noqa: EM101 'This type of joint distribution is not supported by ERARosen.' 
) @@ -174,12 +174,12 @@ def X2U(self, X, error=True): # noqa: FBT002, N802, N803 # check of the dimensions of input X if X.ndim > 2: # noqa: PLR2004 - raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, RUF100, TRY003 if np.shape(X)[1] == 1 and n_dim != 1: # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -200,11 +200,11 @@ def X2U(self, X, error=True): # noqa: FBT002, N802, N803 if error: if not all(np.logical_not(lin_ind)): - raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, RUF100, TRY003 else: U[lin_ind, :] = np.nan - return np.squeeze(U) + return np.squeeze(U) # noqa: DOC201 # %% def U2X(self, U, error=True): # noqa: FBT002, N802, N803 @@ -226,7 +226,7 @@ def U2X(self, U, error=True): # noqa: FBT002, N802, N803 # in case that only one point X is given, he can be defined either as row or column vector U = U.T # noqa: N806 if np.shape(U)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'U must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -246,11 +246,11 @@ def U2X(self, U, error=True): # noqa: FBT002, N802, N803 if error: if not np.all(np.logical_not(lin_ind)): - raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, RUF100, TRY003 else: X[lin_ind, :] = np.nan - return np.squeeze(X) + return np.squeeze(X) # noqa: DOC201 # %% def pdf(self, X, error=True): # noqa: FBT002, N803 @@ -271,7 +271,7 @@ def pdf(self, X, error=True): # noqa: FBT002, N803 # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' 
) @@ -290,10 +290,10 @@ def pdf(self, X, error=True): # noqa: FBT002, N803 if error: if not np.all(np.logical_not(nan_ind)): - raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, RUF100, TRY003 if np.size(jointpdf) == 1: - return jointpdf[0] + return jointpdf[0] # noqa: DOC201 else: # noqa: RET505 return jointpdf @@ -314,7 +314,7 @@ def random(self, n=1): except ValueError: # noqa: PERF203 raise RuntimeError('Invalid joint distribution was created.') # noqa: B904, DOC501, EM101, TRY003 - return np.squeeze(X) + return np.squeeze(X) # noqa: DOC201 # %% def plotGraph(self, opt=False): # noqa: FBT002, C901, N802 diff --git a/modules/performUQ/other/UQpyRunner.py b/modules/performUQ/other/UQpyRunner.py index 251765fdb..3ce90e804 100644 --- a/modules/performUQ/other/UQpyRunner.py +++ b/modules/performUQ/other/UQpyRunner.py @@ -139,7 +139,7 @@ def runUQ( # noqa: C901, N802, PLR6301 distributionObjects, nsamples=numberOfSamples, random_state=seed ) else: - raise OSError( # noqa: DOC501 + raise OSError( # noqa: DOC501, RUF100 "ERROR: You'll need to update UQpyRunner.py to run your specified" # noqa: ISC003 + ' sampling method!' ) diff --git a/modules/systemPerformance/REWET/REWET/Damage.py b/modules/systemPerformance/REWET/REWET/Damage.py index a7141352c..49f2fb715 100644 --- a/modules/systemPerformance/REWET/REWET/Damage.py +++ b/modules/systemPerformance/REWET/REWET/Damage.py @@ -75,7 +75,7 @@ def readDamageFromPickleFile( # noqa: N802 Returns ------- - """ # noqa: D205, D400, D401, D404, D414 + """ # noqa: D205, D400, D401, D404, D414, DOC202 with open(pickle_file_name, 'rb') as pckf: # noqa: PTH123 w = pickle.load(pckf) # noqa: S301 @@ -276,7 +276,7 @@ def readDamageFromTextFile(self, path): # noqa: N802 elif sline[0].lower() == 'break': if line_length < 3: # noqa: PLR2004 - raise OSError('Line cannot have more than three arguments') # noqa: DOC501, EM101, TRY003 + raise OSError('Line cannot have more than three arguments') # noqa: DOC501, EM101, RUF100, TRY003 # print('Probelm 2') temp_break = {} temp_break['pipe_id'] = sline[1] @@ -313,7 +313,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ------- None. 
- """ # noqa: D400 + """ # noqa: D400, DOC202 if self.node_damage.empty: print('no node damage at all') # noqa: T201 return @@ -383,7 +383,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 pipe_length = val['node_Pipe_Length'] * 1000 if node_name not in WaterNetwork.node_name_list: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Node name of damages not in node name list: ' + node_name ) @@ -420,7 +420,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 number_of_damages = val['Number_of_damages'] pipe_length = val['node_Pipe_Length'] * 1000 if node_name not in WaterNetwork.node_name_list: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Node name of damages not in node name list: ' + node_name ) maximum_node_demand = 10 @@ -484,7 +484,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 over_designed_diameter, } else: - raise ValueError('Unknown nodal damage method') # noqa: DOC501, EM101, TRY003 + raise ValueError('Unknown nodal damage method') # noqa: DOC501, EM101, RUF100, TRY003 # return WaterNetwork @@ -807,7 +807,7 @@ def applyPipeDamages(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ratio = cur_damage['damage_loc'] / last_ratio if ratio >= 1: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'IN LEAK: ratio is bigger than or equal to 1 for pipe:' + repr(pipe_id) + ' ' @@ -863,7 +863,7 @@ def applyPipeDamages(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ratio = cur_damage['damage_loc'] / last_ratio if ratio >= 1: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'IN BREAK: ratio is bigger than or equal to 1 for pipe:' + repr(pipe_id) + ' ' @@ -940,7 +940,7 @@ def applyPipeDamages(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ) # self._registry.addPipeDamageToDamageRestorationData(pipe_id, 'break', damage_time) else: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'undefined damage type: ' + repr(cur_damage['type']) + ". Accpetale type of famages are either 'creack' or 'break'." @@ -1043,7 +1043,7 @@ def read_earthquake(self, earthquake_file_name): ------- None. - """ # noqa: D205, DOC502 + """ # noqa: D205, DOC202, DOC502 if type(earthquake_file_name) != str: # noqa: E721 raise ValueError('string is wanted for earthqiake fie name') # noqa: EM101, TRY003 @@ -1080,7 +1080,7 @@ def sortEarthquakeListTimely(self): # noqa: N802 ------- None. - """ # noqa: D400, D401, D404 + """ # noqa: D400, D401, D404, DOC202 self._earthquake.sort_index() self.is_timely_sorted = True @@ -1099,7 +1099,7 @@ def predictDamage(self, wn, iClear=False): # noqa: FBT002, N802, N803 ------- None. 
- """ # noqa: D401, D404 + """ # noqa: D401, D404, DOC202 if iClear: self.pipe_leak = pd.Series() self.pipe_break = pd.Series() diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py index f7aad0616..b5469199c 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py @@ -7,7 +7,7 @@ InpFile BinFile s -""" # noqa: CPY001 +""" # noqa: A005, CPY001 import datetime import difflib @@ -231,7 +231,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -248,7 +248,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -262,13 +262,13 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 return time_sec else: # noqa: RET505 - raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, RUF100, TRY003 def _sec_to_string(sec): @@ -371,7 +371,7 @@ def read(self, inp_files, wn=None): # noqa: C901 continue elif section is None: logger.debug('Found confusing line: %s', repr(line)) - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 '%(fname)s:%(lnum)d: Non-comment outside of valid section!' % edata ) @@ -3985,7 +3985,7 @@ def contains_section(self, sec): """ # noqa: D205 try: self.get_section(sec) - return True # noqa: TRY300 + return True # noqa: DOC201, TRY300 except NoSectionError: return False @@ -4147,7 +4147,7 @@ def _read_control_line(line, wn, flow_units, control_name): # noqa: C901 elif current[6] == 'BELOW': oper = np.less else: - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 'The following control is not recognized: ' + line ) # OKAY - we are adding in the elevation. 
This is A PROBLEM @@ -4169,14 +4169,14 @@ def _read_control_line(line, wn, flow_units, control_name): # noqa: C901 node, 'level', oper, threshold, action_obj, control_name ) else: - raise RuntimeError('The following control is not recognized: ' + line) # noqa: DOC501 + raise RuntimeError('The following control is not recognized: ' + line) # noqa: DOC501, RUF100 # control_name = '' # for i in range(len(current)-1): # control_name = control_name + '/' + current[i] # control_name = control_name + '/' + str(round(threshold, 2)) elif 'CLOCKTIME' not in current: # at time if 'TIME' not in current: - raise ValueError(f'Unrecognized line in inp file: {line}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Unrecognized line in inp file: {line}') # noqa: DOC501, EM102, RUF100, TRY003 if ':' in current[5]: run_at_time = int(_str_time_to_sec(current[5])) @@ -4345,4 +4345,4 @@ def _diff_inp_files( # noqa: C901 g.write(html_diff) g.close() - return n + return n # noqa: DOC201 diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py index c669aedc9..ff332a895 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py @@ -75,7 +75,7 @@ def updateWaterNetworkModelWithResult( # noqa: C901, N802 ------- None. - """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 max_time = result.node['head'].index.max() if latest_simulation_time == None: # noqa: E711 latest_simulation_time = max_time diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py index c02c0ef18..2decbbfa0 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py @@ -229,7 +229,7 @@ def run_sim( # noqa: C901 if run_successful: break - return result_data, run_successful + return result_data, run_successful # noqa: DOC201 def _updateResultStartTime(self, result_data, start_time): # noqa: N802, PLR6301 for res_type, res in result_data.link.items(): # noqa: B007, PERF102 @@ -313,10 +313,10 @@ def _initialize_internal_graph(self): # noqa: C901 from_node_id = self._node_name_to_id[from_node_name] to_node_id = self._node_name_to_id[to_node_name] if (from_node_id, to_node_id) not in n_links: - n_links[(from_node_id, to_node_id)] = 0 - n_links[(to_node_id, from_node_id)] = 0 - n_links[(from_node_id, to_node_id)] += 1 - n_links[(to_node_id, from_node_id)] += 1 + n_links[(from_node_id, to_node_id)] = 0 # noqa: RUF031 + n_links[(to_node_id, from_node_id)] = 0 # noqa: RUF031 + n_links[(from_node_id, to_node_id)] += 1 # noqa: RUF031 + n_links[(to_node_id, from_node_id)] += 1 # noqa: RUF031 rows.append(from_node_id) # noqa: FURB113 cols.append(to_node_id) # noqa: FURB113 rows.append(to_node_id) @@ -379,7 +379,7 @@ def _initialize_internal_graph(self): # noqa: C901 self._node_pairs_with_multiple_links = OrderedDict() for from_node_id, to_node_id in n_links.keys(): # noqa: SIM118 - if n_links[(from_node_id, to_node_id)] > 1: + if n_links[(from_node_id, to_node_id)] > 1: # noqa: RUF031 if ( to_node_id, from_node_id, @@ -390,7 +390,7 @@ def _initialize_internal_graph(self): # noqa: C901 from_node_name = self._node_id_to_name[from_node_id] to_node_name = self._node_id_to_name[to_node_id] tmp_list = self._node_pairs_with_multiple_links[ - (from_node_id, to_node_id) + (from_node_id, to_node_id) # 
noqa: RUF031 ] = [] for link_name in self._wn.get_links_for_node(from_node_name): link = self._wn.get_link(link_name) diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py index 2f0128ca0..f6556539d 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py @@ -10,7 +10,7 @@ ---- -""" # noqa: CPY001 +""" # noqa: A005, CPY001 import logging import re @@ -131,7 +131,7 @@ def _is_number(s): """ # noqa: D400, D401 try: float(s) - return True # noqa: TRY300 + return True # noqa: DOC201, TRY300 except ValueError: return False @@ -211,7 +211,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -228,7 +228,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -242,13 +242,13 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 return time_sec else: # noqa: RET505 - raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, RUF100, TRY003 def _sec_to_string(sec): diff --git a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py index b1f309404..35073ee55 100644 --- a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py +++ b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py @@ -67,7 +67,7 @@ def __init__(self, definition_file_name): ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 # some of the following lines have been adopted from WNTR self.rm = restoration_data() @@ -122,7 +122,7 @@ def __init__(self, definition_file_name): self.config_file_comment.append(line[1:]) continue elif section is None: - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 '%(fname)s:%(lnum)d: Non-comment outside of valid section!' % edata ) @@ -230,7 +230,7 @@ def _read_entities(self): # noqa: C901 ------- None. 
- """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 # Entities is kept for legacy compatibility with the first version damage_group_data = self.sections.get( '[ENTITIES]', self.sections.get('[Damage Group]') diff --git a/modules/systemPerformance/REWET/REWET/Input/Settings.py b/modules/systemPerformance/REWET/REWET/Input/Settings.py index 6f99723f2..2609e672f 100644 --- a/modules/systemPerformance/REWET/REWET/Input/Settings.py +++ b/modules/systemPerformance/REWET/REWET/Input/Settings.py @@ -304,7 +304,7 @@ def importJsonSettings(self, json_file_path): # noqa: N802 for key, val in settings_data.items(): if key not in self: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'REWET settinsg does not have "{key}" as a settings key' # noqa: EM102 ) diff --git a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py index 0416960ad..5720f7a7c 100644 --- a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py +++ b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py @@ -51,12 +51,12 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 ) if type(time_shift) != int: # noqa: E721 - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Time shift must be integer type: ' + repr(type(time_shift)) + '.' ) if time_shift < 0: - raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, RUF100, TRY003 res = {} for percentage in percentage_list: @@ -69,10 +69,10 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 elif time_type.lower() == 'day': pr.convertTimeSecondToDay(temp_res, 'restore_time', time_shift) else: - raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501 + raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501, RUF100 res[percentage] = temp_res - return res + return res # noqa: DOC201 def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N802 @@ -102,12 +102,12 @@ def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 ) if type(time_shift) != int: # noqa: E721 - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Time shift must be integer type: ' + repr(type(time_shift)) + '.' ) if time_shift < 0: - raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, RUF100, TRY003 res = {} for percentage in percentage_list: @@ -120,7 +120,7 @@ def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 elif time_type.lower() == 'day': pr.convertTimeSecondToDay(temp_res, 'restore_time', time_shift) else: - raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501 + raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501, RUF100 res[percentage] = temp_res - return res + return res # noqa: DOC201 diff --git a/modules/systemPerformance/REWET/REWET/initial.py b/modules/systemPerformance/REWET/REWET/initial.py index df8f2a479..53f88f7db 100644 --- a/modules/systemPerformance/REWET/REWET/initial.py +++ b/modules/systemPerformance/REWET/REWET/initial.py @@ -46,7 +46,7 @@ def run(self, project_file=None): # noqa: C901 ------- None. 
- """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 settings = Settings() if project_file is not None: project_file = str(project_file) diff --git a/modules/systemPerformance/REWET/REWET/restoration/base.py b/modules/systemPerformance/REWET/REWET/restoration/base.py index fba5a594f..2f0785c2b 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/base.py +++ b/modules/systemPerformance/REWET/REWET/restoration/base.py @@ -232,7 +232,7 @@ def addAgent(self, agent_name, agent_type, definition): # noqa: N802 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 # number_of_agents = int(definition['Number']) agent_speed = self.registry.settings['crew_travel_speed'] temp_agent_data = AgentData( @@ -270,7 +270,7 @@ def setActiveAgents(self, active_agent_ID_list): # noqa: N802, N803 ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 for active_agent_ID in active_agent_ID_list: # noqa: N806 self._agents['active'].loc[active_agent_ID] = True @@ -600,7 +600,7 @@ def addShift(self, name, beginning, ending): # noqa: N802 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 if name in self._shift_data: raise ValueError('Shift name already registered') # noqa: EM101, TRY003 if type(beginning) != int and type(beginning) != float: # noqa: E721 @@ -673,7 +673,7 @@ def assignShiftToAgent(self, agent_ID, shift_name): # noqa: N802, N803 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 if agent_ID in self._all_agent_shift_data: raise ValueError('The agent ID currently in Agent ALl Shifts') # noqa: EM101, TRY003 if shift_name not in self._shift_data: diff --git a/modules/systemPerformance/REWET/REWET/restoration/io.py b/modules/systemPerformance/REWET/REWET/restoration/io.py index c03595b62..8c7277831 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/io.py +++ b/modules/systemPerformance/REWET/REWET/restoration/io.py @@ -44,7 +44,7 @@ def __init__(self, restoration_model, definition_file_name): ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 # some of the following lines have been adopted from WNTR self.rm = restoration_model self.crew_data = {} @@ -106,7 +106,7 @@ def __init__(self, restoration_model, definition_file_name): self.config_file_comment.append(line[1:]) continue elif section is None: - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 '%(fname)s:%(lnum)d: Non-comment outside of valid section!' % edata ) @@ -1180,7 +1180,7 @@ def _read_config(self): ------- None. - """ # noqa: D205, D400, D401 + """ # noqa: D205, D400, D401, DOC202 edata = OrderedDict() self._crew_file_name = [] self._crew_file_type = [] diff --git a/modules/systemPerformance/REWET/REWET/restoration/model.py b/modules/systemPerformance/REWET/REWET/restoration/model.py index 71f61cad2..862d71480 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/model.py +++ b/modules/systemPerformance/REWET/REWET/restoration/model.py @@ -907,12 +907,12 @@ def updateShifiting(self, time): # noqa: N802 ------- None. 
- """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 if type(time) != int and type(time) != float: # noqa: E721 raise ValueError('Time must be integer not ' + str(type(time))) # noqa: DOC501 time = int(time) if time < 0: - raise ValueError('Time must be bigger than zero') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time must be bigger than zero') # noqa: DOC501, EM101, RUF100, TRY003 next_shift_time = self.shifting.getNextShiftTime(time) # logger.debug('next shitt time = ' + str(next_shift_time)) self._addHardEvent(int(next_shift_time), 'shift') @@ -1555,18 +1555,18 @@ def _addHardEvent(self, next_time, requester, detail=None, current_time=None): if type(next_time) != int and type(next_time) != float: # noqa: E721 raise ValueError('time must be int, not ' + str(type(next_time))) # noqa: DOC501 if detail != None and current_time == None: # noqa: E711 - raise ValueError('When detail is provided, current time cannot be None') # noqa: DOC501, EM101, TRY003 + raise ValueError('When detail is provided, current time cannot be None') # noqa: DOC501, EM101, RUF100, TRY003 minimum_time_devision = int(self._registry.settings['simulation_time_step']) if current_time != None: # noqa: E711 if next_time < current_time: - raise ValueError('Time is smaller than current time') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time is smaller than current time') # noqa: DOC501, EM101, RUF100, TRY003 if detail == None: # noqa: E711 - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 'When current time is provided, detail cannot be None' # noqa: EM101 ) if minimum_time_devision < 0: - raise ValueError('Minimum time division cannot be negative') # noqa: DOC501, EM101, TRY003 + raise ValueError('Minimum time division cannot be negative') # noqa: DOC501, EM101, RUF100, TRY003 name = requester + '-' + detail diff --git a/modules/systemPerformance/REWET/REWET/restoration/registry.py b/modules/systemPerformance/REWET/REWET/restoration/registry.py index 1ce8da5dc..50f120fa2 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/registry.py +++ b/modules/systemPerformance/REWET/REWET/restoration/registry.py @@ -515,7 +515,7 @@ def addPipeDamageToRegistry(self, node_name, data): # noqa: N802 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 # self._pipe_node_damage_status[name] = data leaking_pipe_with_pipeA_orginal_pipe = self._pipe_leak_history[ # noqa: N806 @@ -582,7 +582,7 @@ def addPipeDamageToRegistry(self, node_name, data): # noqa: N802 self._pipe_break_history.loc[node_name, 'Node_B'] = data['node_B'] else: - raise ValueError('Undefined damage type') # noqa: DOC501, EM101, TRY003 + raise ValueError('Undefined damage type') # noqa: DOC501, EM101, RUF100, TRY003 def addGeneralNodeDamageToRegistry(self, node_name, data=None): # noqa: ARG002, N802, D102 self._gnode_damage_table.loc[node_name, 'damage_type'] = None @@ -1280,7 +1280,7 @@ def occupyNode(self, node_name, occupier_name): # noqa: N802 ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 if occupier_name in self._occupancy: # if not iNodeCoupled(node_name): raise ValueError( # noqa: TRY003 @@ -1307,7 +1307,7 @@ def removeOccupancy(self, occupier_name): # noqa: N802 ------- None. - """ # noqa: D401 + """ # noqa: D401, DOC202 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: @@ -1350,7 +1350,7 @@ def whereIsOccupiedByName(self, occupier_name): # noqa: N802 str or series node(s) ID. 
- """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: raise ValueError('there is no occupancy with this name') # noqa: EM101, TRY003 @@ -1387,7 +1387,7 @@ def coupleTwoBreakNodes(self, break_point_1_name, break_point_2_name): # noqa: ------- None. - """ # noqa: D205 + """ # noqa: D205, DOC202 self._pipe_break_node_coupling[break_point_1_name] = break_point_2_name self._pipe_break_node_coupling[break_point_2_name] = break_point_1_name self._break_point_attached_to_mainPipe.append(break_point_1_name) diff --git a/modules/systemPerformance/REWET/REWET/timeline.py b/modules/systemPerformance/REWET/REWET/timeline.py index 964977fa1..12ba6efaa 100644 --- a/modules/systemPerformance/REWET/REWET/timeline.py +++ b/modules/systemPerformance/REWET/REWET/timeline.py @@ -143,7 +143,7 @@ def addEventTime(self, event_distinct_time, event_type='dmg'): # noqa: N802 ------- None. - """ # noqa: D205, D401, D404 + """ # noqa: D205, D401, D404, DOC202 if type(event_distinct_time) != pd.core.series.Series: # noqa: E721 if ( type(event_distinct_time) == numpy.float64 # noqa: E721 @@ -218,7 +218,7 @@ def checkAndAmendTime(self): # noqa: N802 ------- None. - """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 first_length = len(self._event_time_register.index) self._event_time_register = self._event_time_register[ self._event_time_register.index <= self._simulation_end_time diff --git a/modules/systemPerformance/REWET/preprocessorIO.py b/modules/systemPerformance/REWET/preprocessorIO.py index bc3d730fc..08bd122eb 100644 --- a/modules/systemPerformance/REWET/preprocessorIO.py +++ b/modules/systemPerformance/REWET/preprocessorIO.py @@ -187,7 +187,7 @@ def save_scenario_table(scenario_table, scenario_table_file_path): ------- None. 
- """ # noqa: D205, D400, D401 + """ # noqa: D205, D400, D401, DOC202 if isinstance(scenario_table, pd.core.frame.DataFrame): pass elif isinstance(scenario_table, list): From 47d160b8562dd70421c633f09e92db495d740b88 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 16 Aug 2024 15:16:45 -0700 Subject: [PATCH 09/59] ruff check --- modules/Workflow/computeResponseSpectrum.py | 8 ++-- modules/Workflow/createGM4BIM.py | 36 ++++++++--------- modules/Workflow/whale/main.py | 16 ++++---- modules/common/simcenter_common.py | 4 +- modules/createEVENT/CFDEvent/CFDEvent.py | 2 +- .../EmptyDomainCFD/EmptyDomainCFD.py | 2 +- .../EmptyDomainCFD/post_process_output.py | 14 +++---- .../GeoClawOpenFOAM/AddBuildingForces.py | 4 +- .../createEVENT/GeoClawOpenFOAM/GeoClaw.py | 2 +- .../GeoClawOpenFOAM/GeoClawBathy.py | 2 +- .../GeoClawOpenFOAM/GetOpenFOAMEvent.py | 8 ++-- modules/createEVENT/GeoClawOpenFOAM/flume.py | 6 +-- .../createEVENT/GeoClawOpenFOAM/hydroUtils.py | 10 ++--- .../GeoClawOpenFOAM/of7Alpboundary.py | 6 +-- .../GeoClawOpenFOAM/of7Building.py | 4 +- .../createEVENT/GeoClawOpenFOAM/of7Decomp.py | 4 +- .../GeoClawOpenFOAM/of7Geometry.py | 4 +- .../createEVENT/GeoClawOpenFOAM/of7Initial.py | 6 +-- .../GeoClawOpenFOAM/of7Materials.py | 6 +-- .../createEVENT/GeoClawOpenFOAM/of7Meshing.py | 10 ++--- .../createEVENT/GeoClawOpenFOAM/of7Others.py | 4 +- .../GeoClawOpenFOAM/of7Prboundary.py | 6 +-- .../createEVENT/GeoClawOpenFOAM/of7Process.py | 8 ++-- .../GeoClawOpenFOAM/of7PtDboundary.py | 10 ++--- .../createEVENT/GeoClawOpenFOAM/of7Solve.py | 12 +++--- .../GeoClawOpenFOAM/of7Turbulence.py | 4 +- .../GeoClawOpenFOAM/of7Uboundary.py | 8 ++-- .../createEVENT/GeoClawOpenFOAM/openfoam7.py | 26 ++++++------ .../createEVENT/GeoClawOpenFOAM/osuFlume.py | 2 +- .../createEVENT/GeoClawOpenFOAM/userFlume.py | 2 +- .../IsolatedBuildingCFD.py | 2 +- .../createEVENT/Istanbul/IstanbulStations.py | 2 +- modules/createEVENT/M9/M9API.py | 2 +- modules/createEVENT/M9/M9Stations.py | 2 +- modules/createEVENT/MPM/MPM.py | 2 +- .../createEVENT/MPM/post_process_output.py | 14 +++---- .../SurroundedBuildingCFD.py | 2 +- .../post_process_output.py | 14 +++---- .../coupledDigitalTwin/CoupledDigitalTwin.py | 2 +- .../IntensityMeasureComputer.py | 2 +- .../siteResponse/RegionalSiteResponse.py | 2 +- .../stochasticWave/StochasticWave.py | 2 +- modules/createSAM/AutoSDA/beam_component.py | 2 +- modules/createSAM/AutoSDA/column_component.py | 2 +- modules/createSAM/AutoSDA/connection_part.py | 2 +- modules/createSAM/AutoSDA/help_functions.py | 32 +++++++-------- modules/performDL/pelicun3/DL_visuals.py | 40 +++++++++---------- .../performHUA/pyincore_data/censusutil.py | 8 ++-- modules/performREC/pyrecodes/run_pyrecodes.py | 3 +- .../regionalGroundMotion/CreateStation.py | 28 ++++++------- .../regionalGroundMotion/FetchOpenQuake.py | 10 ++--- .../regionalGroundMotion/HazardOccurrence.py | 12 +++--- .../HazardSimulationEQ.py | 2 +- .../gmpe/CorrelationModel.py | 22 +++++----- .../gmpe/SignificantDurationModel.py | 6 +-- .../regionalGroundMotion/gmpe/openSHAGMPE.py | 2 +- .../regionalGroundMotion/landslide.py | 38 +++++++++--------- .../regionalGroundMotion/liquefaction.py | 14 +++---- .../ComputeIntensityMeasure.py | 4 +- .../regionalWindField/CreateScenario.py | 32 +++++++-------- .../regionalWindField/CreateStation.py | 2 +- .../regionalWindField/WindFieldSimulation.py | 4 +- modules/performUQ/SimCenterUQ/PLoM/PLoM.py | 12 +++--- modules/performUQ/SimCenterUQ/PLoM/general.py | 20 +++++----- 
modules/performUQ/SimCenterUQ/runPLoM.py | 8 ++-- .../performUQ/UCSD_UQ/defaultLogLikeScript.py | 2 +- modules/performUQ/UCSD_UQ/mwg_sampler.py | 2 +- modules/performUQ/UCSD_UQ/runFEM.py | 2 +- modules/performUQ/UCSD_UQ/runTMCMC.py | 2 +- .../performUQ/common/ERAClasses/ERACond.py | 10 ++--- modules/performUQ/other/UQpyRunner.py | 2 +- .../systemPerformance/REWET/REWET/Damage.py | 8 ++-- .../REWET/REWET/EnhancedWNTR/epanet/io.py | 4 +- .../REWET/REWET/EnhancedWNTR/network/model.py | 2 +- .../REWET/REWET/EnhancedWNTR/sim/epanet.py | 6 +-- .../REWET/REWET/EnhancedWNTR/sim/io.py | 2 +- .../REWET/REWET/Input/Policy_IO.py | 4 +- .../REWET/REWET/Output/GUI_Curve_API.py | 4 +- .../systemPerformance/REWET/REWET/initial.py | 2 +- .../REWET/REWET/restoration/base.py | 8 ++-- .../REWET/REWET/restoration/io.py | 4 +- .../REWET/REWET/restoration/model.py | 2 +- .../REWET/REWET/restoration/registry.py | 10 ++--- .../systemPerformance/REWET/REWET/timeline.py | 4 +- .../systemPerformance/REWET/preprocessorIO.py | 2 +- 85 files changed, 335 insertions(+), 336 deletions(-) diff --git a/modules/Workflow/computeResponseSpectrum.py b/modules/Workflow/computeResponseSpectrum.py index 83f2a2d9c..bb4ad3f09 100644 --- a/modules/Workflow/computeResponseSpectrum.py +++ b/modules/Workflow/computeResponseSpectrum.py @@ -23,7 +23,7 @@ def convert_accel_units(acceleration, from_, to_='cm/s/s'): # noqa: C901 acceleration = np.asarray(acceleration) if from_ == 'g': if to_ == 'g': - return acceleration # noqa: DOC201 + return acceleration # noqa: DOC201, RUF100 if to_ in m_sec_square: return acceleration * g if to_ in cm_sec_square: @@ -70,7 +70,7 @@ def get_velocity_displacement( velocity = time_step * cumtrapz(acceleration, initial=0.0) if displacement is None: displacement = time_step * cumtrapz(velocity, initial=0.0) - return velocity, displacement # noqa: DOC201 + return velocity, displacement # noqa: DOC201, RUF100 class NewmarkBeta: @@ -160,7 +160,7 @@ def run(self): 'PGV': np.max(np.fabs(self.velocity)), 'PGD': np.max(np.fabs(self.displacement)), } - return self.response_spectrum, time_series, accel, vel, disp # noqa: DOC201 + return self.response_spectrum, time_series, accel, vel, disp # noqa: DOC201, RUF100 def _newmark_beta(self, omega, cval, kval): # noqa: ARG002 """Newmark-beta integral @@ -216,4 +216,4 @@ def _newmark_beta(self, omega, cval, kval): # noqa: ARG002 disp[j, :] = delta_u + disp[j - 1, :] a_t[j, :] = ground_acc[j] + accel[j, :] - return accel, vel, disp, a_t # noqa: DOC201 + return accel, vel, disp, a_t # noqa: DOC201, RUF100 diff --git a/modules/Workflow/createGM4BIM.py b/modules/Workflow/createGM4BIM.py index 2f3c6a9ad..d2fb8f22d 100644 --- a/modules/Workflow/createGM4BIM.py +++ b/modules/Workflow/createGM4BIM.py @@ -120,7 +120,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors # noqa: DOC201 + return scale_factors # noqa: DOC201, RUF100 def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, N802, N803, D103, PLR0915 @@ -410,28 +410,28 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, m_pgd_y = 0.0 s_pgd_y = 0.0 # add to dictionary - dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID)) # noqa: RUF031 + dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID)) # noqa: RUF031, RUF100 # pga - dict_im[('PGA', 0, 1, 'median')].append(m_pga_x) # noqa: RUF031 - dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x) # noqa: RUF031 - dict_im[('PGA', 0, 2, 
'median')].append(m_pga_y) # noqa: RUF031 - dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y) # noqa: RUF031 + dict_im[('PGA', 0, 1, 'median')].append(m_pga_x) # noqa: RUF031, RUF100 + dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x) # noqa: RUF031, RUF100 + dict_im[('PGA', 0, 2, 'median')].append(m_pga_y) # noqa: RUF031, RUF100 + dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y) # noqa: RUF031, RUF100 # pgv - dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x) # noqa: RUF031 - dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x) # noqa: RUF031 - dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y) # noqa: RUF031 - dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y) # noqa: RUF031 + dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x) # noqa: RUF031, RUF100 + dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x) # noqa: RUF031, RUF100 + dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y) # noqa: RUF031, RUF100 + dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y) # noqa: RUF031, RUF100 # pgd - dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x) # noqa: RUF031 - dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x) # noqa: RUF031 - dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y) # noqa: RUF031 - dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y) # noqa: RUF031 + dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x) # noqa: RUF031, RUF100 + dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x) # noqa: RUF031, RUF100 + dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y) # noqa: RUF031, RUF100 + dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y) # noqa: RUF031, RUF100 for jj, Ti in enumerate(periods): # noqa: N806 cur_sa = f'SA({Ti}s)' - dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj]) # noqa: RUF031 - dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj]) # noqa: RUF031 - dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj]) # noqa: RUF031 - dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj]) # noqa: RUF031 + dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj]) # noqa: RUF031, RUF100 + dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj]) # noqa: RUF031, RUF100 + dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj]) # noqa: RUF031, RUF100 + dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj]) # noqa: RUF031, RUF100 # aggregate for cur_key, cur_value in dict_im.items(): diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py index e40adc597..759f8af73 100644 --- a/modules/Workflow/whale/main.py +++ b/modules/Workflow/whale/main.py @@ -310,7 +310,7 @@ def create_command(command_list, enforced_python=None): for command_arg in command_list[1:]: command += f'"{command_arg}" ' - return command # noqa: DOC201 + return command # noqa: DOC201, RUF100 def run_command(command): @@ -357,7 +357,7 @@ def run_command(command): py_script.main(arg_list) - return '', '' # noqa: DOC201 + return '', '' # noqa: DOC201, RUF100 else: # noqa: RET505 # fmk with Shell=True not working on older windows machines, new approach needed for quoted command .. 
turn into a list @@ -668,7 +668,7 @@ def get_command_list(self, app_path, force_posix=False): # noqa: FBT002, C901 # pp.pprint(arg_list) - return arg_list # noqa: DOC201 + return arg_list # noqa: DOC201, RUF100 class Workflow: @@ -1316,7 +1316,7 @@ def create_asset_files(self): log_div() - return assetFilesList # noqa: DOC201 + return assetFilesList # noqa: DOC201, RUF100 def augment_asset_files(self): # noqa: C901 """Short description @@ -1504,7 +1504,7 @@ def augment_asset_files(self): # noqa: C901 ) log_div() - return assetFilesList # noqa: DOC201 + return assetFilesList # noqa: DOC201, RUF100 def perform_system_performance_assessment(self, asset_type): """For an asset type run the system level performance assessment application @@ -1525,7 +1525,7 @@ def perform_system_performance_assessment(self, asset_type): prepend_timestamp=False, ) log_div() - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 if performance_app.rel_path == None: # noqa: E711 log_msg( @@ -1909,7 +1909,7 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'): # noqa: C901, N8 prepend_timestamp=False, ) log_div() - return dst # noqa: DOC201 + return dst # noqa: DOC201, RUF100 def cleanup_simdir(self, asst_id): """Short description @@ -2734,7 +2734,7 @@ def estimate_losses( # noqa: C901 ], ) if ('PID', '0') in df_res.columns: - del df_res[('PID', '0')] # noqa: RUF031 + del df_res[('PID', '0')] # noqa: RUF031, RUF100 # store the EDP statistics in the output DF for col in np.transpose(col_info): diff --git a/modules/common/simcenter_common.py b/modules/common/simcenter_common.py index 15e110eed..c54977fbe 100644 --- a/modules/common/simcenter_common.py +++ b/modules/common/simcenter_common.py @@ -283,7 +283,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors # noqa: DOC201 + return scale_factors # noqa: DOC201, RUF100 def get_unit_bases(input_units): @@ -306,4 +306,4 @@ def get_unit_bases(input_units): input_unit_bases = cur_unit_bases break - return input_unit_bases # noqa: DOC201 + return input_unit_bases # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/CFDEvent/CFDEvent.py b/modules/createEVENT/CFDEvent/CFDEvent.py index 7343cb800..884f232eb 100644 --- a/modules/createEVENT/CFDEvent/CFDEvent.py +++ b/modules/createEVENT/CFDEvent/CFDEvent.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py index 48fec2e0b..5d309a10f 100644 --- a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py +++ b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/EmptyDomainCFD/post_process_output.py b/modules/createEVENT/EmptyDomainCFD/post_process_output.py index 6a32be5ac..3327b8a1f 100644 
--- a/modules/createEVENT/EmptyDomainCFD/post_process_output.py +++ b/modules/createEVENT/EmptyDomainCFD/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p # noqa: DOC201 + return probes, time, p # noqa: DOC201, RUF100 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: DOC201, RET504 + return sField # noqa: DOC201, RET504, RUF100 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U # noqa: DOC201 + return probes, time, U # noqa: DOC201, RUF100 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: DOC201, RET504 + return L # noqa: DOC201, RET504, RUF100 def psd(x, dt, nseg): # noqa: F811 diff --git a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py index 9bd04831e..95ee701f9 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py +++ b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py @@ -9,7 +9,7 @@ def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803 It also checks that system directory contains the controlDict """ # noqa: D205, D400, D401, D404 if not os.path.isdir(caseDir): # noqa: PTH112 - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 caseDirList = os.listdir(caseDir) # noqa: N806 necessaryDirs = ['0', 'constant', 'system'] # noqa: N806 @@ -27,7 +27,7 @@ def findFunctionsDictionary(controlDictLines): # noqa: N802, N803 """This method will find functions dictionary in the controlDict""" # noqa: D400, D401, D404 for line in controlDictLines: if line.startswith('functions'): - return (True, controlDictLines.index(line) + 2) # noqa: DOC201 + return (True, controlDictLines.index(line) + 2) # noqa: DOC201, RUF100 return [False, len(controlDictLines)] diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py index e029b1a5c..890c5549f 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py @@ -80,4 +80,4 @@ def creategeom(self, data, path): # Points of interest bottompts = self.getbathy(maxvalues, minvalues, data) # noqa: F841 - return 0 # noqa: 
DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py index d2e525bb4..b4c7961f4 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py @@ -61,4 +61,4 @@ def creategeom(self, data, path): # noqa: ARG002 # Create a utilities object hydroutil = hydroUtils() # noqa: F841 - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py index 4e64bbd37..8a80e4cfc 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py @@ -16,7 +16,7 @@ def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803 It also checks that system directory contains the controlDict """ # noqa: D205, D400, D401, D404 if not os.path.isdir(caseDir): # noqa: PTH112 - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 caseDirList = os.listdir(caseDir) # noqa: N806 necessaryDirs = ['0', 'constant', 'system', 'postProcessing'] # noqa: N806 @@ -36,7 +36,7 @@ def parseForceComponents(forceArray): # noqa: N802, N803 x = float(components[0]) y = float(components[1]) z = float(components[2]) - return [x, y, z] # noqa: DOC201 + return [x, y, z] # noqa: DOC201, RUF100 def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime): # noqa: N802, N803 @@ -77,14 +77,14 @@ def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime): # noqa: N80 forces[i].Y.append(fpry + fvy + fpoy) forces[i].Z.append(fprz + fvz + fpoz) - return [deltaT, forces] # noqa: DOC201 + return [deltaT, forces] # noqa: DOC201, RUF100 def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent( # noqa: N802 diff --git a/modules/createEVENT/GeoClawOpenFOAM/flume.py b/modules/createEVENT/GeoClawOpenFOAM/flume.py index ff99da1ee..bfba2ea13 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/flume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/flume.py @@ -110,7 +110,7 @@ def generateflume(self, breadth, path): ) # Write bottom STL file # Return extreme values - return extremeval # noqa: DOC201 + return extremeval # noqa: DOC201, RUF100 ############################################################# def flumedata(self, IpPTFile): # noqa: N803 @@ -178,7 +178,7 @@ def flumedata(self, IpPTFile): # noqa: N803 self.npt = np.delete(self.npt, noindexes, axis=0) # Return extreme values - return extremeval # noqa: DOC201 + return extremeval # noqa: DOC201, RUF100 #################################################################### def right(self): @@ -431,4 +431,4 @@ def extremedata(self, extreme, breadth): ) tempfileID.close # noqa: B018 - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py index e4a1f8644..bf32311f5 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py +++ b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py @@ -90,7 +90,7 @@ def extract(self, obj, path, ind, arr): # noqa: C901 else: arr.append(None) - return arr # noqa: DOC201 + return arr # noqa: DOC201, RUF100 ############################################################# 
def extract_element_from_json(self, obj, path): @@ -106,7 +106,7 @@ def extract_element_from_json(self, obj, path): """ # noqa: D205, D401 if isinstance(obj, dict): # noqa: RET503 - return self.extract(obj, path, 0, []) # noqa: DOC201 + return self.extract(obj, path, 0, []) # noqa: DOC201, RUF100 elif isinstance(obj, list): # noqa: RET505 outer_arr = [] for item in obj: @@ -129,7 +129,7 @@ def general_header(self): | | O | \\*---------------------------------------------------------------------------*/ \n\n""" # noqa: W291 - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 #################################################################### def of7header(self, OFclass, location, filename): # noqa: N803 @@ -156,7 +156,7 @@ class {OFclass}; }} // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def hydrolog(self, projname, fipath): @@ -210,4 +210,4 @@ def getlist(self, data): data = data.replace(',', ' ') results = [float(n) for n in data.split()] - return results # noqa: DOC201, RET504 + return results # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py index d677896dd..4ac1b0b2f 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py @@ -89,7 +89,7 @@ def Alptext(self, data, patches): # noqa: N802 Alptext = Alptext + '}\n\n' # noqa: N806 # Return the text for velocity BC - return Alptext # noqa: DOC201, RET504 + return Alptext # noqa: DOC201, RET504, RUF100 ############################################################# def Alpheader(self): # noqa: N802 @@ -114,7 +114,7 @@ def Alpheader(self): # noqa: N802 header = header + 'internalField\tuniform\t0;\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def Alppatchtext(self, Alptype, patchname): # noqa: ARG002, N802, N803 @@ -140,4 +140,4 @@ def Alppatchtext(self, Alptype, patchname): # noqa: ARG002, N802, N803 Alptext = Alptext + 'type\tzeroGradient;\n\t}\n' # noqa: N806 # Return the header for U file - return Alptext # noqa: DOC201 + return Alptext # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py index 7062caa2a..ecb806c7e 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py @@ -100,7 +100,7 @@ def buildcheck(self, data, path): # noqa: C901, PLR0911 data, ['Events', 'BuildingSTLFile'] ) if stlfile == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 stlfile = ', '.join( hydroutil.extract_element_from_json( @@ -218,7 +218,7 @@ def createbuilds(self, data, path): elif buildeftype == 'Parameters': self.buildpara(data, path) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def buildmanual(self, data, path): diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py index f929b7f50..40349fa86 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py +++ 
b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py @@ -75,7 +75,7 @@ def decomptext(self, data): decomptext = decomptext + 'method\tscotch;\n\n' - return decomptext # noqa: DOC201, RET504 + return decomptext # noqa: DOC201, RET504, RUF100 ############################################################# def decompheader(self): @@ -97,7 +97,7 @@ def decompheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def scripts(self, data, path): # noqa: ARG002 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py index df3126ff3..1c4b78679 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py @@ -82,7 +82,7 @@ def geomcheck(self, data, path): # noqa: C901, PLR0911 data, ['Events', 'NumBathymetryFiles'] ) if numbathy == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 numbathy = ', '.join( hydroutil.extract_element_from_json( @@ -250,7 +250,7 @@ def createOFSTL(self, data, path): # noqa: C901, N802 # Create geometry (i.e. STL files) and extreme file ecode = finalgeom.creategeom(data, path) if ecode < 0: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # Bathymetry only elif int(simtype) == 2: # noqa: PLR2004 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py index 4043f2b36..16825f65b 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py @@ -173,7 +173,7 @@ def alphatext(self, data, fipath): alphatext = alphatext + '\n);' - return alphatext # noqa: DOC201, RET504 + return alphatext # noqa: DOC201, RET504, RUF100 ############################################################# def alphaheader(self): @@ -195,7 +195,7 @@ def alphaheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def alphacheck(self, data, fipath): @@ -220,7 +220,7 @@ def alphacheck(self, data, fipath): fname = 'SWAlpha.txt' swalphafile = os.path.join(fipath, fname) # noqa: PTH118 if not os.path.exists(swalphafile): # noqa: PTH110 - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # For all types other than the shallow water else: diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py index 5dd10114a..97c77bec6 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py @@ -120,7 +120,7 @@ def mattext(self, data): mattext = mattext + 'sigma\t[1 0 -2 0 0 0 0]\t' + sigma + ';\n' - return mattext # noqa: DOC201, RET504 + return mattext # noqa: DOC201, RET504, RUF100 ############################################################# def matheader(self): @@ -142,7 +142,7 @@ def matheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 
############################################################# def matcheck(self, data): @@ -162,7 +162,7 @@ def matcheck(self, data): data, ['Events', 'WaterViscosity'] ) if nuwater == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # Exponent nuwaterexp = hydroutil.extract_element_from_json( data, ['Events', 'WaterViscosityExp'] diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py index abf1c24b0..dc300767a 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py @@ -72,7 +72,7 @@ def meshcheck(self, data, fipath): # If hydro mesher - nothing to check if int(mesher[0]) == 0: - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 # Other mesh software elif int(mesher[0]) == 1: # noqa: RET505 @@ -126,7 +126,7 @@ def meshheader(self, fileobjec): ) # Return the header for meshing file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def bmeshtext(self, data): @@ -284,7 +284,7 @@ def bmeshtext(self, data): # Add merge patch pairs bmeshtext = bmeshtext + 'mergePatchPairs\n(\n);\n' - return bmeshtext # noqa: DOC201, RET504 + return bmeshtext # noqa: DOC201, RET504, RUF100 ############################################################# def sfetext(self): @@ -320,7 +320,7 @@ def sfetext(self): elif int(data_geoext[6]) == 3: # noqa: PLR2004 sfetext = sfetext + 'OtherBuilding.stl\n' + stlinfo + '\n\n' - return sfetext # noqa: DOC201 + return sfetext # noqa: DOC201, RUF100 ############################################################# def shmtext(self, data): @@ -505,7 +505,7 @@ def shmtext(self, data): shmtext = shmtext + 'debug\t0;\n' shmtext = shmtext + 'mergeTolerance\t1E-6;\n' - return shmtext # noqa: DOC201, RET504 + return shmtext # noqa: DOC201, RET504, RUF100 ############################################################# def scripts(self, data, path): # noqa: C901 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py index 3afa97455..a2b0b1bdf 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py @@ -78,7 +78,7 @@ def othersheader(self, fileclas, fileloc, fileobjec): ) # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def gfiletext(self, data): @@ -140,4 +140,4 @@ def gfiletext(self, data): + ');\n' ) - return gfiletext # noqa: DOC201, RET504 + return gfiletext # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py index d6ee4a088..151d95546 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py @@ -93,7 +93,7 @@ def Prtext(self, data, patches): # noqa: N802 prtext = prtext + '}\n\n' # Return the text for velocity BC - return prtext # noqa: DOC201, RET504 + return prtext # noqa: DOC201, RET504, RUF100 ############################################################# def Prheader(self): # noqa: N802 @@ -118,7 +118,7 @@ def Prheader(self): # noqa: N802 header = header + 'internalField\tuniform\t0;\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 
############################################################# def Prpatchtext(self, data, Prtype, patchname): # noqa: C901, N802, N803 @@ -208,4 +208,4 @@ def Prpatchtext(self, data, Prtype, patchname): # noqa: C901, N802, N803 Prtext = Prtext + 'type\tempty;\n\t}\n' # noqa: N806 # Return the header for U file - return Prtext # noqa: DOC201 + return Prtext # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py index 2306f71bf..3aa0a8178 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py @@ -132,7 +132,7 @@ def pprocesstext(self, data, path): sampletext = sampletext + ');\n\n' sampletext = sampletext + 'fields\t' + fieldtext + ';\n' - return sampletext # noqa: DOC201, RET504 + return sampletext # noqa: DOC201, RET504, RUF100 ############################################################# def pprocesscdict(self, data, path): # noqa: C901 @@ -275,7 +275,7 @@ def pprocesscdict(self, data, path): # noqa: C901 cdicttext = cdicttext + '\t\tfields\t' + fieldtext + ';\n' cdicttext = cdicttext + '\t}\n}' - return cdicttext # noqa: DOC201, RET504 + return cdicttext # noqa: DOC201, RET504, RUF100 ############################################################# def scripts(self, data, path): # noqa: ARG002 @@ -293,7 +293,7 @@ def scripts(self, data, path): # noqa: ARG002 data, ['Events', 'Postprocessing'] ) if pprocess == [None]: - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 else: # noqa: RET505 pprocess = ', '.join( hydroutil.extract_element_from_json( @@ -350,7 +350,7 @@ def pprocesscheck(self, data, path): ) if pprocess == 'No': - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 else: # noqa: RET505 pprocessV = ', '.join( # noqa: N806 hydroutil.extract_element_from_json(data, ['Events', 'PPVelocity']) diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py index f338f96da..f337d5032 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py @@ -115,7 +115,7 @@ def PtDcheck(self, data, patches): # noqa: N802 if (int(Utype) == 103) or (int(Utype) == 104): # noqa: PLR2004 numMovWall += 1 # noqa: N806 if numMovWall > 0: - return 1 # noqa: DOC201 + return 1 # noqa: DOC201, RUF100 if numMovWall == 0: return 0 @@ -169,7 +169,7 @@ def PtDtext(self, data, fipath, patches): # noqa: N802 ptdtext = ptdtext + '}\n\n' # Return the text for pointDisplacement - return ptdtext # noqa: DOC201, RET504 + return ptdtext # noqa: DOC201, RET504, RUF100 ############################################################# def PtDheader(self): # noqa: N802 @@ -194,7 +194,7 @@ def PtDheader(self): # noqa: N802 header = header + 'internalField\tuniform (0 0 0);\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def PtDpatchtext(self, data, Utype, patchname, fipath): # noqa: ARG002, N802, N803 @@ -243,7 +243,7 @@ def PtDpatchtext(self, data, Utype, patchname, fipath): # noqa: ARG002, N802, N PtDtext = PtDtext + 'value\tuniform (0 0 0);\n' # noqa: N806 PtDtext = PtDtext + '\t}\n' # noqa: N806 - return PtDtext # noqa: DOC201 + return PtDtext # noqa: DOC201, RUF100 ############################################################# def getNormal(self, patchname): # noqa: N802 @@ -267,4 +267,4 @@ def getNormal(self, 
patchname): # noqa: N802 elif (patchname == 'Building') or (patchname == 'OtherBuilding'): # noqa: PLR1714 normal = '1 0 0' - return normal # noqa: DOC201 + return normal # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py index c08b721c3..bbdfa077d 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py @@ -73,7 +73,7 @@ def solverheader(self, fileobjec): ) # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def fvSchemetext(self, data): # noqa: ARG002, N802 @@ -163,7 +163,7 @@ def fvSchemetext(self, data): # noqa: ARG002, N802 fvSchemetext = fvSchemetext + 'alpha.water;\n' # noqa: N806 fvSchemetext = fvSchemetext + '}\n' # noqa: N806 - return fvSchemetext # noqa: DOC201, RET504 + return fvSchemetext # noqa: DOC201, RET504, RUF100 ############################################################# def fvSolntext(self, data): # noqa: N802 @@ -280,7 +280,7 @@ def fvSolntext(self, data): # noqa: N802 fvSolntext = fvSolntext + 'fields\n\t{\n\t}\n\t' # noqa: N806 fvSolntext = fvSolntext + 'equations\n\t{\n\t\t".*"\t1;\n\t}\n}' # noqa: N806 - return fvSolntext # noqa: DOC201, RET504 + return fvSolntext # noqa: DOC201, RET504, RUF100 ############################################################# def cdicttext(self, data): @@ -349,7 +349,7 @@ def cdicttext(self, data): cdicttext = cdicttext + 'maxAlphaCo \t 1.0;\n\n' cdicttext = cdicttext + 'maxDeltaT \t 1;\n\n' - return cdicttext # noqa: DOC201, RET504 + return cdicttext # noqa: DOC201, RET504, RUF100 ############################################################# def cdictcheck(self, data): @@ -366,7 +366,7 @@ def cdictcheck(self, data): # Start time startT = hydroutil.extract_element_from_json(data, ['Events', 'StartTime']) # noqa: N806 if startT == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # End time endT = hydroutil.extract_element_from_json(data, ['Events', 'EndTime']) # noqa: N806 @@ -489,4 +489,4 @@ def cdictFtext(self, data): # noqa: N802 cdicttext = cdicttext + 'direction\t(1 0 0);\n\t\t\t' cdicttext = cdicttext + 'cumulative\tno;\n\t\t}\n\t}\n}' - return cdicttext # noqa: DOC201, RET504 + return cdicttext # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py index 14be4b9d2..407ceddf8 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py @@ -84,7 +84,7 @@ def turbtext(self, data): turbtext = turbtext + '\tturbulence\ton;\n' turbtext = turbtext + '\tprintCoeffs\ton;\n}\n' - return turbtext # noqa: DOC201 + return turbtext # noqa: DOC201, RUF100 ############################################################# def turbheader(self): @@ -106,4 +106,4 @@ def turbheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py index 87c49472f..283b2f082 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py @@ -105,7 +105,7 @@ def Utext(self, data, 
fipath, patches): # noqa: N802 utext = utext + '}\n\n' # Return the text for velocity BC - return utext # noqa: DOC201, RET504 + return utext # noqa: DOC201, RET504, RUF100 ############################################################# def Uheader(self): # noqa: N802 @@ -130,7 +130,7 @@ def Uheader(self): # noqa: N802 header = header + 'internalField\tuniform (0 0 0);\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def Upatchtext(self, data, Utype, patchname, fipath, numMovWall): # noqa: C901, N802, N803 @@ -345,7 +345,7 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall): # noqa: C901, Utext = Utext + 'type\tempty;\n\t}\n' # noqa: N806 # Return the header for U file - return Utext # noqa: DOC201 + return Utext # noqa: DOC201, RUF100 ############################################################# def Uchecks(self, data, fipath, patches): # noqa: C901, N802 @@ -384,7 +384,7 @@ def Uchecks(self, data, fipath, patches): # noqa: C901, N802 # Checking for multiple moving walls numMovWall += 1 # noqa: N806 if numMovWall > 1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # Check for existing moving wall files dispfilename = hydroutil.extract_element_from_json( diff --git a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py index 4ed8cc557..d82396707 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py +++ b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py @@ -169,7 +169,7 @@ def createfolder(self, data, path, args): scriptfile.close() # Return completion flag - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def creategeometry(self, data, path): @@ -192,7 +192,7 @@ def creategeometry(self, data, path): # Create the geometry related files Geometry = of7Geometry() # noqa: N806 if int(mesher[0]) == 1: - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 elif int(mesher[0]) == 0 or int(mesher[0]) == 2: # noqa: RET505, PLR2004 geomcode = Geometry.geomcheck(data, path) if geomcode == -1: @@ -245,7 +245,7 @@ def createmesh(self, data, path): Meshing = of7Meshing() # noqa: N806 meshcode = Meshing.meshcheck(data, path) if meshcode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 elif int(mesher[0]) == 0: # noqa: RET505 # blockMesh bmeshtext = Meshing.bmeshtext(data) @@ -295,7 +295,7 @@ def materials(self, data, path): Materials = of7Materials() # noqa: N806 matcode = Materials.matcheck(data) if matcode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 mattext = Materials.mattext(data) fname = 'transportProperties' @@ -320,7 +320,7 @@ def initial(self, data, path): Inicond = of7Initial() # noqa: N806 initcode = Inicond.alphacheck(data, path) if initcode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 alphatext = Inicond.alphatext(data, path) fname = 'setFieldsDict' @@ -355,7 +355,7 @@ def boundary(self, data, path): # Check for boundary conditions here ecode = Uboundary.Uchecks(data, path, patches) if ecode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 # Write the U-file if no errors # Path to the file @@ -421,7 +421,7 @@ def turbulence(self, data, path): turbfile.write(turbtext) turbfile.close() - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 
############################################################# def parallelize(self, data, path): @@ -445,7 +445,7 @@ def parallelize(self, data, path): # Scripts Decomp.scripts(data, path) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def solve(self, data, path): @@ -478,7 +478,7 @@ def solve(self, data, path): # controlDict ecode = Solve.cdictcheck(data) if ecode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 cdicttext = Solve.cdicttext(data) fname = 'controlDict' @@ -516,7 +516,7 @@ def others(self, data, path): gfile.write(gfiletext) gfile.close() - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def dakota(self, args): @@ -533,7 +533,7 @@ def dakota(self, args): # Dakota Scripts dakota.dakotascripts(args) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def postprocessing(self, data, path): @@ -550,7 +550,7 @@ def postprocessing(self, data, path): # controlDict ecode = pprocess.pprocesscheck(data, path) if ecode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 elif ecode == 0: # noqa: RET505 return 0 else: @@ -589,4 +589,4 @@ def cleaning(self, args, path): # Dakota Scripts cleaner.cleaning(args, path) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py index a14198e9d..be0cef04d 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py @@ -104,4 +104,4 @@ def creategeom(self, data, path): # noqa: ARG002 # Write extreme values and building data to temporary file for later usage flumeobj.extremedata(extreme, breadth) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py index e0767012c..66638127c 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py @@ -102,4 +102,4 @@ def creategeom(self, data, path): # Write extreme values and building data to temporary file for later usage flumeobj.extremedata(extreme, breadth) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py index 240ad0459..e561d9dbc 100644 --- a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py +++ b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/Istanbul/IstanbulStations.py b/modules/createEVENT/Istanbul/IstanbulStations.py index 05eab924c..8145e6ee1 100644 --- a/modules/createEVENT/Istanbul/IstanbulStations.py +++ b/modules/createEVENT/Istanbul/IstanbulStations.py @@ -190,7 +190,7 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: DOC201, RET504 
+ return distance # noqa: DOC201, RET504, RUF100 if __name__ == '__main__': diff --git a/modules/createEVENT/M9/M9API.py b/modules/createEVENT/M9/M9API.py index 4e8744751..ec6a49ea0 100644 --- a/modules/createEVENT/M9/M9API.py +++ b/modules/createEVENT/M9/M9API.py @@ -332,4 +332,4 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: DOC201, RET504 + return distance # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/M9/M9Stations.py b/modules/createEVENT/M9/M9Stations.py index 2ade560b6..f52352c90 100644 --- a/modules/createEVENT/M9/M9Stations.py +++ b/modules/createEVENT/M9/M9Stations.py @@ -229,4 +229,4 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: DOC201, RET504 + return distance # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/MPM/MPM.py b/modules/createEVENT/MPM/MPM.py index 43246d132..5a2d55a45 100644 --- a/modules/createEVENT/MPM/MPM.py +++ b/modules/createEVENT/MPM/MPM.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent( # noqa: N802 diff --git a/modules/createEVENT/MPM/post_process_output.py b/modules/createEVENT/MPM/post_process_output.py index 6a32be5ac..3327b8a1f 100644 --- a/modules/createEVENT/MPM/post_process_output.py +++ b/modules/createEVENT/MPM/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p # noqa: DOC201 + return probes, time, p # noqa: DOC201, RUF100 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: DOC201, RET504 + return sField # noqa: DOC201, RET504, RUF100 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U # noqa: DOC201 + return probes, time, U # noqa: DOC201, RUF100 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: DOC201, RET504 + return L # noqa: DOC201, RET504, RUF100 def psd(x, dt, nseg): # 
noqa: F811 diff --git a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py index a31ebbac7..e7065e2ff 100644 --- a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py +++ b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py index 6a32be5ac..3327b8a1f 100644 --- a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py +++ b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p # noqa: DOC201 + return probes, time, p # noqa: DOC201, RUF100 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: DOC201, RET504 + return sField # noqa: DOC201, RET504, RUF100 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U # noqa: DOC201 + return probes, time, U # noqa: DOC201, RUF100 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: DOC201, RET504 + return L # noqa: DOC201, RET504, RUF100 def psd(x, dt, nseg): # noqa: F811 diff --git a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py index 66728136a..1caab63cd 100644 --- a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py +++ b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def 
addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py index c05932380..eb60a5fcb 100644 --- a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py +++ b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py @@ -167,7 +167,7 @@ def convert_accel_units(self, acceleration, from_, to_='cm/sec/sec'): # noqa: C acceleration = np.asarray(acceleration) if from_ == 'g': if to_ == 'g': - return acceleration # noqa: DOC201 + return acceleration # noqa: DOC201, RUF100 if to_ in self.km_sec_square: return acceleration * self.g / 1000.0 if to_ in self.m_sec_square: diff --git a/modules/createEVENT/siteResponse/RegionalSiteResponse.py b/modules/createEVENT/siteResponse/RegionalSiteResponse.py index 35ef081a0..edce432d0 100644 --- a/modules/createEVENT/siteResponse/RegionalSiteResponse.py +++ b/modules/createEVENT/siteResponse/RegionalSiteResponse.py @@ -131,7 +131,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors # noqa: DOC201 + return scale_factors # noqa: DOC201, RUF100 def postProcess(evtName, input_units, f_scale_units): # noqa: N802, N803, D103 diff --git a/modules/createEVENT/stochasticWave/StochasticWave.py b/modules/createEVENT/stochasticWave/StochasticWave.py index e8fda748f..98971436f 100644 --- a/modules/createEVENT/stochasticWave/StochasticWave.py +++ b/modules/createEVENT/stochasticWave/StochasticWave.py @@ -100,7 +100,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor): # noqa: N802, N803 diff --git a/modules/createSAM/AutoSDA/beam_component.py b/modules/createSAM/AutoSDA/beam_component.py index b1e840587..ed7b949c7 100644 --- a/modules/createSAM/AutoSDA/beam_component.py +++ b/modules/createSAM/AutoSDA/beam_component.py @@ -181,7 +181,7 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag # noqa: DOC201 + return self.flag # noqa: DOC201, RUF100 def compute_demand_capacity_ratio(self): """This method is used to compute demand to capacity ratios. 
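A note on the recurring change in this series: almost every hunk appends RUF100 to an existing noqa list. RUF100 is Ruff's unused-noqa rule; DOC201 (the pydoclint-derived "docstring does not document its return value" check that most of these comments suppress) is, at the time of writing, a preview-gated rule, so under a non-preview configuration Ruff reports each bare noqa: DOC201 comment as an unused suppression. Naming RUF100 in the same list keeps the comment valid whether or not the preview rules are enabled. A minimal sketch of the pattern, with hypothetical file and function names:

    # demo.py -- exercise with: ruff check demo.py
    def to_dof(direction):
        """Convert a direction label to a degree of freedom."""
        mapping = {'X': 1, 'Y': 2, 'Z': 3}
        # With DOC201 inactive, a bare "# noqa: DOC201" would itself be
        # flagged as an unused suppression (RUF100); naming RUF100 in the
        # list silences that report without dropping the original code.
        return mapping[direction]  # noqa: DOC201, RUF100

    print(to_dof('X'))  # -> 1
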
diff --git a/modules/createSAM/AutoSDA/column_component.py b/modules/createSAM/AutoSDA/column_component.py index 4d803081c..c17b6b06e 100644 --- a/modules/createSAM/AutoSDA/column_component.py +++ b/modules/createSAM/AutoSDA/column_component.py @@ -264,7 +264,7 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag # noqa: DOC201 + return self.flag # noqa: DOC201, RUF100 def compute_demand_capacity_ratio(self): """This method is used to calculate the demand to capacity ratios for column components diff --git a/modules/createSAM/AutoSDA/connection_part.py b/modules/createSAM/AutoSDA/connection_part.py index c1c246d56..0b2bd3fa6 100644 --- a/modules/createSAM/AutoSDA/connection_part.py +++ b/modules/createSAM/AutoSDA/connection_part.py @@ -740,4 +740,4 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag # noqa: DOC201 + return self.flag # noqa: DOC201, RUF100 diff --git a/modules/createSAM/AutoSDA/help_functions.py b/modules/createSAM/AutoSDA/help_functions.py index b65c0cbfa..379209b75 100644 --- a/modules/createSAM/AutoSDA/help_functions.py +++ b/modules/createSAM/AutoSDA/help_functions.py @@ -50,7 +50,7 @@ def determine_Fa_coefficient(site_class, Ss): # noqa: C901, N802, N803 Fa = None # noqa: N806 print('Site class is entered with an invalid value') # noqa: T201 - return Fa # noqa: DOC201 + return Fa # noqa: DOC201, RUF100 def determine_Fv_coefficient(site_class, S1): # noqa: C901, N802, N803 @@ -94,7 +94,7 @@ def determine_Fv_coefficient(site_class, S1): # noqa: C901, N802, N803 Fv = None # noqa: N806 print('Site class is entered with an invalid value') # noqa: T201 - return Fv # noqa: DOC201 + return Fv # noqa: DOC201, RUF100 def calculate_DBE_acceleration(Ss, S1, Fa, Fv): # noqa: N802, N803 @@ -111,7 +111,7 @@ def calculate_DBE_acceleration(Ss, S1, Fa, Fv): # noqa: N802, N803 SM1 = Fv * S1 # noqa: N806 SDS = 2 / 3 * SMS # noqa: N806 SD1 = 2 / 3 * SM1 # noqa: N806 - return SMS, SM1, SDS, SD1 # noqa: DOC201 + return SMS, SM1, SDS, SD1 # noqa: DOC201, RUF100 def determine_Cu_coefficient(SD1): # noqa: N802, N803 @@ -133,7 +133,7 @@ def determine_Cu_coefficient(SD1): # noqa: N802, N803 else: Cu = 1.4 # noqa: N806 - return Cu # noqa: DOC201 + return Cu # noqa: DOC201, RUF100 def determine_floor_height( @@ -161,7 +161,7 @@ def determine_floor_height( level - 2 ) - return floor_height # noqa: DOC201 + return floor_height # noqa: DOC201, RUF100 def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie): # noqa: N802, N803 @@ -212,7 +212,7 @@ def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie): # noqa: N802, N803 else: pass - return Cs # noqa: DOC201 + return Cs # noqa: DOC201, RUF100 def determine_k_coeficient(period): @@ -227,7 +227,7 @@ def determine_k_coeficient(period): else: k = 1 + 0.5 * (period - 0.5) - return k # noqa: DOC201 + return k # noqa: DOC201, RUF100 def calculate_seismic_force(base_shear, floor_weight, floor_height, k): @@ -252,7 +252,7 @@ def calculate_seismic_force(base_shear, floor_weight, floor_height, k): for story in range(len(floor_weight) - 1, -1, -1): story_shear[story] = np.sum(seismic_force[story:]) - return seismic_force, story_shear # noqa: DOC201 + return seismic_force, story_shear # noqa: DOC201, RUF100 def find_section_candidate(target_depth, section_database): @@ -267,7 +267,7 @@ def find_section_candidate(target_depth, section_database): if 
match: candidate_index.append(indx) candidates = section_database.loc[candidate_index, 'section size'] - return candidates # noqa: DOC201, RET504 + return candidates # noqa: DOC201, RET504, RUF100 def search_member_size(target_name, target_quantity, candidate, section_database): @@ -299,7 +299,7 @@ def search_member_size(target_name, target_quantity, candidate, section_database section_size = section_database.loc[ candidate_index[min_index[0][0]], 'section size' ] - return section_size # noqa: DOC201 + return section_size # noqa: DOC201, RUF100 def search_section_property(target_size, section_database): @@ -316,7 +316,7 @@ def search_section_property(target_size, section_database): for indx in np.array(section_database['index']): if target_size == section_database.loc[indx, 'section size']: section_info = section_database.loc[indx, :] - return section_info.to_dict() # noqa: DOC201 + return section_info.to_dict() # noqa: DOC201, RUF100 except: # noqa: E722 sys.stderr.write( 'Error: wrong size nominated!\nNo such size exists in section database!' @@ -336,7 +336,7 @@ def decrease_member_size(candidate, current_size): # This means the smallest candidate still cannot make design drift close to drift limit, # which further means the smallest section candidate is too large. sys.stderr.write('The lower bound for depth initialization is too large!\n') - return candidate[candidate_pool_index + 1] # noqa: DOC201 + return candidate[candidate_pool_index + 1] # noqa: DOC201, RUF100 def extract_depth(size): @@ -346,7 +346,7 @@ def extract_depth(size): """ # noqa: D205, D400, D401, D404 # Use Python regular expression to extract the char between 'W' and 'X', which then become depth output = re.findall(r'.*W(.*)X.*', size) - return int(output[0]) # noqa: DOC201 + return int(output[0]) # noqa: DOC201, RUF100 def extract_weight(size): @@ -357,7 +357,7 @@ def extract_weight(size): # Use Python regular expression to extract the char after 'W' to the end of the string, # which then becomes weight output = re.findall(r'.X(.*)', size) - return int(output[0]) # noqa: DOC201 + return int(output[0]) # noqa: DOC201, RUF100 def constructability_helper( # noqa: C901 @@ -541,7 +541,7 @@ def constructability_helper( # noqa: C901 variation_story.pop() # Update the ending index for next "identical story block" ending_index = variation_story[-1] - return section_size # noqa: DOC201 + return section_size # noqa: DOC201, RUF100 # # Loop over all stories from top to bottom to consider the constructability # starting_story = total_story - 1 @@ -596,4 +596,4 @@ def increase_member_size(candidate, current_size): if candidate_pool_index - 1 < 0: # Make sure the index does not exceed the bound # This means the largest candidate still fails to satisfy the requirement sys.stderr.write('The upper bound for depth initialization is too small!\n') - return candidate[candidate_pool_index - 1] # noqa: DOC201 + return candidate[candidate_pool_index - 1] # noqa: DOC201, RUF100 diff --git a/modules/performDL/pelicun3/DL_visuals.py b/modules/performDL/pelicun3/DL_visuals.py index 66b42cef7..43cc80d39 100644 --- a/modules/performDL/pelicun3/DL_visuals.py +++ b/modules/performDL/pelicun3/DL_visuals.py @@ -116,26 +116,26 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 5: cl.scales['5']['seq']['Reds'], } - if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031 + if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031, RUF100 p_min, p_max = 0.01, 0.9 d_min = np.inf d_max = -np.inf LS_count = 0 # noqa: N806 
for LS in limit_states: # noqa: N806 - if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031 + if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031, RUF100 d_min_i, d_max_i = norm.ppf( [p_min, p_max], - loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031 - * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 + loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031, RUF100 + * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 ) - elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031 + elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031, RUF100 d_min_i, d_max_i = np.exp( norm.ppf( [p_min, p_max], - loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031 + loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031, RUF100 ) ) else: @@ -149,18 +149,18 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 demand_vals = np.linspace(d_min, d_max, num=100) for i_ls, LS in enumerate(limit_states): # noqa: N806 - if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031 + if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031, RUF100 cdf_vals = norm.cdf( demand_vals, - loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031 - * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 + loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031, RUF100 + * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 ) - elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031 + elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031, RUF100 cdf_vals = norm.cdf( np.log(demand_vals), - loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031 + loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031, RUF100 ) else: continue @@ -385,11 +385,11 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 gridcolor='rgb(192,192,192)', ) - demand_unit = comp_data.loc[('Demand', 'Unit')] # noqa: RUF031 + demand_unit = comp_data.loc[('Demand', 'Unit')] # noqa: RUF031, RUF100 if demand_unit == 'unitless': demand_unit = '-' fig.update_xaxes( - title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]", # noqa: RUF031 + title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]", # noqa: RUF031, RUF100 **shared_ax_props, ) @@ -465,7 +465,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, # perform plotting for each repair consequence type independently for c_type in repair_df.loc[comp_id].index: # load the component-specific part of the database - comp_data = repair_df.loc[(comp_id, c_type)] # noqa: RUF031 + comp_data = repair_df.loc[(comp_id, c_type)] # noqa: RUF031, RUF100 # and the component-specific metadata - if it exists if repair_meta != None: # noqa: E711 @@ -620,7 +620,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, ), } - if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031 + if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031, RUF100 # set the parameters for displaying uncertainty p_min, p_max = 0.16, 0.84 # +- 1 std # noqa: F841 @@ -929,7 +929,7 @@ def 
plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, elif quantity_unit.split()[0] == '1': quantity_unit = quantity_unit.split()[1] - dv_unit = comp_data.loc[('DV', 'Unit')] # noqa: RUF031 + dv_unit = comp_data.loc[('DV', 'Unit')] # noqa: RUF031, RUF100 if dv_unit == 'unitless': dv_unit = '-' diff --git a/modules/performHUA/pyincore_data/censusutil.py b/modules/performHUA/pyincore_data/censusutil.py index 55b97b6f4..d17253710 100644 --- a/modules/performHUA/pyincore_data/censusutil.py +++ b/modules/performHUA/pyincore_data/censusutil.py @@ -16,7 +16,7 @@ import geopandas as gpd import pandas as pd import requests -from pyincore_data import globals # noqa: A004 +from pyincore_data import globals # noqa: A004, RUF100 logger = globals.LOGGER @@ -107,7 +107,7 @@ def request_census_api(data_url): api_json = request_json.json() api_df = pd.DataFrame(columns=api_json[0], data=api_json[1:]) - return api_df # noqa: DOC201, RET504 + return api_df # noqa: DOC201, RET504, RUF100 @staticmethod def get_blockdata_for_demographics( # noqa: C901 @@ -191,7 +191,7 @@ def get_blockdata_for_demographics( # noqa: C901 else: print('Only 2000, 2010, and 2020 decennial census supported') # noqa: T201 - return None # noqa: DOC201 + return None # noqa: DOC201, RUF100 # Make directory to save output if not os.path.exists(output_dir): # noqa: PTH110 @@ -860,7 +860,7 @@ def get_blockgroupdata_for_income( # noqa: C901 print('Done creating household income shapefile') # noqa: T201 - return cen_blockgroup[save_columns] # noqa: DOC201 + return cen_blockgroup[save_columns] # noqa: DOC201, RUF100 @staticmethod def convert_dislocation_gpd_to_shapefile(in_gpd, programname, savefile): diff --git a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py index c173370e0..c141ffb8d 100644 --- a/modules/performREC/pyrecodes/run_pyrecodes.py +++ b/modules/performREC/pyrecodes/run_pyrecodes.py @@ -235,7 +235,7 @@ def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): # n ujson.dump(results, f) ind_in_rank += 1 - count = count + 1 # noqa: PLR6104 + count = count + 1 # noqa: PLR6104, RUF100 # wait for all to finish if doParallel: @@ -504,4 +504,3 @@ def select_realizations_to_run(damage_input, inputRWHALE): # noqa: N803, D103 mpiExec=wfArgs.mpiexec, numPROC=numPROC, ) - diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index 46ddda580..c875cc8c5 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -114,7 +114,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: DOC201, RET504 + return run_tag # noqa: DOC201, RET504, RUF100 # Max and Min IDs if len(filterIDs) > 0: stns_requested = [] @@ -609,7 +609,7 @@ def create_gridded_stations( gstn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 1 - return run_tag # noqa: DOC201, RET504 + return run_tag # noqa: DOC201, RET504, RUF100 if np.max(gstn_df.index.values) != 2: # noqa: PLR2004 run_tag = 1 return run_tag # noqa: RET504 @@ -662,7 +662,7 @@ def get_vs30_global(lat, lon): ) vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: DOC201, RET504 + return vs30 # noqa: DOC201, RET504, RUF100 def 
get_vs30_thompson(lat, lon): @@ -694,21 +694,21 @@ def get_vs30_thompson(lat, lon): vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: DOC201, RET504 + return vs30 # noqa: DOC201, RET504, RUF100 def get_z1(vs30): """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter)""" # noqa: D400 z1 = np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4))) # return - return z1 # noqa: DOC201, RET504 + return z1 # noqa: DOC201, RET504, RUF100 def get_z25(z1): """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013)""" # noqa: D400 z25 = 0.748 + 2.218 * z1 # return - return z25 # noqa: DOC201, RET504 + return z25 # noqa: DOC201, RET504, RUF100 def get_z25fromVs(vs): # noqa: N802 @@ -717,7 +717,7 @@ def get_z25fromVs(vs): # noqa: N802 """ # noqa: D205, D400 z25 = (7.089 - 1.144 * np.log(vs)) * 1000 # return - return z25 # noqa: DOC201, RET504 + return z25 # noqa: DOC201, RET504, RUF100 def get_zTR_global(lat, lon): # noqa: N802 @@ -743,7 +743,7 @@ def get_zTR_global(lat, lon): # noqa: N802 ) zTR = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # noqa: N806 # return - return zTR # noqa: DOC201, RET504 + return zTR # noqa: DOC201, RET504, RUF100 def export_site_prop(stn_file, output_dir, filename): @@ -811,7 +811,7 @@ def get_zTR_ncm(lat, lon): # noqa: N802 # get the top bedrock data zTR.append(abs(cur_res['response']['results'][0]['profiles'][0]['top'])) # return - return zTR # noqa: DOC201 + return zTR # noqa: DOC201, RUF100 def get_vsp_ncm(lat, lon, depth): @@ -850,7 +850,7 @@ def get_vsp_ncm(lat, lon, depth): if len(vsp) == 1: vsp = vsp[0] # return - return vsp # noqa: DOC201 + return vsp # noqa: DOC201, RUF100 def compute_vs30_from_vsp(depthp, vsp): @@ -868,7 +868,7 @@ def compute_vs30_from_vsp(depthp, vsp): # Computing the Vs30 vs30p = 30.0 / np.sum(delta_t) # return - return vs30p # noqa: DOC201, RET504 + return vs30p # noqa: DOC201, RET504, RUF100 def get_vs30_ncm(lat, lon): @@ -895,7 +895,7 @@ def get_vs30_ncm(lat, lon): ) vs30.append(760.0) # return - return vs30 # noqa: DOC201 + return vs30 # noqa: DOC201, RUF100 def get_soil_model_ba(param=None): @@ -925,7 +925,7 @@ def get_soil_model_ba(param=None): else: res = None - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 def get_soil_model_ei(param=None): @@ -940,7 +940,7 @@ def get_soil_model_ei(param=None): else: res = None - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 def get_soil_model_user(df_stn, model_fun): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py index cd1615ab0..2a7cf2574 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py @@ -630,7 +630,7 @@ def oq_run_classical_psha( # noqa: C901 export_realizations('realizations', dstore) except: # noqa: E722 print('FetchOpenQuake: Classical PSHA failed.') # noqa: T201 - return 1 # noqa: DOC201 + return 1 # noqa: DOC201, RUF100 elif vtag == 11: # noqa: PLR2004 try: print(f'FetchOpenQuake: running Version {oq_version}.') # noqa: T201 @@ -845,7 +845,7 @@ def oq_read_uhs_classical_psha(scen_info, event_info, dir_info): mag_maf.append([0.0, float(list_IMs[0].split('~')[0]), 0.0]) # return - return ln_psa_mr, mag_maf, im_list # noqa: DOC201 + return ln_psa_mr, 
mag_maf, im_list # noqa: DOC201, RUF100 class OpenQuakeHazardCalc: # noqa: D101 @@ -991,7 +991,7 @@ def run_calc(self): # noqa: C901 oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False ): - return {} # noqa: DOC201 + return {} # noqa: DOC201, RUF100 elif 'rupture_model' not in oq.inputs: logging.warning( 'There is no rupture_model, the calculator will just ' @@ -1550,7 +1550,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 } # return - return res # noqa: DOC201, RET504 + return res # noqa: DOC201, RET504, RUF100 def calculator_build_events_from_sources(self): # noqa: C901 """Prefilter the composite source model and store the source_info""" # noqa: D400 @@ -1666,7 +1666,7 @@ def __str__(self): # noqa: D105 def to_imt_unit_values(vals, imt): """Exponentiate the values unless the IMT is MMI""" # noqa: D400 if str(imt) == 'MMI': - return vals # noqa: DOC201 + return vals # noqa: DOC201, RUF100 return np.exp(vals) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py index 093217cc3..15a748471 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py @@ -654,7 +654,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_ManzourDavidson2016._input_check: no return period is defined.' ) - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -730,8 +730,8 @@ def _opt_initialization(self): itertools.product(range(self.num_sites), range(self.num_return_periods)) ) self.prob += pulp.lpSum( - self.return_periods[j] * self.e_plus[(i, j)] # noqa: RUF031 - + self.return_periods[j] * self.e_minus[(i, j)] # noqa: RUF031 + self.return_periods[j] * self.e_plus[(i, j)] # noqa: RUF031, RUF100 + + self.return_periods[j] * self.e_minus[(i, j)] # noqa: RUF031, RUF100 for (i, j) in comb_sites_rps ) @@ -757,7 +757,7 @@ def _opt_initialization(self): <= self.num_scenarios ) - return True # noqa: DOC201 + return True # noqa: DOC201, RUF100 def solve_opt(self): """target_function: compute the target function to be minimized @@ -853,7 +853,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_Wangetal2023._input_check: no return period is defined.' 
) - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -916,7 +916,7 @@ def _opt_initialization(self): self.X_weighted = np.dot(self.W, self.X) self.y_weighted = np.dot(self.W, self.y) - return True # noqa: DOC201 + return True # noqa: DOC201, RUF100 def solve_opt(self): """LASSO regression""" # noqa: D400 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 98ff9cd60..3d9eaed21 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -471,7 +471,7 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 ) gf_im_list += settlement_info['Output'] if 'Landslide' in ground_failure_info.keys(): # noqa: SIM118 - import landslide # noqa: PLC0415 + import landslide # noqa: PLC0415, RUF100 if 'Landslide' in ground_failure_info['Landslide'].keys(): # noqa: SIM118 lsld_info = ground_failure_info['Landslide']['Landslide'] diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index 17c8b1fa1..d6e820aac 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -65,7 +65,7 @@ def baker_jayaram_correlation_2008(im1, im2, flag_orth=False): # noqa: FBT002, elif im1.startswith('PGA'): T1 = 0.0 # noqa: N806 else: - return 0.0 # noqa: DOC201 + return 0.0 # noqa: DOC201, RUF100 if im2.startswith('SA'): T2 = float(im2[3:-1]) # noqa: N806 elif im2.startswith('PGA'): @@ -126,7 +126,7 @@ def bradley_correlation_2011(IM, T=None, flag_Ds=True): # noqa: FBT002, C901, N # PGA if IM == 'PGA': # noqa: RET503 if flag_Ds: - return -0.442 # noqa: DOC201 + return -0.442 # noqa: DOC201, RUF100 else: # noqa: RET505 return -0.305 elif IM == 'PGV': @@ -252,7 +252,7 @@ def jayaram_baker_correlation_2009(im, h, flag_clustering=False): # noqa: FBT00 else: b = 40.7 - 15.0 * T rho = np.exp(-3.0 * h / b) - return rho # noqa: DOC201, RET504 + return rho # noqa: DOC201, RET504, RUF100 def load_loth_baker_correlation_2013(datapath): @@ -270,7 +270,7 @@ def load_loth_baker_correlation_2013(datapath): B2 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B2.csv', header=0) # noqa: N806 B1 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B1.csv', header=0) # noqa: N806 B3 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B3.csv', header=0) # noqa: N806 - return B1, B2, B3 # noqa: DOC201 + return B1, B2, B3 # noqa: DOC201, RUF100 def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N803 @@ -303,7 +303,7 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N80 Ch = b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0) # noqa: N806 # Correlation coefficient rho = Ch - return rho # noqa: DOC201, RET504 + return rho # noqa: DOC201, RET504, RUF100 def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 @@ -373,7 +373,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 .swapaxes(1, 2) ) # return - return residuals # noqa: DOC201, RET504 + return residuals # 
noqa: DOC201, RET504, RUF100 def load_markhvida_ceferino_baker_correlation_2017(datapath): @@ -404,7 +404,7 @@ def load_markhvida_ceferino_baker_correlation_2017(datapath): index_col=None, header=0, ) - return MCB_model, MCB_pca, MCB_var # noqa: DOC201 + return MCB_model, MCB_pca, MCB_var # noqa: DOC201, RUF100 def markhvida_ceferino_baker_correlation_2017( # noqa: C901 @@ -521,7 +521,7 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 if tmp_periods > model_Tmax: residuals = np.concatenate((residuals, Tmax_residuals), axis=1) # return - return residuals # noqa: DOC201 + return residuals # noqa: DOC201, RUF100 def load_du_ning_correlation_2021(datapath): @@ -548,7 +548,7 @@ def load_du_ning_correlation_2021(datapath): DN_var = pd.read_csv( # noqa: N806 datapath + 'du_ning_correlation_2021_var_scale.csv', index_col=None, header=0 ) - return DN_model, DN_pca, DN_var # noqa: DOC201 + return DN_model, DN_pca, DN_var # noqa: DOC201, RUF100 def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): @@ -657,7 +657,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): ) # return - return residuals # noqa: DOC201 + return residuals # noqa: DOC201, RUF100 def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 @@ -686,7 +686,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 print( # noqa: T201 f'CorrelationModel.baker_bradley_correlation_2017: warning - return 0.0 for unknown {im1}' ) - return 0.0 # noqa: DOC201 + return 0.0 # noqa: DOC201, RUF100 im_list.append(tmp_tag) period_list.append(None) if im2.startswith('SA'): diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py index 2a017cbf4..137adec5d 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py @@ -65,7 +65,7 @@ def abrahamson_silva_ds_1999( print( # noqa: T201 "SignificantDurationModel.abrahamson_silva_ds_1999: duration_type='DS575H','DS575V','DS595H','DS595V'?" ) - return None, None # noqa: DOC201 + return None, None # noqa: DOC201, RUF100 # modeling coefficients beta = [3.2, 3.2, 3.2, 3.2] b1 = [5.204, 4.610, 5.204, 4.610] @@ -140,7 +140,7 @@ def bommer_stafford_alarcon_ds_2009( print( # noqa: T201 "SignificantDurationModel.bommer_stafford_alarcon_ds_2009: duration_type='DS575H','DS595H'?" ) - return None, None, None, None # noqa: DOC201 + return None, None, None, None # noqa: DOC201, RUF100 # modeling coefficients c0 = [-5.6298, -2.2393] @@ -205,7 +205,7 @@ def afshari_stewart_ds_2016( # noqa: C901 print( # noqa: T201 "SignificantDurationModel.afshari_stewart_ds_2016: mechanism='unknown','normal','reverse','strike-slip'?" 
) - return None, None, None, None # noqa: DOC201 + return None, None, None, None # noqa: DOC201, RUF100 # region map reg_map = {'california': 0, 'japan': 1, 'other': 2} reg_tag = reg_map.get(region.lower(), None) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py index be96864de..3a6c527ec 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py @@ -240,7 +240,7 @@ def calc(self, Mw, rJB, rRup, rX, dip, zTop, vs30, vsInf, z1p0, style): # noqa: stdDev = np.sqrt(tauSq + phiSq) # noqa: N806 - return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) # noqa: DOC201 + return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) # noqa: DOC201, RUF100 # https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/imr/attenRelImpl/ngaw2/NGAW2_Wrapper.java#L220 def getFaultFromRake(self, rake): # noqa: N802, D102 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index cd6352725..d4119aba9 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -1,4 +1,4 @@ -import numpy as np # noqa: CPY001, INP001, I001, D100 +import numpy as np # noqa: CPY001, D100, I001, INP001, RUF100 import rasterio as rio from scipy.interpolate import interp2d import sys, warnings, shapely, pandas, os # noqa: ICN001, E401 @@ -10,7 +10,7 @@ import pandas as pd -## Helper functions # noqa: E266 +## Helper functions # noqa: E266, RUF100 def sampleRaster( # noqa: N802 raster_file_path, raster_crs, x, y, interp_scheme='nearest', dtype=None ): @@ -73,10 +73,10 @@ def sampleRaster( # noqa: N802 sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 - return sample # noqa: DOC201 + return sample # noqa: DOC201, RUF100 -## Helper functions # noqa: E266 +## Helper functions # noqa: E266, RUF100 def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG001, N802 """performs spatial join of vector_file with xy'""" # noqa: D400, D401, D403 print(f'Sampling from the Vector File: {os.path.basename(vector_file_path)}...') # noqa: T201, PTH119 @@ -103,7 +103,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 vertices = hull.vertices vertices = sites[np.append(vertices, vertices[0])] centroid = np.mean(vertices, axis=0) - vertices = vertices + 0.05 * (vertices - centroid) # noqa: PLR6104 + vertices = vertices + 0.05 * (vertices - centroid) # noqa: PLR6104, RUF100 RoI = shapely.geometry.Polygon(vertices) # noqa: N806 except: # noqa: E722 centroid = shapely.geometry.Point(np.mean(x), np.mean(y)) @@ -136,7 +136,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 data['geometry'].append(new_geom) del vector_gdf gdf_roi = gpd.GeoDataFrame(data, geometry='geometry', crs=4326) - geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] # noqa: FURB140 + geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] # noqa: FURB140, RUF100 gdf_sites = gpd.GeoDataFrame(geometry=geometry, crs=4326).reset_index() merged = gpd.GeoDataFrame.sjoin( gdf_roi, gdf_sites, how='inner', predicate='contains' 
@@ -144,7 +144,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 - return gdf_sites # noqa: DOC201 + return gdf_sites # noqa: DOC201, RUF100 def find_additional_output_req(liq_info, current_step): # noqa: D103 @@ -213,7 +213,7 @@ def erf2(x): # A & S 7.1.26 t = 1.0 / (1.0 + p * x) y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) - return signs * y # noqa: DOC201 + return signs * y # noqa: DOC201, RUF100 def norm2_cdf(x, loc, scale): @@ -222,7 +222,7 @@ def norm2_cdf(x, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = (x - loc) / scale - return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) # noqa: DOC201 + return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) # noqa: DOC201, RUF100 def erf2_2d(x): @@ -240,7 +240,7 @@ def erf2_2d(x): # A & S 7.1.26 t = 1.0 / (1.0 + p * x) y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) - return signs * y # noqa: DOC201 + return signs * y # noqa: DOC201, RUF100 def norm2_cdf_2d(x, loc, scale): @@ -249,7 +249,7 @@ def norm2_cdf_2d(x, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = (x - loc) / scale - return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) # noqa: DOC201 + return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) # noqa: DOC201, RUF100 def nb_round(x, decimals): # noqa: D103 @@ -263,7 +263,7 @@ def erfinv_coeff(order=20): # noqa: D103 # starting value c[0] = 1 for i in range(1, order + 1): - c[i] = sum([c[j] * c[i - 1 - j] / (j + 1) / (2 * j + 1) for j in range(i)]) # noqa: C419 + c[i] = sum([c[j] * c[i - 1 - j] / (j + 1) / (2 * j + 1) for j in range(i)]) # noqa: C419, RUF100 # return return c @@ -278,7 +278,7 @@ def erfinv(x, order=20): for i in range(order): y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y # noqa: DOC201 + return y # noqa: DOC201, RUF100 def norm2_ppf(p, loc, scale): @@ -287,7 +287,7 @@ def norm2_ppf(p, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = np.sqrt(2) * erfinv(2 * p - 1, order=20) - return scale * inter + loc # noqa: DOC201 + return scale * inter + loc # noqa: DOC201, RUF100 def erfinv_2d(x, order=20): @@ -300,7 +300,7 @@ def erfinv_2d(x, order=20): for i in range(order): y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y # noqa: DOC201 + return y # noqa: DOC201, RUF100 def norm2_ppf_2d(p, loc, scale): @@ -309,7 +309,7 @@ def norm2_ppf_2d(p, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = np.sqrt(2) * erfinv_2d(2 * p - 1, order=20) - return scale * inter + loc # noqa: DOC201 + return scale * inter + loc # noqa: DOC201, RUF100 class Landslide: # noqa: D101 @@ -518,7 +518,7 @@ def run( # noqa: D102 for i, key in enumerate(output_keys): im_data_scen[:, len(im_list) + i, rlz_id] = model_output[key] ln_im_data[scenario_id] = im_data_scen - im_list = im_list + output_keys # noqa: PLR6104 + im_list = im_list + output_keys # noqa: PLR6104, RUF100 else: sys.exit( f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be 
computed." # noqa: F541 @@ -533,7 +533,7 @@ def run( # noqa: D102 im_list, ) - def model( # noqa: PLR6301 + def model( # noqa: PLR6301, RUF100 self, pga, mag, # upstream PBEE RV @@ -635,4 +635,4 @@ def model( # noqa: PLR6301 pgdef = np.exp(nonzero_ln_pgdef) / 100 # also convert from cm to m pgdef = np.maximum(pgdef, 1e-5) # limit to output = {'lsd_PGD_h': pgdef} - return output # noqa: RET504, DOC201 + return output # noqa: DOC201, RET504, RUF100 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py index 2fa91e360..43bc5d13c 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py @@ -82,7 +82,7 @@ def sampleRaster( # noqa: N802 sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 - return sample # noqa: DOC201 + return sample # noqa: DOC201, RUF100 # Helper functions @@ -163,7 +163,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 - return gdf_sites # noqa: DOC201 + return gdf_sites # noqa: DOC201, RUF100 def find_additional_output_req(liq_info, current_step): # noqa: D103 @@ -455,7 +455,7 @@ def model(self, pgv, pga, mag): # liq_susc[prob_liq==zero_prob_liq] = 'none' - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201, RUF100 # ----------------------------------------------------------- @@ -658,7 +658,7 @@ def model( pga_mag = pga / (10**2.24 / mag**2.56) prob_liq[pga_mag < 0.1] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201, RUF100 # ----------------------------------------------------------- @@ -821,7 +821,7 @@ def model(self, pgv, pga, mag): # for precip > 1700 mm, set prob to "0" prob_liq[self.precip > 1700] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201, RUF100 # Lateral Spreading: @@ -987,7 +987,7 @@ def model( # output['ratio'] = ratio # return - return output # noqa: DOC201, RET504 + return output # noqa: DOC201, RET504, RUF100 # Settlement: @@ -1060,7 +1060,7 @@ def model( pass # return - return output # noqa: DOC201 + return output # noqa: DOC201, RUF100 def run(self, ln_im_data, eq_data, im_list): # noqa: D102 output_keys = ['liq_PGD_v'] diff --git a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py index d619d091d..c32d912f8 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py +++ b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py @@ -448,7 +448,7 @@ def interp_wind_by_height(pws_ip, height_simu, height_ref): ) # return - return pws_op # noqa: DOC201 + return pws_op # noqa: DOC201, RUF100 def gust_factor_ESDU(gd_c, gd_t): # noqa: N802 @@ -475,7 +475,7 @@ def gust_factor_ESDU(gd_c, gd_t): # 
noqa: N802 gd_c, gd, gf, left=gf[0], right=gf[-1] ) # return - return gf_t # noqa: DOC201, RET504 + return gf_t # noqa: DOC201, RET504, RUF100 def export_pws(stations, pws, output_dir, filename='EventGrid.csv'): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py index bacb23af4..15322c4c6 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py @@ -156,8 +156,8 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print('CreateScenario: error - no storm name or year is provided.') # noqa: T201 # Searching the storm try: - df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] # noqa: RUF031 - df_chs = df_chs[df_chs[('SEASON', 'Year')] == storm_year] # noqa: RUF031 + df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] # noqa: RUF031, RUF100 + df_chs = df_chs[df_chs[('SEASON', 'Year')] == storm_year] # noqa: RUF031, RUF100 except: # noqa: E722 print('CreateScenario: error - the storm is not found.') # noqa: T201 if len(df_chs.values) == 0: @@ -166,10 +166,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq # Collecting storm properties track_lat = [] track_lon = [] - for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON @@ -177,10 +177,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print( # noqa: T201 'CreateScenario: warning - the USA_LAT and USA_LON are not available, switching to LAT and LON.' 
) - for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 if len(track_lat) == 0: @@ -197,7 +197,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq terrain_data = [] # Storm characteristics at the landfall dist2land = [] - for x in df_chs[('DIST2LAND', 'km')]: # noqa: RUF031 + for x in df_chs[('DIST2LAND', 'km')]: # noqa: RUF031, RUF100 if x != ' ': dist2land.append(x) # noqa: PERF401 if len(track_lat) == 0: @@ -237,14 +237,14 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq track_simu = track_lat # Reading data try: - landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 - landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 + landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031, RUF100 + landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031, RUF100 except: # noqa: E722 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON - landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 - landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 + landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031, RUF100 + landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031, RUF100 try: - landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) # noqa: RUF031 + landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) # noqa: RUF031, RUF100 except: # noqa: E722 print('CreateScenario: error - no landing angle is found.') # noqa: T201 if landfall_ang > 180.0: # noqa: PLR2004 @@ -254,7 +254,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq - np.min( [ float(x) - for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011, RUF031 + for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011, RUF031, RUF100 .iloc[tmploc - 5 :] .values.tolist() if x != ' ' @@ -262,11 +262,11 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq ) ) landfall_spd = ( - float(df_chs[('STORM_SPEED', 'kts')].iloc[tmploc]) * 0.51444 # noqa: RUF031 + float(df_chs[('STORM_SPEED', 'kts')].iloc[tmploc]) * 0.51444 # noqa: RUF031, RUF100 ) # convert knots/s to km/s try: landfall_rad = ( - float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 + float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031, RUF100 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found @@ -274,7 +274,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq try: # If the default option (USA_RMW) is not available, switching to REUNION_RMW landfall_rad = ( - float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 + float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031, RUF100 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found diff --git 
a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py index 60d1b8a05..4eb3ffa04 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py @@ -68,7 +68,7 @@ def create_stations(input_file, output_file, min_id, max_id): stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: DOC201, RET504 + return run_tag # noqa: DOC201, RET504, RUF100 # Max and Min IDs stn_ids_min = np.min(stn_df.index.values) stn_ids_max = np.max(stn_df.index.values) diff --git a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py index 00ab61f9a..4f5e3c41b 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py @@ -168,7 +168,7 @@ def __interp_z0(self, lat, lon): if not z0: z0 = 0.01 # return - return z0 # noqa: DOC201 + return z0 # noqa: DOC201, RUF100 def add_reference_terrain(self, terrain_info): """add_reference_terrainL specifying reference z0 values for a set of polygons @@ -595,4 +595,4 @@ def compute_wind_field(self): def get_station_data(self): """get_station_data: returning station data""" # noqa: D400 # return station dictionary - return self.station # noqa: DOC201 + return self.station # noqa: DOC201, RUF100 diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py index f4f082e99..13973260a 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py +++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py @@ -374,7 +374,7 @@ def _load_h5_plom(self, filename): if cur_var in self.dbserver.get_item_adds() and ATTR_MAP[cur_var]: # noqa: F405 # read in cur_data = store[cur_var] - cur_dshape = tuple( # noqa: C409 + cur_dshape = tuple( # noqa: C409, RUF100 [x[0] for x in store['/DS_' + cur_var[1:]].values.tolist()] # noqa: PD011 ) if cur_dshape == (1,): @@ -416,7 +416,7 @@ def _load_h5_data_X(self, filename): # noqa: N802 item_name='X0', col_name=list(self.X0.columns), item=self.X0 ) - return self.X0.to_numpy() # noqa: DOC201 + return self.X0.to_numpy() # noqa: DOC201, RUF100 except: # noqa: E722 return None @@ -491,7 +491,7 @@ def load_h5(self, filename): ) if '/X0' in self.dbserver.get_name_list(): self.X0 = self.dbserver.get_item('X0', table_like=True) - return self.X0.to_numpy() # noqa: DOC201 + return self.X0.to_numpy() # noqa: DOC201, RUF100 else: # noqa: RET505 self.logfile.write_msg( msg='PLoM.load_h5: the original X0 data not found in the loaded data.', @@ -598,7 +598,7 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST): # noqa: C901, N802, F405 msg_type='WARNING', msg_level=0, ) - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 map_order = [FULL_TASK_LIST.index(x) for x in self.cur_task_list] # noqa: F405 if map_order != sorted(map_order): self.logfile.write_msg( @@ -961,7 +961,7 @@ def DataNormalization(self, X): # noqa: N802, N803 X_scaled, alpha, x_min = plom.scaling(X) # noqa: N806 x_mean = plom.mean(X_scaled) - return X_scaled, alpha, x_min, x_mean # noqa: DOC201 + return X_scaled, alpha, x_min, x_mean # noqa: DOC201, RUF100 def RunPCA(self, X_origin, epsilon_pca): # noqa: N802, N803, D102 # ...PCA... 
@@ -995,7 +995,7 @@ def RunKDE(self, X, epsilon_kde): # noqa: N802, N803 (s_v, c_v, hat_s_v) = plom.parameters_kde(X) K, b = plom.K(X, epsilon_kde) # noqa: N806 - return s_v, c_v, hat_s_v, K, b # noqa: DOC201 + return s_v, c_v, hat_s_v, K, b # noqa: DOC201, RUF100 def DiffMaps(self, H, K, b, tol=0.1): # noqa: N802, N803, D102 # ..diff maps basis... diff --git a/modules/performUQ/SimCenterUQ/PLoM/general.py b/modules/performUQ/SimCenterUQ/PLoM/general.py index 3b908da04..039fbf191 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/general.py +++ b/modules/performUQ/SimCenterUQ/PLoM/general.py @@ -149,13 +149,13 @@ def _create_export_dir(self): dir_export = os.path.join(self.db_dir, 'DataOut') # noqa: PTH118 try: os.makedirs(dir_export, exist_ok=True) # noqa: PTH103 - return dir_export # noqa: DOC201, TRY300 + return dir_export # noqa: DOC201, RUF100, TRY300 except: # noqa: E722 return None def get_item_adds(self): """Returning the full list of data items""" # noqa: D400, D401 - return self._item_adds # noqa: DOC201 + return self._item_adds # noqa: DOC201, RUF100 def add_item( self, @@ -190,7 +190,7 @@ def add_item( store.close() # noqa: RET503 else: # Not supported data_type - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: FBT002 """Getting a specific data item""" # noqa: D400, D401 @@ -199,7 +199,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: store = pd.HDFStore(self.db_path, 'r') try: item = store.get(item_name) - item_shape = tuple( # noqa: C409 + item_shape = tuple( # noqa: C409, RUF100 [ x[0] for x in self.get_item_shape( # noqa: PD011 @@ -214,7 +214,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: finally: store.close() - return item # noqa: DOC201 + return item # noqa: DOC201, RUF100 elif data_type == 'ConstraintsFile': store = pd.HDFStore(self.db_path, 'r') try: @@ -247,7 +247,7 @@ def get_item_shape(self, item_name=None): item_shape = None store.close() - return item_shape # noqa: DOC201 + return item_shape # noqa: DOC201, RUF100 def get_name_list(self): """Returning the keys of the database""" # noqa: D400, D401 @@ -257,7 +257,7 @@ def get_name_list(self): except: # noqa: E722 name_list = [] store.close() - return name_list # noqa: DOC201 + return name_list # noqa: DOC201, RUF100 def export(self, data_name=None, filename=None, file_format='csv'): """Exporting the specific data item @@ -266,7 +266,7 @@ def export(self, data_name=None, filename=None, file_format='csv'): """ # noqa: D205, D400, D401 d = self.get_item(item_name=data_name[1:], table_like=True) if d is None: - return 1 # noqa: DOC201 + return 1 # noqa: DOC201, RUF100 if filename is None: filename = os.path.join( # noqa: PTH118 self.dir_export, str(data_name).replace('/', '') + '.' 
+ file_format @@ -311,7 +311,7 @@ def refresh_status(self): # previous task not completed -> this task also needs to rerun self.status = False - return self.status # noqa: DOC201 + return self.status # noqa: DOC201, RUF100 # self-check if Counter(self.avail_var_list) == Counter(self.full_var_list) and len( @@ -355,7 +355,7 @@ def refresh_status(self): if not cur_task.status: self.status = False - return self.status # noqa: DOC201 + return self.status # noqa: DOC201, RUF100 while cur_task.next_task: cur_task = cur_task.next_task if not cur_task.status: diff --git a/modules/performUQ/SimCenterUQ/runPLoM.py b/modules/performUQ/SimCenterUQ/runPLoM.py index 2ab8e8b17..bc3f494a6 100644 --- a/modules/performUQ/SimCenterUQ/runPLoM.py +++ b/modules/performUQ/SimCenterUQ/runPLoM.py @@ -428,7 +428,7 @@ def _create_variables(self, training_data): # check if training data source from simulation if training_data == 'Sampling and Simulation': - return x_dim, y_dim, rv_name, g_name # noqa: DOC201 + return x_dim, y_dim, rv_name, g_name # noqa: DOC201, RUF100 # read X and Y variable names for rv in job_config['randomVariables']: @@ -562,7 +562,7 @@ def _parse_plom_parameters(self, surrogateInfo): # noqa: C901, N803 run_flag = 1 # return - return run_flag # noqa: DOC201 + return run_flag # noqa: DOC201, RUF100 def _set_up_parallel(self): """_set_up_parallel: set up modules and variables for parallel jobs @@ -592,7 +592,7 @@ def _set_up_parallel(self): run_flag = 1 # return - return run_flag # noqa: DOC201 + return run_flag # noqa: DOC201, RUF100 def _load_variables(self, do_sampling, do_simulation): # noqa: C901 """_load_variables: load variables @@ -666,7 +666,7 @@ def _load_variables(self, do_sampling, do_simulation): # noqa: C901 # run_flag = 1 # return - return run_flag # noqa: DOC201 + return run_flag # noqa: DOC201, RUF100 # KZ, 07/24: loading user-defined hyper-parameter files def _load_hyperparameter(self): diff --git a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py index 4da5ff658..c0e42c7be 100644 --- a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py +++ b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py @@ -142,4 +142,4 @@ def log_likelihood( loglike += ll else: loglike += -np.inf - return loglike # noqa: DOC201 + return loglike # noqa: DOC201, RUF100 diff --git a/modules/performUQ/UCSD_UQ/mwg_sampler.py b/modules/performUQ/UCSD_UQ/mwg_sampler.py index 13e8b46d8..4bf1e1ea8 100644 --- a/modules/performUQ/UCSD_UQ/mwg_sampler.py +++ b/modules/performUQ/UCSD_UQ/mwg_sampler.py @@ -328,7 +328,7 @@ def tune(scale, acc_rate): >0.95 x 10 """ # noqa: D205, D400 if acc_rate < 0.01: # noqa: PLR2004 - return scale * 0.01 # noqa: DOC201 + return scale * 0.01 # noqa: DOC201, RUF100 elif acc_rate < 0.05: # noqa: RET505, PLR2004 return scale * 0.1 elif acc_rate < 0.2: # noqa: PLR2004 diff --git a/modules/performUQ/UCSD_UQ/runFEM.py b/modules/performUQ/UCSD_UQ/runFEM.py index 98401eac6..aa5136105 100644 --- a/modules/performUQ/UCSD_UQ/runFEM.py +++ b/modules/performUQ/UCSD_UQ/runFEM.py @@ -101,4 +101,4 @@ def runFEM( # noqa: N802 preds = np.atleast_2d([-np.inf] * sum(edpLengthsList)).reshape((1, -1)) ll = -np.inf - return (ll, preds) # noqa: DOC201 + return (ll, preds) # noqa: DOC201, RUF100 diff --git a/modules/performUQ/UCSD_UQ/runTMCMC.py b/modules/performUQ/UCSD_UQ/runTMCMC.py index 99e841300..6bc59e0bc 100644 --- a/modules/performUQ/UCSD_UQ/runTMCMC.py +++ b/modules/performUQ/UCSD_UQ/runTMCMC.py @@ -561,4 +561,4 @@ def run_TMCMC( # noqa: N802, PLR0913 
f'\n\tShutdown mpi4py executor pool for runType: {run_type}' ) - return mytrace, total_log_evidence # noqa: DOC201 + return mytrace, total_log_evidence # noqa: DOC201, RUF100 diff --git a/modules/performUQ/common/ERAClasses/ERACond.py b/modules/performUQ/common/ERAClasses/ERACond.py index e9a40b6b3..cb3dd99fb 100644 --- a/modules/performUQ/common/ERAClasses/ERACond.py +++ b/modules/performUQ/common/ERAClasses/ERACond.py @@ -388,7 +388,7 @@ def equation(param): for i in range(len(Par)): Par[i] = np.squeeze(Par[i]) - return Par # noqa: DOC201 + return Par # noqa: DOC201, RUF100 # %% def condCDF(self, x, cond): # noqa: C901, N802 @@ -442,7 +442,7 @@ def condCDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': CDF = stats.weibull_min.cdf(x, c=par[1], scale=par[0]) # noqa: N806 - return CDF # noqa: DOC201 + return CDF # noqa: DOC201, RUF100 # %% def condiCDF(self, y, cond): # noqa: C901, N802 @@ -496,7 +496,7 @@ def condiCDF(self, y, cond): # noqa: C901, N802 elif self.Name == 'weibull': iCDF = stats.weibull_min.ppf(y, c=par[1], scale=par[0]) # noqa: N806 - return iCDF # noqa: DOC201 + return iCDF # noqa: DOC201, RUF100 # %% def condPDF(self, x, cond): # noqa: C901, N802 @@ -550,7 +550,7 @@ def condPDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': PDF = stats.weibull_min.pdf(x, c=par[1], scale=par[0]) # noqa: N806 - return PDF # noqa: DOC201 + return PDF # noqa: DOC201, RUF100 # %% def condRandom(self, cond): # noqa: C901, N802 @@ -602,4 +602,4 @@ def condRandom(self, cond): # noqa: C901, N802 elif self.Name == 'weibull': Random = stats.weibull_min.rvs(c=par[1], scale=par[0]) # noqa: N806 - return Random # noqa: DOC201 + return Random # noqa: DOC201, RUF100 diff --git a/modules/performUQ/other/UQpyRunner.py b/modules/performUQ/other/UQpyRunner.py index 926d1c718..1667cf7b0 100644 --- a/modules/performUQ/other/UQpyRunner.py +++ b/modules/performUQ/other/UQpyRunner.py @@ -192,4 +192,4 @@ def runUQ( # noqa: C901, N802 # Factory for creating UQpy runner class Factory: # noqa: D106 def create(self): # noqa: D102 - return UQpyRunner() \ No newline at end of file + return UQpyRunner() diff --git a/modules/systemPerformance/REWET/REWET/Damage.py b/modules/systemPerformance/REWET/REWET/Damage.py index 2a1181e1e..00e58c304 100644 --- a/modules/systemPerformance/REWET/REWET/Damage.py +++ b/modules/systemPerformance/REWET/REWET/Damage.py @@ -75,7 +75,7 @@ def readDamageFromPickleFile( # noqa: N802 Returns ------- - """ # noqa: D205, D400, D401, D404, D414, DOC202 + """ # noqa: D205, D400, D401, D404, D414, DOC202, RUF100 with open(pickle_file_name, 'rb') as pckf: # noqa: PTH123 w = pickle.load(pckf) # noqa: S301 @@ -313,7 +313,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 if self.node_damage.empty: print('no node damage at all') # noqa: T201 return @@ -1078,7 +1078,7 @@ def sortEarthquakeListTimely(self): # noqa: N802 ------- None. - """ # noqa: D400, D401, D404, DOC202 + """ # noqa: D400, D401, D404, DOC202, RUF100 self._earthquake.sort_index() self.is_timely_sorted = True @@ -1097,7 +1097,7 @@ def predictDamage(self, wn, iClear=False): # noqa: FBT002, N802, N803 ------- None. 
- """ # noqa: D401, D404, DOC202 + """ # noqa: D401, D404, DOC202, RUF100 if iClear: self.pipe_leak = pd.Series() self.pipe_break = pd.Series() diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py index e94400be6..c9fef211c 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py @@ -3985,7 +3985,7 @@ def contains_section(self, sec): """ # noqa: D205 try: self.get_section(sec) - return True # noqa: DOC201, TRY300 + return True # noqa: DOC201, RUF100, TRY300 except NoSectionError: return False @@ -4345,4 +4345,4 @@ def _diff_inp_files( # noqa: C901 g.write(html_diff) g.close() - return n # noqa: DOC201 + return n # noqa: DOC201, RUF100 diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py index e9a214792..ec54f7220 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py @@ -75,7 +75,7 @@ def updateWaterNetworkModelWithResult( # noqa: C901, N802 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 max_time = result.node['head'].index.max() if latest_simulation_time == None: # noqa: E711 latest_simulation_time = max_time diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py index 50fde8b0c..e9d529c3b 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py @@ -229,7 +229,7 @@ def run_sim( # noqa: C901 if run_successful: break - return result_data, run_successful # noqa: DOC201 + return result_data, run_successful # noqa: DOC201, RUF100 def _updateResultStartTime(self, result_data, start_time): # noqa: N802 for res_type, res in result_data.link.items(): # noqa: B007, PERF102 @@ -379,7 +379,7 @@ def _initialize_internal_graph(self): # noqa: C901 self._node_pairs_with_multiple_links = OrderedDict() for from_node_id, to_node_id in n_links.keys(): # noqa: SIM118 - if n_links[(from_node_id, to_node_id)] > 1: # noqa: RUF031 + if n_links[(from_node_id, to_node_id)] > 1: # noqa: RUF031, RUF100 if ( to_node_id, from_node_id, @@ -390,7 +390,7 @@ def _initialize_internal_graph(self): # noqa: C901 from_node_name = self._node_id_to_name[from_node_id] to_node_name = self._node_id_to_name[to_node_id] tmp_list = self._node_pairs_with_multiple_links[ - (from_node_id, to_node_id) # noqa: RUF031 + (from_node_id, to_node_id) # noqa: RUF031, RUF100 ] = [] for link_name in self._wn.get_links_for_node(from_node_name): link = self._wn.get_link(link_name) diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py index 8c9ccbca0..0422e1318 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py @@ -131,7 +131,7 @@ def _is_number(s): """ # noqa: D400, D401 try: float(s) - return True # noqa: DOC201, TRY300 + return True # noqa: DOC201, RUF100, TRY300 except ValueError: return False diff --git a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py index 3e8509f92..68935d5a0 100644 --- 
a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py +++ b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py @@ -67,7 +67,7 @@ def __init__(self, definition_file_name): ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 # some of the following lines have been adopted from WNTR self.rm = restoration_data() @@ -230,7 +230,7 @@ def _read_entities(self): # noqa: C901 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 # Entities is kept for legacy compatibility with the first version damage_group_data = self.sections.get( '[ENTITIES]', self.sections.get('[Damage Group]') diff --git a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py index ba1afcc3c..85c68db92 100644 --- a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py +++ b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py @@ -72,7 +72,7 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 raise ValueError('Uknown time_type: ' + repr(time_type)) res[percentage] = temp_res - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N802 @@ -123,4 +123,4 @@ def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 raise ValueError('Uknown time_type: ' + repr(time_type)) res[percentage] = temp_res - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 diff --git a/modules/systemPerformance/REWET/REWET/initial.py b/modules/systemPerformance/REWET/REWET/initial.py index 8555b6377..06af15b3a 100644 --- a/modules/systemPerformance/REWET/REWET/initial.py +++ b/modules/systemPerformance/REWET/REWET/initial.py @@ -46,7 +46,7 @@ def run(self, project_file=None): # noqa: C901 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 settings = Settings() if project_file is not None: project_file = str(project_file) diff --git a/modules/systemPerformance/REWET/REWET/restoration/base.py b/modules/systemPerformance/REWET/REWET/restoration/base.py index 9bfaef207..c9cac8ac6 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/base.py +++ b/modules/systemPerformance/REWET/REWET/restoration/base.py @@ -232,7 +232,7 @@ def addAgent(self, agent_name, agent_type, definition): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 # number_of_agents = int(definition['Number']) agent_speed = self.registry.settings['crew_travel_speed'] temp_agent_data = AgentData( @@ -270,7 +270,7 @@ def setActiveAgents(self, active_agent_ID_list): # noqa: N802, N803 ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 for active_agent_ID in active_agent_ID_list: # noqa: N806 self._agents['active'].loc[active_agent_ID] = True @@ -600,7 +600,7 @@ def addShift(self, name, beginning, ending): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 if name in self._shift_data: raise ValueError('Shift name already registered') # noqa: EM101, TRY003 if type(beginning) != int and type(beginning) != float: # noqa: E721 @@ -673,7 +673,7 @@ def assignShiftToAgent(self, agent_ID, shift_name): # noqa: N802, N803 ------- None. 
- """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 if agent_ID in self._all_agent_shift_data: raise ValueError('The agent ID currently in Agent ALl Shifts') # noqa: EM101, TRY003 if shift_name not in self._shift_data: diff --git a/modules/systemPerformance/REWET/REWET/restoration/io.py b/modules/systemPerformance/REWET/REWET/restoration/io.py index 5fbf41721..ce6e932da 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/io.py +++ b/modules/systemPerformance/REWET/REWET/restoration/io.py @@ -44,7 +44,7 @@ def __init__(self, restoration_model, definition_file_name): ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 # some of the following lines have been adopted from WNTR self.rm = restoration_model self.crew_data = {} @@ -1180,7 +1180,7 @@ def _read_config(self): ------- None. - """ # noqa: D205, D400, D401, DOC202 + """ # noqa: D205, D400, D401, DOC202, RUF100 edata = OrderedDict() self._crew_file_name = [] self._crew_file_type = [] diff --git a/modules/systemPerformance/REWET/REWET/restoration/model.py b/modules/systemPerformance/REWET/REWET/restoration/model.py index 29291cc72..3f92bf261 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/model.py +++ b/modules/systemPerformance/REWET/REWET/restoration/model.py @@ -907,7 +907,7 @@ def updateShifiting(self, time): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 if type(time) != int and type(time) != float: # noqa: E721 raise ValueError('Time must be integer not ' + str(type(time))) time = int(time) diff --git a/modules/systemPerformance/REWET/REWET/restoration/registry.py b/modules/systemPerformance/REWET/REWET/restoration/registry.py index 3b09d331f..38037c5b8 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/registry.py +++ b/modules/systemPerformance/REWET/REWET/restoration/registry.py @@ -515,7 +515,7 @@ def addPipeDamageToRegistry(self, node_name, data): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 # self._pipe_node_damage_status[name] = data leaking_pipe_with_pipeA_orginal_pipe = self._pipe_leak_history[ # noqa: N806 @@ -1280,7 +1280,7 @@ def occupyNode(self, node_name, occupier_name): # noqa: N802 ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 if occupier_name in self._occupancy: # if not iNodeCoupled(node_name): raise ValueError( # noqa: TRY003 @@ -1307,7 +1307,7 @@ def removeOccupancy(self, occupier_name): # noqa: N802 ------- None. - """ # noqa: D401, DOC202 + """ # noqa: D401, DOC202, RUF100 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: @@ -1350,7 +1350,7 @@ def whereIsOccupiedByName(self, occupier_name): # noqa: N802 str or series node(s) ID. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: raise ValueError('there is no occupancy with this name') # noqa: EM101, TRY003 @@ -1387,7 +1387,7 @@ def coupleTwoBreakNodes(self, break_point_1_name, break_point_2_name): # noqa: ------- None. 
- """ # noqa: D205, DOC202 + """ # noqa: D205, DOC202, RUF100 self._pipe_break_node_coupling[break_point_1_name] = break_point_2_name self._pipe_break_node_coupling[break_point_2_name] = break_point_1_name self._break_point_attached_to_mainPipe.append(break_point_1_name) diff --git a/modules/systemPerformance/REWET/REWET/timeline.py b/modules/systemPerformance/REWET/REWET/timeline.py index af3b1e6a7..fa1a875ab 100644 --- a/modules/systemPerformance/REWET/REWET/timeline.py +++ b/modules/systemPerformance/REWET/REWET/timeline.py @@ -143,7 +143,7 @@ def addEventTime(self, event_distinct_time, event_type='dmg'): # noqa: N802 ------- None. - """ # noqa: D205, D401, D404, DOC202 + """ # noqa: D205, D401, D404, DOC202, RUF100 if type(event_distinct_time) != pd.core.series.Series: # noqa: E721 if ( type(event_distinct_time) == numpy.float64 # noqa: E721 @@ -218,7 +218,7 @@ def checkAndAmendTime(self): # noqa: N802 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 first_length = len(self._event_time_register.index) self._event_time_register = self._event_time_register[ self._event_time_register.index <= self._simulation_end_time diff --git a/modules/systemPerformance/REWET/preprocessorIO.py b/modules/systemPerformance/REWET/preprocessorIO.py index c9b44067e..10f291d48 100644 --- a/modules/systemPerformance/REWET/preprocessorIO.py +++ b/modules/systemPerformance/REWET/preprocessorIO.py @@ -187,7 +187,7 @@ def save_scenario_table(scenario_table, scenario_table_file_path): ------- None. - """ # noqa: D205, D400, D401, DOC202 + """ # noqa: D205, D400, D401, DOC202, RUF100 if isinstance(scenario_table, pd.core.frame.DataFrame): pass elif isinstance(scenario_table, list): From da4d973f8712351d5a45c05b83378e9c109a4716 Mon Sep 17 00:00:00 2001 From: Justin Bonus Date: Fri, 16 Aug 2024 16:13:22 -0700 Subject: [PATCH 10/59] Merge error in StochisticWave Include MIT license from welib and import the util example files. --- .../createEVENT/stochasticWave/StochasticWave.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/modules/createEVENT/stochasticWave/StochasticWave.py b/modules/createEVENT/stochasticWave/StochasticWave.py index 045946a49..e5e045f49 100644 --- a/modules/createEVENT/stochasticWave/StochasticWave.py +++ b/modules/createEVENT/stochasticWave/StochasticWave.py @@ -1,5 +1,15 @@ #!/usr/bin/env python3 # noqa: EXE001, D100 +""" +Notable portions of this code are derived courtesy of the welib python package +and the following source: + +Copyright 2019 E. Branlard +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +""" + import argparse import json import re @@ -10,6 +20,10 @@ from welib.hydro.morison import * # noqa: E402, F403 from welib.hydro.wavekin import * # noqa: E402, F403 +import Ex1_WaveKinematics +import Ex2_Jonswap_spectrum +import Ex3_WaveTimeSeries +import Ex4_WaveLoads class FloorForces: # noqa: D101 def __init__(self, recorderID=-1): # noqa: N803 From 5f73e74e64912902995a1c43eee3d3f9eed0d5f5 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Sat, 17 Aug 2024 04:11:05 -0700 Subject: [PATCH 11/59] `ruff format` --- .../CleanBeamSectionDatabase.ipynb | 148 +++++++++--------- modules/performREC/pyrecodes/run_pyrecodes.py | 1 - 2 files changed, 74 insertions(+), 75 deletions(-) diff --git a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb index 5e1cc9ae2..036c75a4b 100644 --- a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb +++ b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb @@ -1,74 +1,74 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Clean Beam Section Database" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "ename": "FileNotFoundError", - "evalue": "[Errno 2] No such file or directory: 'BeamDatabase.csv'", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'BeamDatabase.csv'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'r'\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m \u001b[0mbeam_section_database\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'BeamDatabase.csv'" - ] - } - ], - "source": [ - "import pandas as pd\n", - "\n", - "with open('BeamDatabase1.csv', 'r') as file:\n", - " beam_section_database = pd.read_csv(file, header=0)\n", - "\n", - "# Beam section weight shall be less than 300 lb/ft\n", - "# Beam flange thickness shall be less than 1.75 inch.\n", - "target_index = []\n", - "for indx in beam_section_database['index']:\n", - " if (beam_section_database.loc[indx, 'weight'] >= 300):\n", - " target_index.append(indx)\n", - " elif (beam_section_database.loc[indx, 'tf'] >= 1.75):\n", - " target_index.append(indx)\n", - "clean_beam_section = 
beam_section_database.drop(index=target_index)\n", - "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Clean Beam Section Database" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "FileNotFoundError", + "evalue": "[Errno 2] No such file or directory: 'BeamDatabase.csv'", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'BeamDatabase.csv'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'r'\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m \u001b[0mbeam_section_database\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'BeamDatabase.csv'" + ] + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "with open('BeamDatabase1.csv', 'r') as file:\n", + " beam_section_database = pd.read_csv(file, header=0)\n", + "\n", + "# Beam section weight shall be less than 300 lb/ft\n", + "# Beam flange thickness shall be less than 1.75 inch.\n", + "target_index = []\n", + "for indx in beam_section_database['index']:\n", + " if beam_section_database.loc[indx, 'weight'] >= 300:\n", + " target_index.append(indx)\n", + " elif beam_section_database.loc[indx, 'tf'] >= 1.75:\n", + " target_index.append(indx)\n", + "clean_beam_section = beam_section_database.drop(index=target_index)\n", + "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git 
a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py index 7998e0aa6..ccd9783c2 100644 --- a/modules/performREC/pyrecodes/run_pyrecodes.py +++ b/modules/performREC/pyrecodes/run_pyrecodes.py @@ -504,4 +504,3 @@ def select_realizations_to_run(damage_input, inputRWHALE): # noqa: N803, D103 mpiExec=wfArgs.mpiexec, numPROC=numPROC, ) - From 4258790c346f35d01417bc7fd31cdfb82894196a Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Sat, 17 Aug 2024 04:11:26 -0700 Subject: [PATCH 12/59] `ruff check --fix` --- .../AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb index 036c75a4b..892ec8a1e 100644 --- a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb +++ b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb @@ -27,16 +27,14 @@ "source": [ "import pandas as pd\n", "\n", - "with open('BeamDatabase1.csv', 'r') as file:\n", + "with open('BeamDatabase1.csv') as file:\n", " beam_section_database = pd.read_csv(file, header=0)\n", "\n", "# Beam section weight shall be less than 300 lb/ft\n", "# Beam flange thickness shall be less than 1.75 inch.\n", "target_index = []\n", "for indx in beam_section_database['index']:\n", - " if beam_section_database.loc[indx, 'weight'] >= 300:\n", - " target_index.append(indx)\n", - " elif beam_section_database.loc[indx, 'tf'] >= 1.75:\n", + " if beam_section_database.loc[indx, 'weight'] >= 300 or beam_section_database.loc[indx, 'tf'] >= 1.75:\n", " target_index.append(indx)\n", "clean_beam_section = beam_section_database.drop(index=target_index)\n", "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" From 4675b6779fcbbdaef36c805d6659a5b773433d0b Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Sat, 17 Aug 2024 04:20:32 -0700 Subject: [PATCH 13/59] Add # noqa manually ruff check --add-noqa doesn't modify `.ipynb` files. 
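For reference only (a sketch, not part of this change): suppressions like the ones added by hand here could in principle be scripted for notebooks with nbformat. The rule code and the prefix match below are assumptions standing in for a real mapping from Ruff's diagnostics to cell lines.

import nbformat

# Read the notebook, append '# noqa' tags to known-flagged lines, write back.
nb = nbformat.read('CleanBeamSectionDatabase.ipynb', as_version=4)
for cell in nb.cells:
    if cell.cell_type != 'code':
        continue
    lines = cell.source.splitlines()
    # Hypothetical lookup: a real tool would use Ruff's JSON output
    # (file/line -> rule code) rather than matching a line prefix.
    lines = [
        f'{line}  # noqa: PTH123' if line.startswith('with open(') else line
        for line in lines
    ]
    cell.source = '\n'.join(lines)
nbformat.write(nb, 'CleanBeamSectionDatabase.ipynb')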
--- .../AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb index 892ec8a1e..abbe355f2 100644 --- a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb +++ b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb @@ -27,15 +27,18 @@ "source": [ "import pandas as pd\n", "\n", - "with open('BeamDatabase1.csv') as file:\n", + "with open('BeamDatabase1.csv') as file: # noqa: PTH123\n", " beam_section_database = pd.read_csv(file, header=0)\n", "\n", "# Beam section weight shall be less than 300 lb/ft\n", "# Beam flange thickness shall be less than 1.75 inch.\n", "target_index = []\n", "for indx in beam_section_database['index']:\n", - " if beam_section_database.loc[indx, 'weight'] >= 300 or beam_section_database.loc[indx, 'tf'] >= 1.75:\n", - " target_index.append(indx)\n", + " if (\n", + " beam_section_database.loc[indx, 'weight'] >= 300 # noqa: PLR2004\n", + " or beam_section_database.loc[indx, 'tf'] >= 1.75 # noqa: PLR2004\n", + " ):\n", + " target_index.append(indx) # noqa: PERF401\n", "clean_beam_section = beam_section_database.drop(index=target_index)\n", "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" ] From 905704d48020441e6d6ee4457398fa5e02592e05 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Sat, 17 Aug 2024 04:35:20 -0700 Subject: [PATCH 14/59] Add syntax to ignore folders. --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 43fede178..29ad35b40 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,6 +7,9 @@ select = ["ALL"] ignore = ["ANN", "D211", "D212", "Q000", "Q003", "COM812", "D203", "ISC001", "E501", "ERA001", "PGH003", "FIX002", "TD003", "S101", "N801", "S311", "G004", "SIM102", "SIM108", "NPY002", "F401"] preview = false +[tool.ruff.lint.per-file-ignores] +"path/to/folder/*" = ["ALL"] + [tool.ruff.lint.pydocstyle] convention = "numpy" From 830d6a5986c6af07371ae7ede3d5e804e2413a52 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 20 Aug 2024 15:03:12 -0700 Subject: [PATCH 15/59] ruff check add noqa --- .../CleanBeamSectionDatabase.ipynb | 148 +++++++++--------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb index 5e1cc9ae2..5d6469510 100644 --- a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb +++ b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb @@ -1,74 +1,74 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Clean Beam Section Database" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "ename": "FileNotFoundError", - "evalue": "[Errno 2] No such file or directory: 'BeamDatabase.csv'", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 
2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'BeamDatabase.csv'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'r'\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m \u001b[0mbeam_section_database\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'BeamDatabase.csv'" - ] - } - ], - "source": [ - "import pandas as pd\n", - "\n", - "with open('BeamDatabase1.csv', 'r') as file:\n", - " beam_section_database = pd.read_csv(file, header=0)\n", - "\n", - "# Beam section weight shall be less than 300 lb/ft\n", - "# Beam flange thickness shall be less than 1.75 inch.\n", - "target_index = []\n", - "for indx in beam_section_database['index']:\n", - " if (beam_section_database.loc[indx, 'weight'] >= 300):\n", - " target_index.append(indx)\n", - " elif (beam_section_database.loc[indx, 'tf'] >= 1.75):\n", - " target_index.append(indx)\n", - "clean_beam_section = beam_section_database.drop(index=target_index)\n", - "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Clean Beam Section Database" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "FileNotFoundError", + "evalue": "[Errno 2] No such file or directory: 'BeamDatabase.csv'", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'BeamDatabase.csv'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'r'\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m \u001b[0mbeam_section_database\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'BeamDatabase.csv'" + ] + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "with open('BeamDatabase1.csv', 'r') as file: # noqa: PTH123, UP015\n", + " beam_section_database = pd.read_csv(file, header=0)\n", + "\n", + "# Beam section weight shall be less than 300 lb/ft\n", + "# Beam flange thickness shall be less than 1.75 inch.\n", + "target_index = []\n", + "for indx in beam_section_database['index']:\n", + " if (beam_section_database.loc[indx, 'weight'] >= 300): # noqa: PLR2004, SIM114\n", + " target_index.append(indx)\n", + " elif (beam_section_database.loc[indx, 'tf'] >= 1.75): # noqa: PLR2004\n", + " target_index.append(indx)\n", + "clean_beam_section = beam_section_database.drop(index=target_index)\n", + "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 9ac6a87affeeaa03f2a7a19620554a3667fb72bd Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 20 Aug 2024 15:03:30 -0700 Subject: [PATCH 16/59] fix an error of reading R2D output json --- .../regionalGroundMotion/ComputeIntensityMeasure.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py index 30d826ff2..f500a4b31 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py @@ -725,12 +725,15 @@ def compute_im( # noqa: C901, D103 saveInJson = False # noqa: N806 filename = os.path.join(output_dir, filename) # noqa: PTH118 im_list = [] - if 'PGA' in im_info.keys(): # noqa: SIM118 + if 'PGA' in im_info.keys() or im_info.get('Type', None) == 'PGA': # noqa: SIM118 im_list.append('PGA') if 'SA' in im_info.keys(): # noqa: SIM118 for cur_period in im_info['SA']['Periods']: im_list.append(f'SA({cur_period!s})') # noqa: PERF401 - if 'PGV' in im_info.keys(): # noqa: SIM118 + if im_info.get('Type', None) == 'SA': + for cur_period in im_info['Periods']: + im_list.append(f'SA({cur_period!s})') # noqa: PERF401 + if 'PGV' in im_info.keys() or im_info.get('Type', None) == 'PGV': # noqa: SIM118 im_list.append('PGV') # Stations station_list = [ From efe32bcbebf49417cdec55c9d5c533c2e81676f2 Mon Sep 17 00:00:00 2001 From: fmckenna Date: Sat, 24 Aug 2024 11:48:00 -0700 Subject: [PATCH 17/59] fmk - adding a createResponseCSV file for PBE HPC --- modules/Workflow/CMakeLists.txt | 3 +- modules/Workflow/createResponseCSV.py | 89 +++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 modules/Workflow/createResponseCSV.py diff --git a/modules/Workflow/CMakeLists.txt 
b/modules/Workflow/CMakeLists.txt index 97a1b7c19..0397a6208 100644 --- a/modules/Workflow/CMakeLists.txt +++ b/modules/Workflow/CMakeLists.txt @@ -4,6 +4,7 @@ simcenter_add_python_script(SCRIPT changeJSON.py) simcenter_add_python_script(SCRIPT "sWHALE.py") simcenter_add_python_script(SCRIPT "qWHALE.py") simcenter_add_python_script(SCRIPT "rWHALE.py") +simcenter_add_python_script(SCRIPT "createResponseCSV.py") simcenter_add_python_script(SCRIPT "siteResponseWHALE.py") simcenter_add_python_script(SCRIPT "createGM4BIM.py") simcenter_add_python_script(SCRIPT "computeResponseSpectrum.py") @@ -19,4 +20,4 @@ simcenter_add_python_script(SCRIPT R2DTool_workflow.py) simcenter_add_python_script(SCRIPT CreateWorkflowJobs.py) simcenter_add_executable(NAME runMultiModelDriver - FILES runMultiModelDriver.cpp) \ No newline at end of file + FILES runMultiModelDriver.cpp) diff --git a/modules/Workflow/createResponseCSV.py b/modules/Workflow/createResponseCSV.py new file mode 100644 index 000000000..59d1bac20 --- /dev/null +++ b/modules/Workflow/createResponseCSV.py @@ -0,0 +1,89 @@ +# noqa: D100 + +# +# Code to write response.csv file given input and dakotaTab.out files +# + +# Written fmk, important code copied from whale/main.py +# date: 08/24 + + + + +import argparse +import json + +import numpy as np +import pandas as pd + + +def main(input_file, dakota_tab_file): # noqa: D103 + + try: + # Attempt to open the file + with open(input_file) as file: # noqa: PTH123 + data = json.load(file) + + except FileNotFoundError: + # Handle the error if the file is not found + print(f"Error createResponseCSV.py: The file '{input_file}' was not found.") # noqa: T201 + return + except OSError: + # Handle other I/O errors + print(f"Error createResponseCSV.py: Error reading the file '{input_file}'.") # noqa: T201 + return + + app_data = data.get('Applications', None) + if app_data is not None: + dl_data = app_data.get('DL', None) + + if dl_data is not None: + dl_app_data = dl_data.get('ApplicationData', None) + + if dl_app_data is not None: + is_coupled = dl_app_data.get('coupled_EDP', None) + + try: + # sy, abs - added try-statement because dakota-reliability does not write DakotaTab.out + dakota_out = pd.read_csv(dakota_tab_file, sep=r'\s+', header=0, index_col=0) + + if is_coupled: + if 'eventID' in dakota_out.columns: + events = dakota_out['eventID'].values # noqa: PD011 + events = [int(e.split('x')[-1]) for e in events] + sorter = np.argsort(events) + dakota_out = dakota_out.iloc[sorter, :] + dakota_out.index = np.arange(dakota_out.shape[0]) + + dakota_out.to_csv('response.csv') + + except FileNotFoundError: + # Handle the error if the file is not found + print(f"Error createResponseCSV.py: The file '{dakota_tab_file}' not found.") # noqa: T201 + return + + except OSError: + # Handle other I/O errors + print(f"Error createResponseCSV.py: Error reading '{dakota_tab_file}'.") # noqa: T201 + return + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + # Add arguments with default values + parser.add_argument( + '--input', type=str, default='AIM.json', help='Path to the input file)' + ) + parser.add_argument( + '--dakotaTab', + type=str, + default='dakotaTab.out', + help='Path to the dakotaTab file)', + ) + + # Parse the arguments + args = parser.parse_args() + + # Use the arguments + main(args.input, args.dakotaTab) From 869c69916f611474f81c5c696d536f9474b0bc4b Mon Sep 17 00:00:00 2001 From: fmckenna Date: Mon, 26 Aug 2024 08:37:31 -0700 Subject: [PATCH 18/59] fmk - getting path from inputFile & I 
hate some ruff defaults --- modules/Workflow/createResponseCSV.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/Workflow/createResponseCSV.py b/modules/Workflow/createResponseCSV.py index 59d1bac20..e553398c3 100644 --- a/modules/Workflow/createResponseCSV.py +++ b/modules/Workflow/createResponseCSV.py @@ -7,18 +7,18 @@ # Written fmk, important code copied from whale/main.py # date: 08/24 - - - import argparse import json +import os import numpy as np import pandas as pd - def main(input_file, dakota_tab_file): # noqa: D103 + directory_inputs = os.path.dirname(input_file) # noqa: PTH120 + os.chdir(directory_inputs) + try: # Attempt to open the file with open(input_file) as file: # noqa: PTH123 @@ -73,7 +73,7 @@ def main(input_file, dakota_tab_file): # noqa: D103 # Add arguments with default values parser.add_argument( - '--input', type=str, default='AIM.json', help='Path to the input file)' + '--inputFile', type=str, default='AIM.json', help='Path to the input file)' ) parser.add_argument( '--dakotaTab', @@ -86,4 +86,4 @@ def main(input_file, dakota_tab_file): # noqa: D103 args = parser.parse_args() # Use the arguments - main(args.input, args.dakotaTab) + main(args.inputFile, args.dakotaTab) From 0ab0129700abae5b3fdc99c1c6a51a77566f2553 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Mon, 26 Aug 2024 14:57:45 -0700 Subject: [PATCH 19/59] jz - UX: list supported IM types in the error message --- .../regionalGroundMotion/gmpe/openSHAGMPE.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py index 3a6c527ec..a83b970f6 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py @@ -74,7 +74,12 @@ def __init__(self): def setIMT(self, imt): # noqa: N802, D102 if imt not in self.supportedImt: - sys.exit(f'The imt {imt} is not supported by Chiou and Young (2014)') + # supported_imt = [] + # for i in self.supportedImt: + # if i is float: + # supported_imt.append(i) + supported_imt = [f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt] + sys.exit(f'The IM type {imt} is not supported by Chiou and Young (2014).
\n The supported IM types are {supported_imt}' ) return self.imt = imt @@ -863,8 +870,9 @@ def __init__(self): def setIMT(self, imt): # noqa: N802, D102 if imt not in self.supportedImt: + supported_imt = [f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt] sys.exit( - f'The imt {imt} is not supported by Campbell & Bozorgnia (2014)' + f'The IM type {imt} is not supported by Campbell & Bozorgnia (2014). \n The supported IM types are {supported_imt}' ) return self.imt = imt From fd65b973d3d00ef2290f41113f5dbccb0bbafe4a Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Mon, 26 Aug 2024 16:04:15 -0700 Subject: [PATCH 20/59] ruff format and ruff check --- modules/Workflow/createResponseCSV.py | 14 ++++++------- .../regionalGroundMotion/gmpe/openSHAGMPE.py | 20 ++++++++++++++----- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/modules/Workflow/createResponseCSV.py b/modules/Workflow/createResponseCSV.py index e553398c3..557b14c80 100644 --- a/modules/Workflow/createResponseCSV.py +++ b/modules/Workflow/createResponseCSV.py @@ -1,4 +1,4 @@ -# noqa: D100 +# noqa: D100, INP001 # # Code to write response.csv file given input and dakotaTab.out files @@ -14,9 +14,9 @@ import numpy as np import pandas as pd -def main(input_file, dakota_tab_file): # noqa: D103 - directory_inputs = os.path.dirname(input_file) # noqa: PTH120 +def main(input_file, dakota_tab_file): # noqa: D103 + directory_inputs = os.path.dirname(input_file) # noqa: PTH120 os.chdir(directory_inputs) try: @@ -26,11 +26,11 @@ def main(input_file, dakota_tab_file): # noqa: D103 except FileNotFoundError: # Handle the error if the file is not found - print(f"Error createResponseCSV.py: The file '{input_file}' was not found.") # noqa: T201 + print(f"Error createResponseCSV.py: The file '{input_file}' was not found.") # noqa: T201 return except OSError: # Handle other I/O errors - print(f"Error createResponseCSV.py: Error reading the file '{input_file}'.") # noqa: T201 + print(f"Error createResponseCSV.py: Error reading the file '{input_file}'.") # noqa: T201 return app_data = data.get('Applications', None) @@ -59,12 +59,12 @@ def main(input_file, dakota_tab_file): # noqa: D103 except FileNotFoundError: # Handle the error if the file is not found - print(f"Error createResponseCSV.py: The file '{dakota_tab_file}' not found.") # noqa: T201 + print(f"Error createResponseCSV.py: The file '{dakota_tab_file}' not found.") # noqa: T201 return except OSError: # Handle other I/O errors - print(f"Error createResponseCSV.py: Error reading '{dakota_tab_file}'.") # noqa: T201 + print(f"Error createResponseCSV.py: Error reading '{dakota_tab_file}'.") # noqa: T201 return diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py index a83b970f6..69213803e 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py @@ -78,8 +78,12 @@ def setIMT(self, imt): # noqa: N802, D102 # for i in self.supportedImt: # if i is float: # supported_imt.append(i) - supported_imt = [f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt] - sys.exit(f'The IM type {imt} is not supported by Chiou and Young (2014). 
\n The supported IM types are {supported_imt}') + supported_imt = [ + f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt + ] + sys.exit( + f'The IM type {imt} is not supported by Chiou and Young (2014). \n The supported IM types are {supported_imt}' + ) return False self.c1 = self.coeff['c1'][imt] self.c1a = self.coeff['c1a'][imt] @@ -342,7 +346,9 @@ def __init__(self): def setIMT(self, imt): # noqa: N802, D102 if imt not in self.supportedImt: - supported_imt = [f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt] + supported_imt = [ + f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt + ] sys.exit( f'The IM type {imt} is not supported by Abrahamson, Silva, and Kamai (2014). \n The supported IM types are {supported_imt}' ) @@ -642,7 +648,9 @@ def __init__(self): def setIMT(self, imt): # noqa: N802, D102 if imt not in self.supportedImt: - supported_imt = [f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt] + supported_imt = [ + f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt + ] sys.exit( f'The IM type {imt} is not supported by Boore, Stewart, Seyhan & Atkinson (2014). \n The supported IM types are {supported_imt}' ) @@ -870,7 +878,9 @@ def __init__(self): def setIMT(self, imt): # noqa: N802, D102 if imt not in self.supportedImt: - supported_imt = [f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt] + supported_imt = [ + f'SA{x}s' if isinstance(x, float) else x for x in self.supportedImt + ] sys.exit( f'The IM type {imt} is not supported by Campbell & Bozorgnia (2014). \n The supported IM types are {supported_imt}' ) From 6f461e904668a3d4d05f326a0e86b4d3ce4afb76 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 27 Aug 2024 17:38:06 -0700 Subject: [PATCH 21/59] jz - add some tabs for easier reading --- .../multiplePEER/MultiplePEER_Events.cpp | 86 +++++++++---------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp b/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp index fbe69c22a..e311ebab4 100644 --- a/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp +++ b/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp @@ -88,52 +88,52 @@ int main(int argc, char **argv) if (strcmp(eventType,"ExistingPEER_Events") != 0) { - json_array_append(newEventArray, value); // copy event for next event app to parse + json_array_append(newEventArray, value); // copy event for next event app to parse } else { - json_t *eventObj = json_object(); - json_object_set(eventObj,"type", json_string("Seismic")); - json_object_set(eventObj,"subtype", json_string("MultiplePEER_Event")); - - json_t *existingEventsArray = json_object_get(value,"Events"); - int numExisting = json_array_size(existingEventsArray); - - if (numExisting > 1) { - - json_t *randomVar = json_object(); - json_object_set(randomVar, "distribution",json_string("discrete_design_set_string")); - json_object_set(randomVar, "name",json_string("MultipleEvent")); - json_object_set(randomVar, "value",json_string("RV.MultipleEvent")); - json_t *theMultipleEvents = json_array(); - - json_t *existingEvent = 0; - json_array_foreach(existingEventsArray, index, existingEvent) { - createSimCenterEvent(existingEvent); - json_array_append(theMultipleEvents, json_object_get(existingEvent,"name")); - } - - json_object_set(randomVar, "elements", theMultipleEvents); - json_array_append(rvArray, randomVar); - json_object_set(eventObj, "index", json_string("RV.MultipleEvent")); - - 
} else { - - json_t *existingEvent = json_array_get(existingEventsArray,0); - createSimCenterEvent(existingEvent); - json_object_set(eventObj, "index", json_integer(0)); - - } - - //add first event to event - json_t *firstEvent = json_array_get(existingEventsArray, 0); - json_t *fileValue = json_object_get(firstEvent, "name"); - if (fileValue != NULL) { - const char *fileName = json_string_value(fileValue); - addEvent(fileName, eventObj); - } - - json_array_append(newEventArray, eventObj); + json_t *eventObj = json_object(); + json_object_set(eventObj,"type", json_string("Seismic")); + json_object_set(eventObj,"subtype", json_string("MultiplePEER_Event")); + + json_t *existingEventsArray = json_object_get(value,"Events"); + int numExisting = json_array_size(existingEventsArray); + + if (numExisting > 1) { + + json_t *randomVar = json_object(); + json_object_set(randomVar, "distribution",json_string("discrete_design_set_string")); + json_object_set(randomVar, "name",json_string("MultipleEvent")); + json_object_set(randomVar, "value",json_string("RV.MultipleEvent")); + json_t *theMultipleEvents = json_array(); + + json_t *existingEvent = 0; + json_array_foreach(existingEventsArray, index, existingEvent) { + createSimCenterEvent(existingEvent); + json_array_append(theMultipleEvents, json_object_get(existingEvent,"name")); + } + + json_object_set(randomVar, "elements", theMultipleEvents); + json_array_append(rvArray, randomVar); + json_object_set(eventObj, "index", json_string("RV.MultipleEvent")); + + } else { + + json_t *existingEvent = json_array_get(existingEventsArray,0); + createSimCenterEvent(existingEvent); + json_object_set(eventObj, "index", json_integer(0)); + + } + + //add first event to event + json_t *firstEvent = json_array_get(existingEventsArray, 0); + json_t *fileValue = json_object_get(firstEvent, "name"); + if (fileValue != NULL) { + const char *fileName = json_string_value(fileValue); + addEvent(fileName, eventObj); + } + + json_array_append(newEventArray, eventObj); } } From 8ae17d429e081a46e7e302d1958d5e5541404768 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Wed, 28 Aug 2024 05:05:09 -0700 Subject: [PATCH 22/59] Pin Ruff version to 0.6.1 We'll need to keep an eye on Ruff releases and manually update the version when we deem it appropriate. 
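A minimal local guard against version drift, assuming Ruff is on PATH (a sketch only, not enforced anywhere in this change; the pinned string must be kept in sync with the workflow files by hand):

import subprocess
import sys

PINNED = '0.6.1'  # mirrors the pin in .github/workflows/lint.yml and format_check.yml

# `ruff --version` prints a string like 'ruff 0.6.1'; compare the last token.
out = subprocess.run(['ruff', '--version'], capture_output=True, text=True, check=True)
installed = out.stdout.strip().split()[-1]
if installed != PINNED:
    sys.exit(f'ruff {installed} installed, but CI pins {PINNED}: pip install ruff=={PINNED}')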
--- .github/workflows/format_check.yml | 1 + .github/workflows/lint.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/.github/workflows/format_check.yml b/.github/workflows/format_check.yml index dca19a00f..f779ab252 100644 --- a/.github/workflows/format_check.yml +++ b/.github/workflows/format_check.yml @@ -8,3 +8,4 @@ jobs: - uses: chartboost/ruff-action@v1 with: args: 'format --check' + version: 0.6.1 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 744b4e642..be42eccfe 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -6,3 +6,5 @@ jobs: steps: - uses: actions/checkout@v4 - uses: chartboost/ruff-action@v1 + with: + version: 0.6.1 From 62cf6be3ba95911504c69b73ef99aab11697ffe8 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Wed, 28 Aug 2024 05:08:16 -0700 Subject: [PATCH 23/59] Format and lint --- modules/Workflow/createResponseCSV.py | 14 +++++++------- modules/performUQ/common/parallel_runner_mpi4py.py | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/modules/Workflow/createResponseCSV.py b/modules/Workflow/createResponseCSV.py index e553398c3..557b14c80 100644 --- a/modules/Workflow/createResponseCSV.py +++ b/modules/Workflow/createResponseCSV.py @@ -1,4 +1,4 @@ -# noqa: D100 +# noqa: D100, INP001 # # Code to write response.csv file given input and dakotaTab.out files @@ -14,9 +14,9 @@ import numpy as np import pandas as pd -def main(input_file, dakota_tab_file): # noqa: D103 - directory_inputs = os.path.dirname(input_file) # noqa: PTH120 +def main(input_file, dakota_tab_file): # noqa: D103 + directory_inputs = os.path.dirname(input_file) # noqa: PTH120 os.chdir(directory_inputs) try: @@ -26,11 +26,11 @@ def main(input_file, dakota_tab_file): # noqa: D103 except FileNotFoundError: # Handle the error if the file is not found - print(f"Error createResponseCSV.py: The file '{input_file}' was not found.") # noqa: T201 + print(f"Error createResponseCSV.py: The file '{input_file}' was not found.") # noqa: T201 return except OSError: # Handle other I/O errors - print(f"Error createResponseCSV.py: Error reading the file '{input_file}'.") # noqa: T201 + print(f"Error createResponseCSV.py: Error reading the file '{input_file}'.") # noqa: T201 return app_data = data.get('Applications', None) @@ -59,12 +59,12 @@ def main(input_file, dakota_tab_file): # noqa: D103 except FileNotFoundError: # Handle the error if the file is not found - print(f"Error createResponseCSV.py: The file '{dakota_tab_file}' not found.") # noqa: T201 + print(f"Error createResponseCSV.py: The file '{dakota_tab_file}' not found.") # noqa: T201 return except OSError: # Handle other I/O errors - print(f"Error createResponseCSV.py: Error reading '{dakota_tab_file}'.") # noqa: T201 + print(f"Error createResponseCSV.py: Error reading '{dakota_tab_file}'.") # noqa: T201 return diff --git a/modules/performUQ/common/parallel_runner_mpi4py.py b/modules/performUQ/common/parallel_runner_mpi4py.py index 64bbb4a6d..bf8de20bc 100644 --- a/modules/performUQ/common/parallel_runner_mpi4py.py +++ b/modules/performUQ/common/parallel_runner_mpi4py.py @@ -1,6 +1,7 @@ from mpi4py import MPI # noqa: INP001, D100 from mpi4py.futures import MPIPoolExecutor + class ParallelRunnerMPI4PY: # noqa: D101 def __init__(self, run_type: str = 'runningRemote') -> None: self.run_type = run_type From f702763d4797508024971f40d3ce3e7cde29ae6c Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 30 Aug 2024 10:42:36 -0700 Subject: [PATCH 24/59] jz - fix the bug of hardcoding 
Results dir of rWHALE --- modules/Workflow/whale/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py index 759f8af73..86d0f26ab 100644 --- a/modules/Workflow/whale/main.py +++ b/modules/Workflow/whale/main.py @@ -2874,7 +2874,7 @@ def aggregate_results( # noqa: C901, PLR0912, PLR0915 bldg_dir = Path(os.path.dirname(asst_data[a_i]['file'])).resolve() # noqa: PTH120 main_dir = bldg_dir assetTypeHierarchy = [bldg_dir.name] # noqa: N806 - while main_dir.parent.name != 'Results': + while main_dir.parent.name != self.run_dir.name: main_dir = bldg_dir.parent assetTypeHierarchy = [main_dir.name] + assetTypeHierarchy # noqa: N806, RUF005 From 272370a45e8e760701e475acbcd4e370e9674527 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 30 Aug 2024 15:08:05 -0700 Subject: [PATCH 25/59] jz - fix a bug of HTA app argument in rwhale --- modules/Workflow/whale/main.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py index 86d0f26ab..732702807 100644 --- a/modules/Workflow/whale/main.py +++ b/modules/Workflow/whale/main.py @@ -1756,10 +1756,6 @@ def perform_regional_mapping(self, AIM_file_path, assetType, doParallel=True): for input_ in reg_mapping_app.inputs: if input_['id'] == 'assetFile': input_['default'] = str(AIM_file_path) - # Get the event file path - eventFilePath = self.shared_data.get('RegionalEvent', {}).get( # noqa: N806 - 'eventFilePath', self.reference_dir - ) reg_mapping_app.inputs.append( { 'id': 'filenameEVENTgrid', 'type': 'path', 'default': resolve_path( self.shared_data['RegionalEvent']['eventFile'], - eventFilePath, + self.reference_dir, ), } ) From f67c15feb7a788dd7283de92b46159ccc742d65e Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 30 Aug 2024 15:08:56 -0700 Subject: [PATCH 26/59] jz - fix a format mismatch between EQ simulation and R2D --- .../regionalGroundMotion/HazardSimulationEQ.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 3d9eaed21..621cb0511 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -99,6 +99,15 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 event_info = hazard_info['Event'] # When vector IM is used.
The PGA/SA needs to be computed before PGV im_info = event_info['IntensityMeasure'] + # To make the SA format consistent with R2D requirement + if im_info['Type'] == 'Vector' and 'SA' in im_info.keys(): # noqa: SIM118 + periods = im_info['SA']['Periods'] + periods = [float(i) for i in periods] + im_info['SA']['Periods'] = periods + if im_info['Type'] == 'SA': + periods = [im_info['Period']] + periods = [float(i) for i in periods] + im_info['SA']['Periods'] = periods if im_info['Type'] == 'Vector' and 'PGV' in im_info.keys(): # noqa: SIM118 PGV_info = im_info.pop('PGV') # noqa: N806 im_info.update({'PGV': PGV_info}) From ba9cea05f18762f8f6ca85c34c2a9a90971816be Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 30 Aug 2024 15:20:00 -0700 Subject: [PATCH 27/59] jz - fix a typo --- .../regionalGroundMotion/HazardSimulationEQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 621cb0511..13141556b 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -105,7 +105,7 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 periods = [float(i) for i in periods] im_info['SA']['Periods'] = periods if im_info['Type'] == 'SA': - periods = [im_info['Period']] + periods = [im_info['Periods']] periods = [float(i) for i in periods] im_info['SA']['Periods'] = periods if im_info['Type'] == 'Vector' and 'PGV' in im_info.keys(): # noqa: SIM118 From ed10bce75544854b2a321ec8546f9d58f87e0176 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 30 Aug 2024 15:39:36 -0700 Subject: [PATCH 28/59] jz - fix an error in the previous fix --- .../regionalGroundMotion/HazardSimulationEQ.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 13141556b..b39773d71 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -105,9 +105,9 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 periods = [float(i) for i in periods] im_info['SA']['Periods'] = periods if im_info['Type'] == 'SA': - periods = [im_info['Periods']] + periods = im_info['Periods'] periods = [float(i) for i in periods] - im_info['SA']['Periods'] = periods + im_info['Periods'] = periods if im_info['Type'] == 'Vector' and 'PGV' in im_info.keys(): # noqa: SIM118 PGV_info = im_info.pop('PGV') # noqa: N806 im_info.update({'PGV': PGV_info}) From f740974fa34ddea4279a37445c064f6fd04ae34e Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 3 Sep 2024 22:15:08 -0700 Subject: [PATCH 29/59] jz - use joblib to parallelize station creation in the R2D EQ tool --- .../regionalGroundMotion/CMakeLists.txt | 1 + .../regionalGroundMotion/CreateStation.py | 83 ++++++++++++++++--- .../regionalGroundMotion/FetchOpenSHA.py | 21 +++++ .../regionalGroundMotion/GlobalVariable.py | 1 + .../regionalGroundMotion/ScenarioForecast.py | 27 +++--- 5 files changed, 111 insertions(+), 22 deletions(-) create mode 100644 modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py diff --git
a/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt b/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt index 3d4f51a06..e1db539e4 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt @@ -15,3 +15,4 @@ simcenter_add_python_script(SCRIPT ScenarioForecast.py) simcenter_add_python_script(SCRIPT liquefaction.py) simcenter_add_python_script(SCRIPT landslide.py) simcenter_add_python_script(SCRIPT GMSimulators.py) +simcenter_add_python_script(SCRIPT GlobalVariable.py) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index c875cc8c5..71aa03b08 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -43,6 +43,36 @@ import numpy as np import pandas as pd from tqdm import tqdm +import importlib +import subprocess + + +if importlib.util.find_spec('joblib') is None: + subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'joblib']) # noqa: S603 + +if importlib.util.find_spec('contextlib') is None: + subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'contextlib']) # noqa: S603 + +import joblib +import contextlib +from joblib import Parallel, delayed +import multiprocessing + +@contextlib.contextmanager +def tqdm_joblib(tqdm_object): + """Context manager to patch joblib to report into tqdm progress bar given as argument.""" + class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack): + def __call__(self, *args, **kwargs): + tqdm_object.update(n=self.batch_size) + return super().__call__(*args, **kwargs) + + old_batch_callback = joblib.parallel.BatchCompletionCallBack + joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback + try: + yield tqdm_object + finally: + joblib.parallel.BatchCompletionCallBack = old_batch_callback + tqdm_object.close() if 'stampede2' not in socket.gethostname(): from FetchOpenSHA import ( @@ -428,6 +458,31 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 'chi', ]: user_param_list.pop(user_param_list.index(cur_param)) + # If z1pt0 is OpenSHA default model, use parallel processing to get z1pt0 + if z1Config['Type'] == 'OpenSHA default model': + z1_tag = z1Config['z1_tag'] + if z1_tag == 2: # noqa: PLR2004 + num_cores = multiprocessing.cpu_count() + with tqdm_joblib(tqdm(desc="Get z1pt0 from openSHA", total=selected_stn.shape[0])) as progress_bar: + z1pt0_results = Parallel(n_jobs=num_cores)(delayed(get_site_z1pt0_from_opensha)( + lat, lon + ) for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + )) + if z25Config['Type'] == 'OpenSHA default model': + z25_tag = z25Config['z25_tag'] + if z25_tag == 2: # noqa: PLR2004 + num_cores = multiprocessing.cpu_count() + with tqdm_joblib(tqdm(desc="Get z1pt0 from openSHA", total=selected_stn.shape[0])) as progress_bar: + z2pt5_results = Parallel(n_jobs=num_cores)(delayed(get_site_z2pt5_from_opensha)( + lat, lon + ) for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + )) + + ground_failure_input_keys = set() for ind in tqdm(range(selected_stn.shape[0]), desc='Stations'): stn = selected_stn.iloc[ind, :] @@ -474,9 +529,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 if z1_tag == 1: 
tmp.update({'z1pt0': get_z1(tmp['Vs30'])}) elif z1_tag == 2: # noqa: PLR2004 - z1pt0 = get_site_z1pt0_from_opensha( - tmp['Latitude'], tmp['Longitude'] - ) + z1pt0 = z1pt0_results[ind] if np.isnan(z1pt0): z1pt0 = get_z1(tmp.get('Vs30')) tmp.update({'z1pt0': z1pt0}) @@ -495,9 +548,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 if z25_tag == 1: tmp.update({'z2pt5': get_z25(tmp['z1pt0'])}) elif z25_tag == 2: # noqa: PLR2004 - z2pt5 = get_site_z2pt5_from_opensha( - tmp['Latitude'], tmp['Longitude'] - ) + z2pt5 = z2pt5_results[ind] if np.isnan(z2pt5): z2pt5 = get_z25(tmp['z1pt0']) tmp.update({'z2pt5': z2pt5}) @@ -664,6 +715,16 @@ def get_vs30_global(lat, lon): # return return vs30 # noqa: DOC201, RET504, RUF100 +def parallel_interpolation(func, lat, lon): + """Interpolate data in parallel + Input: + func: interpolation function + lat: list of latitude + lon: list of longitude + Output: + data: list of interpolated data + """ + return func(lat, lon) def get_vs30_thompson(lat, lon): """Interpolate global Vs30 at given latitude and longitude @@ -683,16 +744,18 @@ def get_vs30_thompson(lat, lon): with open(cwd + '/database/site/thompson_vs30_4km.pkl', 'rb') as f: # noqa: PTH123 vs30_thompson = pickle.load(f) # noqa: S301 # Interpolation function (linear) - # Thompson's map gives zero values for water-covered region and outside CA -> use 760 for default - print( # noqa: T201 - 'CreateStation: Warning - approximate 760 m/s for sites not supported by Thompson Vs30 map (water/outside CA).' - ) vs30_thompson['Vs30'][vs30_thompson['Vs30'] < 0.1] = 760 # noqa: PLR2004 interpFunc = interpolate.interp2d( # noqa: N806 vs30_thompson['Longitude'], vs30_thompson['Latitude'], vs30_thompson['Vs30'] ) vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] + num_zeros = len([x for x in vs30 if x == 0]) + if num_zeros > 0: + # Thompson's map gives zero values for water-covered region and outside CA -> use 760 for default + print( # noqa: T201 + f'CreateStation: Warning - approximate 760 m/s for {num_zeros} sites not supported by Thompson Vs30 map (water/outside CA).' 
+ ) # return return vs30 # noqa: DOC201, RET504, RUF100 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py index c1454f127..579a36469 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py @@ -40,6 +40,27 @@ import numpy as np import pandas as pd import ujson +import socket +import subprocess +import importlib +import sys +import psutil +import GlobalVariable + +if 'stampede2' not in socket.gethostname(): + import GlobalVariable + if GlobalVariable.JVM_started is False: + GlobalVariable.JVM_started = True + if importlib.util.find_spec('jpype') is None: + subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 + import jpype + # from jpype import imports + import jpype.imports + from jpype.types import * # noqa: F403 + memory_total = psutil.virtual_memory().total / (1024.0**3) + memory_request = int(memory_total * 0.75) + jpype.addClassPath('./lib/OpenSHA-1.5.2.jar') + jpype.startJVM(f'-Xmx{memory_request}G', convertStrings=False) from java.io import * # noqa: F403 from java.lang import * # noqa: F403 from java.lang.reflect import * # noqa: F403 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py b/modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py new file mode 100644 index 000000000..08a9c992b --- /dev/null +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py @@ -0,0 +1 @@ +JVM_started = False \ No newline at end of file diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 67fd6cf1e..7686bd8dc 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -96,18 +96,21 @@ import socket if 'stampede2' not in socket.gethostname(): - if importlib.util.find_spec('jpype') is None: - subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 - import jpype - - # from jpype import imports - import jpype.imports - from jpype.types import * # noqa: F403 - - memory_total = psutil.virtual_memory().total / (1024.0**3) - memory_request = int(memory_total * 0.75) - jpype.addClassPath('./lib/OpenSHA-1.5.2.jar') - jpype.startJVM(f'-Xmx{memory_request}G', convertStrings=False) + import GlobalVariable + if GlobalVariable.JVM_started is False: + GlobalVariable.JVM_started = True + if importlib.util.find_spec('jpype') is None: + subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 + import jpype + + # from jpype import imports + import jpype.imports + from jpype.types import * # noqa: F403 + + memory_total = psutil.virtual_memory().total / (1024.0**3) + memory_request = int(memory_total * 0.75) + jpype.addClassPath('./lib/OpenSHA-1.5.2.jar') + jpype.startJVM(f'-Xmx{memory_request}G', convertStrings=False) from CreateScenario import ( create_earthquake_scenarios, create_wind_scenarios, From 4e98757a2a34b2cc50d2fd7c6a77265c46a552ae Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 3 Sep 2024 22:36:26 -0700 Subject: [PATCH 30/59] fix typo --- .../regionalGroundMotion/CreateStation.py | 28 +++++++------------ 1 file changed, 10 insertions(+), 18 
deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index 71aa03b08..b3988b3f9 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -474,7 +474,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 z25_tag = z25Config['z25_tag'] if z25_tag == 2: # noqa: PLR2004 num_cores = multiprocessing.cpu_count() - with tqdm_joblib(tqdm(desc="Get z1pt0 from openSHA", total=selected_stn.shape[0])) as progress_bar: + with tqdm_joblib(tqdm(desc="Get z2pt5 from openSHA", total=selected_stn.shape[0])) as progress_bar: z2pt5_results = Parallel(n_jobs=num_cores)(delayed(get_site_z2pt5_from_opensha)( lat, lon ) for lat, lon in zip( @@ -761,36 +761,28 @@ def get_vs30_thompson(lat, lon): def get_z1(vs30): - """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter)""" # noqa: D400 - z1 = np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4))) - # return - return z1 # noqa: DOC201, RET504, RUF100 - + """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter).""" + return np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4))) def get_z25(z1): - """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013)""" # noqa: D400 - z25 = 0.748 + 2.218 * z1 - # return - return z25 # noqa: DOC201, RET504, RUF100 + """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013).""" + return 0.748 + 2.218 * z1 def get_z25fromVs(vs): # noqa: N802 - """Compute z25 (m) based on the prediction equation 33 by Campbell and Bozorgnia (2014) - Vs is m/s - """ # noqa: D205, D400 - z25 = (7.089 - 1.144 * np.log(vs)) * 1000 - # return - return z25 # noqa: DOC201, RET504, RUF100 + """Compute z25 (m) based on the prediction equation 33 by Campbell and Bozorgnia (2014) Vs is m/s.""" + return (7.089 - 1.144 * np.log(vs)) * 1000 def get_zTR_global(lat, lon): # noqa: N802 - """Interpolate depth to rock at given latitude and longitude + """Interpolate depth to rock at given latitude and longitude. 
+ Input: lat: list of latitude lon: list of longitude Output: zTR: list of zTR - """ # noqa: D205, D400 + """ import os import pickle From 3befe5db18275c5a3b0b46123b940f18272bd71b Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 6 Sep 2024 11:31:53 -0700 Subject: [PATCH 31/59] jz - add tabs to some c code for easier reading --- .../multiplePEER/MultiplePEER_Events.cpp | 48 +-- .../createSAM/openSeesInput/OpenSeesInput.c | 32 +- .../common/createStandardUQ_Input.cpp | 383 +++++++++--------- modules/performUQ/dakota/dakotaProcedures.cpp | 2 +- 4 files changed, 232 insertions(+), 233 deletions(-) diff --git a/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp b/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp index e311ebab4..10a1e99c3 100644 --- a/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp +++ b/modules/createEVENT/multiplePEER/MultiplePEER_Events.cpp @@ -174,30 +174,30 @@ int main(int argc, char **argv) const char *eventType = json_string_value(type); if (strcmp(eventType,"Seismic") == 0) { - json_t *subType = json_object_get(value,"subtype"); - if ((subType != NULL) && (strcmp("MultiplePEER_Event",json_string_value(subType)) ==0)) { - - json_t *index = json_object_get(value,"index"); - - if (json_is_integer(index) == false) { - const char *eventName = json_string_value(index); - - // we need to replace the EVENT with another event - json_t *inputEvent = json_array_get(inputEventsArray,count); - json_t *events = json_object_get(inputEvent,"Events"); - for (int i=0; i &rvFiles){ json_array_foreach(defaultRVs, index, value) { - const char *fName = json_string_value(value); - // std::cerr << "rvFILE: " << fName << "\n"; - json_t *rootOther = json_load_file(fName, 0, &error); - json_t *fileRandomVariables = json_object_get(rootOther, "randomVariables"); - if (fileRandomVariables == NULL) { - fileRandomVariables = json_object_get(rootOther, "RandomVariables"); - } - if (fileRandomVariables != NULL) { - // std::cerr << "RANDOM VARIABLES: " << json_dumps(fileRandomVariables, JSON_ENCODE_ANY) << "\n\n"; - int numRVs = json_array_size(fileRandomVariables); - //std::cerr << "numRVs=" <::iterator it; - - it = std::find(randomVariableNames.begin(), - randomVariableNames.end(), - nameS); - - if (it == randomVariableNames.end() ) { - randomVariableNames.push_back(nameS); - json_array_append(rootRVs, fileRandomVariable); - } - - - } - - json_t * corrMatJson = json_object_get(rootINPUT,"correlationMatrix"); - json_t * newCorrMatJson = json_array(); - - // if correlation matrix exists - if (corrMatJson != NULL) { - - int numCorrs = json_array_size(corrMatJson); - int numOrgRVs = std::sqrt(numCorrs); - int numTotRVs = json_array_size(rootRVs); - - std::cerr << "RANDOM VARIABLES: numORIG: " << numOrgRVs << " new: " << numTotRVs << "\n"; - - if (numOrgRVs::iterator it; + + it = std::find(randomVariableNames.begin(), + randomVariableNames.end(), + nameS); + + if (it == randomVariableNames.end() ) { + randomVariableNames.push_back(nameS); + json_array_append(rootRVs, fileRandomVariable); + } + + + } + + json_t * corrMatJson = json_object_get(rootINPUT,"correlationMatrix"); + json_t * newCorrMatJson = json_array(); + + // if correlation matrix exists + if (corrMatJson != NULL) { + + int numCorrs = json_array_size(corrMatJson); + int numOrgRVs = std::sqrt(numCorrs); + int numTotRVs = json_array_size(rootRVs); + + std::cerr << "RANDOM VARIABLES: numORIG: " << numOrgRVs << " new: " << numTotRVs << "\n"; + + if (numOrgRVs Date: Fri, 6 Sep 2024 12:44:38 -0700 Subject: [PATCH 32/59] jz - 
faster spactial correlation for R2D EQ tool --- .../regionalGroundMotion/GMSimulators.py | 34 ++++++++---- .../gmpe/CorrelationModel.py | 54 ++++++++----------- 2 files changed, 45 insertions(+), 43 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py index 72aa79283..bfe4eed82 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py @@ -48,6 +48,8 @@ import h5py import numpy as np import ujson +import geopandas as gpd +from scipy.spatial.distance import cdist from gmpe import CorrelationModel from tqdm import tqdm @@ -192,14 +194,24 @@ def _compute_distance_matrix(self): self.stn_dist = None return # compute the distance matrix - tmp = np.zeros((self.num_sites, self.num_sites)) - for i in range(self.num_sites): - loc_i = np.array([self.sites[i]['lat'], self.sites[i]['lon']]) - for j in range(self.num_sites): - loc_j = np.array([self.sites[j]['lat'], self.sites[j]['lon']]) - # Computing station-wise distances - tmp[i, j] = CorrelationModel.get_distance_from_lat_lon(loc_i, loc_j) - self.stn_dist = tmp + # tmp = np.zeros((self.num_sites, self.num_sites)) + # # for i in tqdm(range(self.num_sites)): + # loc_i = np.array([self.sites[i]['lat'], self.sites[i]['lon']]) + # for j in range(self.num_sites): + # loc_j = np.array([self.sites[j]['lat'], self.sites[j]['lon']]) + # # Computing station-wise distances + # tmp[i, j] = CorrelationModel.get_distance_from_lat_lon(loc_i, loc_j) + # self.stn_dist = tmp + loc_i = np.array([[self.sites[i]['lat'], self.sites[i]['lon']] for i in range(self.num_sites)]) + loc_i_gdf = gpd.GeoDataFrame( + {'geometry': gpd.points_from_xy(loc_i[:, 1], loc_i[:, 0])}, crs='EPSG:4326' + ).to_crs('EPSG:6500') + lat = loc_i_gdf.geometry.y + lon = loc_i_gdf.geometry.x + loc_i = np.array([[lon[i], lat[i]] for i in range(self.num_sites)]) + loc_j = np.array([[lon[i], lat[i]] for i in range(self.num_sites)]) + distances = cdist(loc_i, loc_j, 'euclidean')/1000 # in km + self.stn_dist = distances def set_num_simu(self, num_simu): # noqa: D102 # set simulation number @@ -459,17 +471,17 @@ def compute_intra_event_residual_i(self, cm, im_name_list, num_simu): # noqa: D ).T elif cm == 'Loth & Baker (2013)': residuals = CorrelationModel.loth_baker_correlation_2013( - self.sites, im_name_list, num_simu + self.sites, im_name_list, num_simu, self.stn_dist ) elif cm == 'Markhvida et al. 
(2017)': num_pc = 19 residuals = CorrelationModel.markhvida_ceferino_baker_correlation_2017( - self.sites, im_name_list, num_simu, num_pc + self.sites, im_name_list, num_simu, self.stn_dist, num_pc ) elif cm == 'Du & Ning (2021)': num_pc = 23 residuals = CorrelationModel.du_ning_correlation_2021( - self.sites, im_name_list, num_simu, num_pc + self.sites, im_name_list, num_simu, self.stn_dist, num_pc ) else: # TODO: extending this to more inter-event correlation models # noqa: TD002 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index d6e820aac..882fe655c 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -41,6 +41,7 @@ import numpy as np import pandas as pd +import scipy from scipy.interpolate import interp1d, interp2d @@ -306,7 +307,7 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N80 return rho # noqa: DOC201, RET504, RUF100 -def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 +def loth_baker_correlation_2013(stations, im_name_list, stn_dist, num_simu): # noqa: C901 """Simulating intra-event residuals Reference: Loth and Baker (2013) A spatial cross-correlation model of spectral @@ -338,12 +339,6 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 ) # Computing distance matrix num_stations = len(stations) - stn_dist = np.zeros((num_stations, num_stations)) - for i in range(num_stations): - loc_i = np.array([stations[i]['Latitude'], stations[i]['Longitude']]) - for j in range(num_stations): - loc_j = np.array([stations[j]['Latitude'], stations[j]['Longitude']]) - stn_dist[i, j] = get_distance_from_lat_lon(loc_i, loc_j) # Creating a covariance matrices for each of the principal components num_periods = len(periods) covMatrix = np.zeros((num_stations * num_periods, num_stations * num_periods)) # noqa: N806 @@ -411,6 +406,7 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 stations, im_name_list, num_simu, + stn_dist, num_pc=19, ): """Simulating intra-event residuals @@ -462,12 +458,6 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 model_coef = MCB_pca.iloc[:, 1 : num_pc + 1] # Computing distance matrix num_stations = len(stations) - stn_dist = np.zeros((num_stations, num_stations)) - for i in range(num_stations): - loc_i = np.array([stations[i]['lat'], stations[i]['lon']]) - for j in range(num_stations): - loc_j = np.array([stations[j]['lat'], stations[j]['lon']]) - stn_dist[i, j] = get_distance_from_lat_lon(loc_i, loc_j) # Scaling variance if less than 19 principal components are used c0 = c0 / MCB_var.iloc[0, num_pc - 1] c1 = c1 / MCB_var.iloc[0, num_pc - 1] @@ -551,7 +541,7 @@ def load_du_ning_correlation_2021(datapath): return DN_model, DN_pca, DN_var # noqa: DOC201, RUF100 -def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): +def du_ning_correlation_2021(stations, im_name_list, num_simu, stn_dist, num_pc=23): """Simulating intra-event residuals Reference: Du and Ning (2021) Modeling spatial cross-correlation of multiple @@ -599,36 +589,36 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): model_coef = DN_pca.iloc[:, 1 : num_pc + 1] # Computing distance matrix num_stations = len(stations) - stn_dist = np.zeros((num_stations, num_stations)) - for i 
in range(num_stations): - loc_i = np.array([stations[i]['lat'], stations[i]['lon']]) - for j in range(num_stations): - loc_j = np.array([stations[j]['lat'], stations[j]['lon']]) - stn_dist[i, j] = get_distance_from_lat_lon(loc_i, loc_j) # Scaling variance if less than 23 principal components are used c1 = c1 / DN_var.iloc[0, num_pc - 1] a1 = a1 / DN_var.iloc[0, num_pc - 1] a2 = a2 / DN_var.iloc[0, num_pc - 1] - # Creating a covariance matrices for each of the principal components - covMatrix = np.zeros((num_stations, num_stations, num_pc)) # noqa: N806 + ## The last principal component is nugget effect with c1 = 0 (see Eq 20 and + # table 4. This leads to zero covariance matrix and hence no need to simulate) + num_pc = num_pc - 1 + residuals_pca = np.zeros((num_stations, num_simu, num_pc)) for i in range(num_pc): + # from tqdm import tqdm + # for i in tqdm(range(num_pc)): if a1.iloc[0, i] == 0: # nug - covMatrix[:, :, i] = np.eye(num_stations) * c1.iloc[0, i] + cov_matrix = np.eye(num_stations) * c1.iloc[0, i] else: # iso nest - covMatrix[:, :, i] = ( + cov_matrix = ( c1.iloc[0, i] * (stn_dist == 0) + a1.iloc[0, i] * np.exp(-3.0 * stn_dist / b1.iloc[0, i]) + a2.iloc[0, i] * np.exp(-3.0 * stn_dist / b2.iloc[0, i]) ) - # Simulating residuals - residuals_pca = np.zeros((num_stations, num_simu, num_pc)) - mu = np.zeros(num_stations) - for i in range(num_pc): - residuals_pca[:, :, i] = np.random.multivariate_normal( - mu, covMatrix[:, :, i], num_simu - ).T + # residuals_pca[:, :, i] = np.random.multivariate_normal( + # mu, cov_matrix, num_simu + # ).T + # Replace np multivariate_normal with cholesky and standard normal + standard_normal =np.random.standard_normal((num_simu, num_stations)) + chole_lower = scipy.linalg.cholesky(cov_matrix, lower=True) + corr_samples = chole_lower @ standard_normal.T + residuals_pca[:, :, i] = corr_samples + # Interpolating model_coef by periods pseudo_periods = [x for x in model_periods if type(x) == float] + [ # noqa: E721 ims_map[x] @@ -653,7 +643,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): residuals = np.empty([num_stations, num_periods, num_simu]) for i in range(num_simu): residuals[:, :, i] = np.reshape( - np.matmul(residuals_pca[:, i, :], simu_coef.T), residuals[:, :, i].shape + np.matmul(residuals_pca[:, i, :], simu_coef[:,:-1].T), residuals[:, :, i].shape ) # return From ed7cc6b2de8b84ce742f4f762e61c7b82b980896 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 6 Sep 2024 12:49:00 -0700 Subject: [PATCH 33/59] jz - add parallel access to openSHA from R2D EQ tool --- .../regionalGroundMotion/CreateStation.py | 58 ++-- .../regionalGroundMotion/FetchOpenSHA.py | 265 +++++++++--------- .../regionalGroundMotion/HazardSimulation.py | 12 +- .../HazardSimulationEQ.py | 27 +- 4 files changed, 189 insertions(+), 173 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index b3988b3f9..994777186 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -236,6 +236,11 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 selected_stn[soil_model_label] = [ soil_model_tag for x in range(len(selected_stn.index)) ] + # Check if any duplicated points + if selected_stn.duplicated(subset=[lon_label, lat_label]).any(): + sys.exit('Error: Duplicated lat and lon in the Site File (.csv), ' + 
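The Cholesky substitution in the CorrelationModel.py hunks above relies on a standard identity: if the covariance matrix factors as Sigma = L L^T and z ~ N(0, I), then L z ~ N(0, Sigma), so one factorization can replace repeated calls to np.random.multivariate_normal. A minimal standalone sketch of the technique, with hypothetical sizes and a made-up covariance (none of this is taken from the repository):

    import numpy as np
    import scipy.linalg

    rng = np.random.default_rng(42)
    num_stations, num_simu = 4, 1000  # hypothetical sizes

    # Assumed covariance: exponential decay over a toy distance matrix.
    dist = np.abs(np.subtract.outer(np.arange(num_stations), np.arange(num_stations)))
    cov_matrix = np.exp(-dist / 2.0)

    # Factor once, then map i.i.d. standard normals through the factor.
    chole_lower = scipy.linalg.cholesky(cov_matrix, lower=True)
    standard_normal = rng.standard_normal((num_simu, num_stations))
    corr_samples = chole_lower @ standard_normal.T  # shape (num_stations, num_simu)

    # Sanity check: the sample covariance approaches cov_matrix.
    print(np.round(np.cov(corr_samples), 2))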
f'please check sites\n{selected_stn[selected_stn.duplicated(subset=[lon_label, lat_label], keep = False)].index.tolist()}') + + STN = [] # noqa: N806 stn_file = {'Stations': []} # Get Vs30 @@ -462,26 +467,45 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 if z1Config['Type'] == 'OpenSHA default model': z1_tag = z1Config['z1_tag'] if z1_tag == 2: # noqa: PLR2004 - num_cores = multiprocessing.cpu_count() - with tqdm_joblib(tqdm(desc="Get z1pt0 from openSHA", total=selected_stn.shape[0])) as progress_bar: - z1pt0_results = Parallel(n_jobs=num_cores)(delayed(get_site_z1pt0_from_opensha)( - lat, lon - ) for lat, lon in zip( - selected_stn['Latitude'].tolist(), - selected_stn['Longitude'].tolist(), - )) + # num_cores = z1Config.get('num_cores', multiprocessing.cpu_count()) + num_cores = z1Config.get('num_cores', 1) + if num_cores == 1: + z1pt0_results = [ + get_site_z1pt0_from_opensha(lat, lon) + for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + ) + ] + else: + with tqdm_joblib(tqdm(desc="Get z1pt0 from openSHA", total=selected_stn.shape[0])) as progress_bar: + z1pt0_results = Parallel(n_jobs=num_cores)(delayed(get_site_z1pt0_from_opensha)( + lat, lon + ) for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + )) if z25Config['Type'] == 'OpenSHA default model': z25_tag = z25Config['z25_tag'] if z25_tag == 2: # noqa: PLR2004 - num_cores = multiprocessing.cpu_count() - with tqdm_joblib(tqdm(desc="Get z2pt5 from openSHA", total=selected_stn.shape[0])) as progress_bar: - z2pt5_results = Parallel(n_jobs=num_cores)(delayed(get_site_z2pt5_from_opensha)( - lat, lon - ) for lat, lon in zip( - selected_stn['Latitude'].tolist(), - selected_stn['Longitude'].tolist(), - )) - + # num_cores = z25Config.get('num_cores', multiprocessing.cpu_count()) + num_cores = z25Config.get('num_cores', 1) + if num_cores == 1: + z2pt5_results = [ + get_site_z2pt5_from_opensha(lat, lon) + for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + ) + ] + else: + with tqdm_joblib(tqdm(desc="Get z2pt5 from openSHA", total=selected_stn.shape[0])) as progress_bar: + z2pt5_results = Parallel(n_jobs=num_cores)(delayed(get_site_z2pt5_from_opensha)( + lat, lon + ) for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + )) ground_failure_input_keys = set() for ind in tqdm(range(selected_stn.shape[0]), desc='Stations'): diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py index 579a36469..ac91e1054 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py @@ -506,6 +506,7 @@ def export_to_json( # noqa: C901, D103 minMag=0.0, # noqa: N803 maxMag=10.0, # noqa: N803 maxDistance=1000.0, # noqa: N803 + use_hdf5=False, # noqa: N803 ): # Initializing erf_data = {'type': 'FeatureCollection'} @@ -515,156 +516,148 @@ num_sources = erf.getNumSources() source_tag = [] source_dist = [] - for i in range(num_sources): - rupSource = erf.getSource(i) # noqa: N806 - distanceToSource = rupSource.getMinDistance(site) # noqa: N806 + for i in tqdm(range(num_sources), desc=f'Find sources within {maxDistance} km'): + rup_source = erf.getSource(i) + distance_to_source = rup_source.getMinDistance(site) # sourceSurface = 
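The CreateStation.py hunk above adds a serial fallback so that joblib is only engaged when more than one core is requested. The pattern in isolation, with a placeholder worker instead of the OpenSHA lookups (a sketch, not the backend's code):

    from joblib import Parallel, delayed

    def lookup(lat, lon):  # stands in for e.g. get_site_z1pt0_from_opensha
        return lat + lon

    coords = [(37.0, -122.0), (37.1, -122.1)]
    num_cores = 1  # the patch now defaults to 1 instead of cpu_count()

    if num_cores == 1:
        results = [lookup(lat, lon) for lat, lon in coords]
    else:
        results = Parallel(n_jobs=num_cores)(
            delayed(lookup)(lat, lon) for lat, lon in coords
        )
    print(results)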
rupSource.getSourceSurface() # distanceToSource = sourceSurface.getDistanceRup(site_loc) source_tag.append(i) - source_dist.append(distanceToSource) + source_dist.append(distance_to_source) df = pd.DataFrame.from_dict({'sourceID': source_tag, 'sourceDist': source_dist}) # noqa: PD901 # Sorting sources source_collection = df.sort_values(['sourceDist'], ascending=(True)) source_collection = source_collection[ source_collection['sourceDist'] < maxDistance ] - # Collecting source features - feature_collection = [] - for i in tqdm(range(source_collection.shape[0]), desc='Sources'): - source_index = source_collection.iloc[i, 0] - distanceToSource = source_collection.iloc[i, 1] # noqa: N806 - # Getting rupture distances - rupSource = erf.getSource(source_index) # noqa: N806 - try: - rupList = rupSource.getRuptureList() # noqa: N806 - except: # noqa: E722 - numOfRup = rupSource.getNumRuptures() # noqa: N806 - rupList = [] # noqa: N806 - for n in range(numOfRup): - rupList.append(rupSource.getRupture(n)) - rupList = ArrayList(rupList) # noqa: N806, F405 - rup_tag = [] - rup_dist = [] - for j in range(rupList.size()): - ruptureSurface = rupList.get(j).getRuptureSurface() # noqa: N806 - # If pointsource rupture distance correction - if isinstance(ruptureSurface, PointSurface): # noqa: F405 - # or 'FIELD' or 'NSHMP08' - distCorrType = PtSrcDistCorr.Type.NONE # noqa: N806 - (PointSurface @ ruptureSurface).setDistCorrMagAndType( # noqa: F405 - rupList.get(j).getMag(), distCorrType - ) - cur_dist = ruptureSurface.getDistanceRup(site_loc) - rup_tag.append(j) - if cur_dist < maxDistance: - rup_dist.append(cur_dist) - else: - # exceeding the maxDistance requirement - rup_dist.append(-1.0) - df = pd.DataFrame.from_dict({'rupID': rup_tag, 'rupDist': rup_dist}) # noqa: PD901 - # Sorting - rup_collection = df.sort_values(['rupDist'], ascending=(True)) - # Preparing the dict of ruptures - for j in range(rupList.size()): - cur_dict = dict() # noqa: C408 - cur_dict.update({'type': 'Feature'}) - rup_index = rup_collection.iloc[j, 0] - cur_dist = rup_collection.iloc[j, 1] - if cur_dist <= 0.0: - # skipping ruptures with distance exceeding the maxDistance - continue - rupture = rupList.get(rup_index) - maf = rupture.getMeanAnnualRate(erf.getTimeSpan().getDuration()) - if maf <= 0.0: - continue - ruptureSurface = rupture.getRuptureSurface() # noqa: N806 - # Properties - cur_dict['properties'] = dict() # noqa: C408 - name = str(rupSource.getName()) - if EqName is not None: - if EqName not in name: - continue - cur_dict['properties'].update({'Name': name}) - Mag = float(rupture.getMag()) # noqa: N806 - if (Mag < minMag) or (Mag > maxMag): - continue - cur_dict['properties'].update({'Magnitude': Mag}) - cur_dict['properties'].update({'Rupture': int(rup_index)}) - cur_dict['properties'].update({'Source': int(source_index)}) - if outfile is not None: - # these calls are time-consuming, so only run them if one needs - # detailed outputs of the sources - cur_dict['properties'].update({'Distance': float(cur_dist)}) - distanceRup = rupture.getRuptureSurface().getDistanceRup(site_loc) # noqa: N806 - cur_dict['properties'].update({'DistanceRup': float(distanceRup)}) - distanceSeis = rupture.getRuptureSurface().getDistanceSeis(site_loc) # noqa: N806 - cur_dict['properties'].update({'DistanceSeis': float(distanceSeis)}) - distanceJB = rupture.getRuptureSurface().getDistanceJB(site_loc) # noqa: N806 - cur_dict['properties'].update({'DistanceJB': float(distanceJB)}) - distanceX = rupture.getRuptureSurface().getDistanceX(site_loc) # 
noqa: N806 - cur_dict['properties'].update({'DistanceX': float(distanceX)}) - Prob = rupture.getProbability() # noqa: N806 - cur_dict['properties'].update({'Probability': float(Prob)}) - maf = rupture.getMeanAnnualRate(erf.getTimeSpan().getDuration()) - cur_dict['properties'].update({'MeanAnnualRate': abs(float(maf))}) - # Geometry - cur_dict['geometry'] = dict() # noqa: C408 - if ruptureSurface.isPointSurface(): - # Point source - pointSurface = ruptureSurface # noqa: N806 - location = pointSurface.getLocation() - cur_dict['geometry'].update({'type': 'Point'}) - cur_dict['geometry'].update( - { - 'coordinates': [ - float(location.getLongitude()), - float(location.getLatitude()), - ] - } + # Collecting source features + if not use_hdf5: + feature_collection = [] + for i in tqdm(range(source_collection.shape[0]), desc=f'Find ruptures within {maxDistance} km'): + source_index = source_collection.iloc[i, 0] + # Getting rupture distances + rupSource = erf.getSource(source_index) # noqa: N806 + try: + rupList = rupSource.getRuptureList() # noqa: N806 + except: # noqa: E722 + numOfRup = rupSource.getNumRuptures() # noqa: N806 + rupList = [] # noqa: N806 + for n in range(numOfRup): + rupList.append(rupSource.getRupture(n)) + rupList = ArrayList(rupList) # noqa: N806, F405 + rup_tag = [] + rup_dist = [] + for j in range(rupList.size()): + ruptureSurface = rupList.get(j).getRuptureSurface() # noqa: N806 + # If pointsource rupture distance correction + if isinstance(ruptureSurface, PointSurface): # noqa: F405 + # or 'FIELD' or 'NSHMP08' + distCorrType = PtSrcDistCorr.Type.NONE # noqa: N806 + (PointSurface @ ruptureSurface).setDistCorrMagAndType( # noqa: F405 + rupList.get(j).getMag(), distCorrType ) + cur_dist = ruptureSurface.getDistanceRup(site_loc) + rup_tag.append(j) + if cur_dist < maxDistance: + rup_dist.append(cur_dist) else: - # Line source - try: - trace = ruptureSurface.getUpperEdge() - except: # noqa: E722 - trace = ruptureSurface.getEvenlyDiscritizedUpperEdge() - coordinates = [] - for k in trace: - coordinates.append( # noqa: PERF401 - [float(k.getLongitude()), float(k.getLatitude())] + # exceeding the maxDistance requirement + rup_dist.append(-1.0) + df = pd.DataFrame.from_dict({'rupID': rup_tag, 'rupDist': rup_dist}) # noqa: PD901 + # Sorting + rup_collection = df.sort_values(['rupDist'], ascending=(True)) + # Preparing the dict of ruptures + for j in range(rupList.size()): + cur_dict = dict() # noqa: C408 + cur_dict.update({'type': 'Feature'}) + rup_index = rup_collection.iloc[j, 0] + cur_dist = rup_collection.iloc[j, 1] + if cur_dist <= 0.0: + # skipping ruptures with distance exceeding the maxDistance + continue + rupture = rupList.get(rup_index) + maf = rupture.getMeanAnnualRate(erf.getTimeSpan().getDuration()) + if maf <= 0.0: + continue + ruptureSurface = rupture.getRuptureSurface() # noqa: N806 + # Properties + cur_dict['properties'] = dict() # noqa: C408 + name = str(rupSource.getName()) + if EqName is not None: + if EqName not in name: + continue + cur_dict['properties'].update({'Name': name}) + Mag = float(rupture.getMag()) # noqa: N806 + if (Mag < minMag) or (Mag > maxMag): + continue + cur_dict['properties'].update({'Magnitude': Mag}) + cur_dict['properties'].update({'Rupture': int(rup_index)}) + cur_dict['properties'].update({'Source': int(source_index)}) + if outfile is not None: + # these calls are time-consuming, so only run them if one needs + # detailed outputs of the sources + cur_dict['properties'].update({'Distance': float(cur_dist)}) + distanceRup = 
rupture.getRuptureSurface().getDistanceRup(site_loc) # noqa: N806 + cur_dict['properties'].update({'DistanceRup': float(distanceRup)}) + distanceSeis = rupture.getRuptureSurface().getDistanceSeis(site_loc) # noqa: N806 + cur_dict['properties'].update({'DistanceSeis': float(distanceSeis)}) + distanceJB = rupture.getRuptureSurface().getDistanceJB(site_loc) # noqa: N806 + cur_dict['properties'].update({'DistanceJB': float(distanceJB)}) + distanceX = rupture.getRuptureSurface().getDistanceX(site_loc) # noqa: N806 + cur_dict['properties'].update({'DistanceX': float(distanceX)}) + Prob = rupture.getProbability() # noqa: N806 + cur_dict['properties'].update({'Probability': float(Prob)}) + maf = rupture.getMeanAnnualRate(erf.getTimeSpan().getDuration()) + cur_dict['properties'].update({'MeanAnnualRate': abs(float(maf))}) + # Geometry + cur_dict['geometry'] = dict() # noqa: C408 + if ruptureSurface.isPointSurface(): + # Point source + pointSurface = ruptureSurface # noqa: N806 + location = pointSurface.getLocation() + cur_dict['geometry'].update({'type': 'Point'}) + cur_dict['geometry'].update( + { + 'coordinates': [ + float(location.getLongitude()), + float(location.getLatitude()), + ] + } ) - cur_dict['geometry'].update({'type': 'LineString'}) - cur_dict['geometry'].update({'coordinates': coordinates}) - # Appending - feature_collection.append(cur_dict) - # end for j - # end for i - # sort the list - maf_list_n = [-x['properties']['MeanAnnualRate'] for x in feature_collection] - sort_ids = np.argsort(maf_list_n) - feature_collection_sorted = [feature_collection[i] for i in sort_ids] - del feature_collection - erf_data.update({'features': feature_collection_sorted}) - print( # noqa: T201 - f'FetchOpenSHA: total {len(feature_collection_sorted)} ruptures are collected.' - ) - # num_preview = 1000 - # if len(feature_collection_sorted) > num_preview: - # preview_erf_data={'features': feature_collection_sorted[0:num_preview]} - # else: - # preview_erf_data = erf_data - # Output - # import time - # startTime = time.process_time_ns() - if outfile is not None: + else: + # Line source + try: + trace = ruptureSurface.getUpperEdge() + except: # noqa: E722 + trace = ruptureSurface.getEvenlyDiscritizedUpperEdge() + coordinates = [] + for k in trace: + coordinates.append( # noqa: PERF401 + [float(k.getLongitude()), float(k.getLatitude())] + ) + cur_dict['geometry'].update({'type': 'LineString'}) + cur_dict['geometry'].update({'coordinates': coordinates}) + # Appending + feature_collection.append(cur_dict) + # sort the list + maf_list_n = [-x['properties']['MeanAnnualRate'] for x in feature_collection] + sort_ids = np.argsort(maf_list_n) + feature_collection_sorted = [feature_collection[i] for i in sort_ids] + del feature_collection + erf_data.update({'features': feature_collection_sorted}) print( # noqa: T201 - f'The collected ruptures are sorted by MeanAnnualRate and saved in {outfile}' + f'FetchOpenSHA: total {len(feature_collection_sorted)} ruptures are collected.' 
) - with open(outfile, 'w') as f: # noqa: PTH123 - ujson.dump(erf_data, f, indent=2) - # print(f"Time consumed by json dump is {(time.process_time_ns()-startTime)/1e9}s") - - # del preview_erf_data + if outfile is not None: + print( # noqa: T201 + f'The collected ruptures are sorted by MeanAnnualRate and saved in {outfile}' + ) + with open(outfile, 'w') as f: # noqa: PTH123 + ujson.dump(erf_data, f, indent=2) + else: + import h5py + with h5py.File(outfile, 'w') as h5file: + # Store the geometry as a string array + h5file.create_dataset('geometry', data=gdf.geometry.astype(str).values.astype('S')) # return return erf_data diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py index 2fe6f8bac..53dcc70ab 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py @@ -571,13 +571,13 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 os.makedirs(f"{os.environ.get('OQ_DATADIR')}") # noqa: PTH103 # import modules - from ComputeIntensityMeasure import * # noqa: F403 - from CreateScenario import * # noqa: F403 - from CreateStation import * # noqa: F403 + # from ComputeIntensityMeasure import * # noqa: F403 + # from CreateScenario import * # noqa: F403 + # from CreateStation import * # noqa: F403 - # KZ-08/23/22: adding hazard occurrence model - from HazardOccurrence import * # noqa: F403 - from SelectGroundMotion import * # noqa: F403 + # # KZ-08/23/22: adding hazard occurrence model + # from HazardOccurrence import * # noqa: F403 + # from SelectGroundMotion import * # noqa: F403 if oq_flag: # import FetchOpenQuake diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index b39773d71..eef93e904 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -557,21 +557,20 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 import socket if 'stampede2' not in socket.gethostname(): - if importlib.util.find_spec('jpype') is None: - subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 - import jpype - from jpype import imports - from jpype.types import * # noqa: F403 - - memory_total = psutil.virtual_memory().total / (1024.0**3) - memory_request = int(memory_total * 0.75) - jpype.addClassPath('./lib/OpenSHA-1.5.2.jar') - try: + import GlobalVariable + if GlobalVariable.JVM_started is False: + GlobalVariable.JVM_started = True + if importlib.util.find_spec('jpype') is None: + subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 + import jpype + + # from jpype import imports + import jpype.imports + from jpype.types import * # noqa: F403 + memory_total = psutil.virtual_memory().total / (1024.0**3) + memory_request = int(memory_total * 0.75) + jpype.addClassPath('./lib/OpenSHA-1.5.2.jar') jpype.startJVM(f'-Xmx{memory_request}G', convertStrings=False) - except: # noqa: E722 - print( # noqa: T201 - f'StartJVM of ./lib/OpenSHA-1.5.2.jar with {memory_request} GB Memory fails. 
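The GlobalVariable.JVM_started guard above exists because JPype allows jpype.startJVM() to be called only once per process; a second call raises an error instead of restarting the JVM. A compact sketch of the same guard with a module-level flag standing in for the patch's GlobalVariable module (names and the 75% memory heuristic mirror the hunk, but this is illustrative, not the backend source):

    import jpype
    import psutil

    _JVM_STARTED = False  # plays the role of GlobalVariable.JVM_started

    def ensure_jvm():
        """Start the JVM at most once per process."""
        global _JVM_STARTED
        if _JVM_STARTED:
            return
        _JVM_STARTED = True
        memory_total = psutil.virtual_memory().total / (1024.0**3)
        jpype.addClassPath('./lib/OpenSHA-1.5.2.jar')
        jpype.startJVM(f'-Xmx{int(memory_total * 0.75)}G', convertStrings=False)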
Try again after releasing some memory' - ) if oq_flag: # clear up old db.sqlite3 if any if os.path.isfile(os.path.expanduser('~/oqdata/db.sqlite3')): # noqa: PTH111, PTH113 From 45e2987e52c9f7c2b67b949a25a4b52495113fff Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 6 Sep 2024 12:49:16 -0700 Subject: [PATCH 34/59] ruff --- .../regionalGroundMotion/CreateScenario.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py index 4e423b4fb..c257fa985 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py @@ -457,17 +457,17 @@ def create_earthquake_scenarios( # noqa: C901, D103 t_start = time.time() if source_type == 'ERF': if ( - 'SourceIndex' in scenario_info['EqRupture'].keys() # noqa: SIM118 - and 'RuptureIndex' in scenario_info['EqRupture'].keys() # noqa: SIM118 + 'SourceIndex' in scenario_info['EqRupture'] + and 'RuptureIndex' in scenario_info['EqRupture'] ): source_model = scenario_info['EqRupture']['Model'] eq_source = getERF(scenario_info) # noqa: F405 # check source index list and rupture index list - if type(scenario_info['EqRupture']['SourceIndex']) == int: # noqa: E721 + if isinstance(scenario_info['EqRupture']['SourceIndex'], int): source_index_list = [scenario_info['EqRupture']['SourceIndex']] else: source_index_list = scenario_info['EqRupture']['SourceIndex'] - if type(scenario_info['EqRupture']['RuptureIndex']) == int: # noqa: E721 + if isinstance(scenario_info['EqRupture']['RuptureIndex'], int): rup_index_list = [scenario_info['EqRupture']['RuptureIndex']] else: rup_index_list = scenario_info['EqRupture']['RuptureIndex'] @@ -510,7 +510,8 @@ def create_earthquake_scenarios( # noqa: C901, D103 max_M = scenario_info['EqRupture'].get('max_Mag', 9.0) # noqa: N806 max_R = scenario_info['EqRupture'].get('max_Dist', 1000.0) # noqa: N806 eq_source = getERF(scenario_info) # noqa: F405 - erf_data = export_to_json( # noqa: F405, F841 + use_hdf5 = scenario_info['EqRupture'].get('use_hdf5', False) + export_to_json( # noqa: F405 eq_source, ref_station, outfile=os.path.join(out_dir, 'RupFile.geojson'), # noqa: PTH118 @@ -518,6 +519,7 @@ def create_earthquake_scenarios( # noqa: C901, D103 minMag=min_M, maxMag=max_M, maxDistance=max_R, + use_hdf5=use_hdf5 ) # Parsing data # feat = erf_data['features'] From 416e23222a99b1974863f43b0afa4324a3775d2b Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 6 Sep 2024 13:03:45 -0700 Subject: [PATCH 35/59] Ruff check --- modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py | 9 +++++---- modules/createAIM/INP_FILE/INP_FILE.py | 9 +++++---- modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py | 7 ++++--- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py b/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py index 350ead0da..b72abf1b0 100644 --- a/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py +++ b/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py @@ -1,4 +1,4 @@ -import argparse # noqa: INP001, D100 +import argparse # noqa: D100, INP001 import importlib import json import os @@ -8,7 +8,8 @@ import warnings import geopandas as gpd -import momepy + +# import momepy import numpy as np import pandas as pd import shapely @@ -179,10 +180,10 @@ def defineConnectivities( # 
noqa: N802, D102 # Convert find connectivity and add start_node, end_node attributes edges = self.gdf datacrs = edges.crs - graph = momepy.gdf_to_nx(edges.to_crs('epsg:6500'), approach='primal') + graph = momepy.gdf_to_nx(edges.to_crs('epsg:6500'), approach='primal') # noqa: F821 with warnings.catch_warnings(): # Suppress the warning of disconnected components in the graph warnings.simplefilter('ignore') - nodes, edges, sw = momepy.nx_to_gdf( + nodes, edges, sw = momepy.nx_to_gdf( # noqa: F821 graph, points=True, lines=True, spatial_weights=True ) # edges = edges.set_index('ind') diff --git a/modules/createAIM/INP_FILE/INP_FILE.py b/modules/createAIM/INP_FILE/INP_FILE.py index c2a36f946..9850eb36a 100644 --- a/modules/createAIM/INP_FILE/INP_FILE.py +++ b/modules/createAIM/INP_FILE/INP_FILE.py @@ -1,4 +1,4 @@ -import argparse # noqa: INP001, D100 +import argparse # noqa: D100, INP001 import importlib import json import os @@ -8,7 +8,8 @@ import warnings import geopandas as gpd -import momepy + +# import momepy import numpy as np import pandas as pd import shapely @@ -179,10 +180,10 @@ def defineConnectivities( # noqa: N802, D102 # Convert find connectivity and add start_node, end_node attributes edges = self.gdf datacrs = edges.crs - graph = momepy.gdf_to_nx(edges.to_crs('epsg:6500'), approach='primal') + graph = momepy.gdf_to_nx(edges.to_crs('epsg:6500'), approach='primal') # noqa: F821 with warnings.catch_warnings(): # Suppress the warning of disconnected components in the graph warnings.simplefilter('ignore') - nodes, edges, sw = momepy.nx_to_gdf( + nodes, edges, sw = momepy.nx_to_gdf( # noqa: F821 graph, points=True, lines=True, spatial_weights=True ) # edges = edges.set_index('ind') diff --git a/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py b/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py index 69d2d3265..29a825cd7 100644 --- a/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py +++ b/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py @@ -48,7 +48,8 @@ import warnings import geopandas as gpd -import momepy + +# import momepy import numpy as np import pandas as pd import shapely @@ -358,10 +359,10 @@ def create_asset_files( # noqa: C901, D103, PLR0915 roadDF['geometry'] = LineStringList roadDF = roadDF[['ID', 'roadType', 'lanes', 'maxMPH', 'geometry']] # noqa: N806 roadGDF = gpd.GeoDataFrame(roadDF, geometry='geometry', crs=datacrs) # noqa: N806 - graph = momepy.gdf_to_nx(roadGDF.to_crs('epsg:6500'), approach='primal') + graph = momepy.gdf_to_nx(roadGDF.to_crs('epsg:6500'), approach='primal') # noqa: F821 with warnings.catch_warnings(): # Suppress the warning of disconnected components in the graph warnings.simplefilter('ignore') - nodes, edges, sw = momepy.nx_to_gdf( + nodes, edges, sw = momepy.nx_to_gdf( # noqa: F821 graph, points=True, lines=True, spatial_weights=True ) # Oneway or twoway is not considered in D&L, remove duplicated edges From fe4942d82b8d177f7743ba342e02b06fa17d0951 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Sat, 7 Sep 2024 02:11:02 -0700 Subject: [PATCH 36/59] jz - faster multivariate normal sampling R2D EQ tool --- .../gmpe/CorrelationModel.py | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index 882fe655c..70aabc58f 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ 
b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -341,18 +341,23 @@ def loth_baker_correlation_2013(stations, im_name_list, stn_dist, num_simu): # num_stations = len(stations) # Creating a covariance matrices for each of the principal components num_periods = len(periods) - covMatrix = np.zeros((num_stations * num_periods, num_stations * num_periods)) # noqa: N806 + cov_matrix = np.zeros((num_stations * num_periods, num_stations * num_periods)) for i in range(num_periods): for j in range(num_periods): - covMatrix[ + cov_matrix[ num_stations * i : num_stations * (i + 1), num_stations * j : num_stations * (j + 1), ] = compute_rho_loth_baker_correlation_2013( periods[i], periods[j], stn_dist, B1, B2, B3 ) - mu = np.zeros(num_stations * num_periods) - residuals_raw = np.random.multivariate_normal(mu, covMatrix, num_simu) + # mu = np.zeros(num_stations * num_periods) + # residuals_raw = np.random.multivariate_normal(mu, covMatrix, num_simu) + # Replace np multivariate_normal with cholesky and standard normal + standard_normal =np.random.standard_normal((num_simu, num_stations)) + chole_lower = scipy.linalg.cholesky(cov_matrix, lower=True) + corr_samples = chole_lower @ standard_normal.T + residuals_raw = corr_samples.T # reorder residual_raw [[period1],[period2],...,[]]-->[[site1],[site2],...,[]] residuals_reorder = [] for i in range(num_simu): @@ -462,26 +467,29 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 c0 = c0 / MCB_var.iloc[0, num_pc - 1] c1 = c1 / MCB_var.iloc[0, num_pc - 1] c2 = c2 / MCB_var.iloc[0, num_pc - 1] - # Creating a covariance matrices for each of the principal components - covMatrix = np.zeros((num_stations, num_stations, num_pc)) # noqa: N806 + # Simulating residuals + residuals_pca = np.zeros((num_stations, num_simu, num_pc)) for i in range(num_pc): + # Creating a covariance matrices for each of the principal components if c1.iloc[0, i] == 0: # nug - covMatrix[:, :, i] = np.eye(num_stations) * c0.iloc[0, i] + cov_matrix= np.eye(num_stations) * c0.iloc[0, i] else: # iso nest - covMatrix[:, :, i] = ( + cov_matrix = ( c0.iloc[0, i] * (stn_dist == 0) + c1.iloc[0, i] * np.exp(-3.0 * stn_dist / a1.iloc[0, i]) + c2.iloc[0, i] * np.exp(-3.0 * stn_dist / a2.iloc[0, i]) ) - # Simulating residuals - residuals_pca = np.zeros((num_stations, num_simu, num_pc)) - mu = np.zeros(num_stations) - for i in range(num_pc): - residuals_pca[:, :, i] = np.random.multivariate_normal( - mu, covMatrix[:, :, i], num_simu - ).T + # residuals_pca[:, :, i] = np.random.multivariate_normal( + # mu, cov_matrix, num_simu + # ).T + # Replace np multivariate_normal with cholesky and standard normal + standard_normal =np.random.standard_normal((num_simu, num_stations)) + chole_lower = scipy.linalg.cholesky(cov_matrix, lower=True) + corr_samples = chole_lower @ standard_normal.T + residuals_pca[:, :, i] = corr_samples + # Interpolating model_coef by periods interp_fun = interp1d(model_periods, model_coef, axis=0) model_Tmax = 5.0 # noqa: N806 From 97b0abfa14ca83931eafccdf39b58eb288dd5fdd Mon Sep 17 00:00:00 2001 From: yisangriB Date: Mon, 9 Sep 2024 13:14:45 -0700 Subject: [PATCH 37/59] sy - error to workflow.err --- modules/createEVENT/stochasticGroundMotion/main.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/createEVENT/stochasticGroundMotion/main.cpp b/modules/createEVENT/stochasticGroundMotion/main.cpp index 4c064f9e1..944d3ff56 100644 --- a/modules/createEVENT/stochasticGroundMotion/main.cpp +++ 
b/modules/createEVENT/stochasticGroundMotion/main.cpp @@ -281,7 +281,8 @@ void throwError(std::string msg){ std::string filePathString = cwd.string(); std::size_t loc_dot = filePathString.find_last_of("."); int id = std::stoi(filePathString.substr(loc_dot+1)) ; - std::string errFile = "../workflow.err." + std::to_string(id); // e.g. workflow.err.1 + //std::string errFile = "../workflow.err." + std::to_string(id); // e.g. workflow.err.1 + std::string errFile = "./workflow.err"; // e.g. workflow.err // // Write msg to the file From 30fa5235d7dc47761f9ca1880eb95a33abb7d264 Mon Sep 17 00:00:00 2001 From: yisangriB Date: Mon, 9 Sep 2024 13:15:45 -0700 Subject: [PATCH 38/59] sy - fixing seed in stochastic wind --- modules/createEVENT/stochasticWind/main.cpp | 77 ++++++++++++++++----- 1 file changed, 59 insertions(+), 18 deletions(-) diff --git a/modules/createEVENT/stochasticWind/main.cpp b/modules/createEVENT/stochasticWind/main.cpp index a79963815..49036c60c 100644 --- a/modules/createEVENT/stochasticWind/main.cpp +++ b/modules/createEVENT/stochasticWind/main.cpp @@ -10,6 +10,7 @@ #include "command_parser.h" #include "function_dispatcher.h" #include "wind_generator.h" +#include using json = nlohmann::json; typedef std::chrono::duration< @@ -121,24 +122,64 @@ int main(int argc, char** argv) { auto nanoseconds = std::chrono::duration_cast(duration); - auto wind_forces = - inputs.seed_provided() - ? Dispatcher, json>, std::string, - double, double, double, double, unsigned int, - double, double, int>::instance() - ->dispatch("WittigSinha1975", exposure_category, - gust_wind_speed, drag_coeff, width, height, - num_floors, total_time, force_conversion, - inputs.get_seed()) - : Dispatcher, json>, std::string, - double, double, double, double, unsigned int, - double, double, int>::instance() - ->dispatch("WittigSinha1975", exposure_category, - gust_wind_speed, drag_coeff, width, height, - num_floors, total_time, force_conversion, - nanoseconds.count()); - - auto static_forces = std::get<0>(wind_forces); + + // main seed + int seed; + if (input_data["Events"][0]["seed"].is_string()) { + // if this is "None" + seed = nanoseconds.count(); + + } else { + + int base_seed = input_data["Events"][0]["seed"].get(); + + // Get the current working directory + std::string folderName = std::filesystem::path(std::filesystem::current_path()).filename().string(); + + + // Split by '.' and get the last part (sampNum) + std::size_t pos = folderName.find_last_of('.'); + + if (pos!= std::string::npos) { + std::string samp_num_str = (pos != std::string::npos) ? folderName.substr(pos + 1) : folderName; + int samp_num = std::stoi(samp_num_str); + seed = samp_num + base_seed; + } else { + seed = base_seed; + } + + + } + + + std::cout << seed << std::endl; + auto wind_forces = Dispatcher, json>, std::string, + double, double, double, double, unsigned int, + double, double, int>::instance() + ->dispatch("WittigSinha1975", exposure_category, + gust_wind_speed, drag_coeff, width, height, + num_floors, total_time, force_conversion, + seed); + + // auto wind_forces = + // inputs.seed_provided() + // ? 
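The seed logic added to stochasticWind above makes each sample reproducible yet distinct: when a base seed is given, the sample number parsed from the working-directory name (e.g. workdir.3) is added to it; otherwise a clock-derived seed is used. The same scheme in Python, under the assumption that directories follow the name.sampNum convention (the function name is invented for illustration):

    from pathlib import Path

    def derive_seed(base_seed, workdir):
        """Return base_seed + the sample number in a folder name like 'workdir.3'."""
        name = Path(workdir).name
        _, dot, suffix = name.rpartition('.')
        if dot and suffix.isdigit():
            return base_seed + int(suffix)
        return base_seed

    assert derive_seed(100, '/tmp/run/workdir.3') == 103
    assert derive_seed(100, '/tmp/run/workdir') == 100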
Dispatcher, json>, std::string, + // double, double, double, double, unsigned int, + // double, double, int>::instance() + // ->dispatch("WittigSinha1975", exposure_category, + // gust_wind_speed, drag_coeff, width, height, + // num_floors, total_time, force_conversion, + // inputs.get_seed()) + // : Dispatcher, json>, std::string, + // double, double, double, double, unsigned int, + // double, double, int>::instance() + // ->dispatch("WittigSinha1975", exposure_category, + // gust_wind_speed, drag_coeff, width, height, + // num_floors, total_time, force_conversion, + // nanoseconds.count()); + + + auto static_forces = std::get<0>(wind_forces); auto dynamic_forces = std::get<1>(wind_forces); auto pattern = json::array(); auto time_series = json::array(); From ba0f5f5037349128c73ec7cb83047604b8f91bd1 Mon Sep 17 00:00:00 2001 From: yisangriB Date: Mon, 9 Sep 2024 13:16:25 -0700 Subject: [PATCH 39/59] sy - removing mkl and ipp from smelt --- .gitignore | 2 -- conanfile.py | 4 ++-- modules/createEVENT/CMakeLists.txt | 4 ++-- modules/createEVENT/stochasticGroundMotion/CMakeLists.txt | 2 +- modules/createEVENT/stochasticWind/CMakeLists.txt | 5 +++-- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index a6472ca28..0e6bff056 100644 --- a/.gitignore +++ b/.gitignore @@ -62,8 +62,6 @@ UserDefinedEDP SiteResponse MDOF_BuildingModel ExtractPGA -StochasticWind -StochasticGroundMotion StandardEarthquakeEDP OpenSeesInput OpenSeesPostprocessor diff --git a/conanfile.py b/conanfile.py index 8ea9a94c9..f2c6ab9e9 100644 --- a/conanfile.py +++ b/conanfile.py @@ -18,8 +18,6 @@ class simCenterBackendApps(ConanFile): # noqa: D101 } options = {'shared': [True, False]} # noqa: RUF012 default_options = { # noqa: RUF012 - 'mkl-static:threaded': False, - 'ipp-static:simcenter_backend': True, 'libcurl:with_ssl': 'openssl', } generators = 'cmake' @@ -33,6 +31,8 @@ class simCenterBackendApps(ConanFile): # noqa: D101 'jsonformoderncpp/3.7.0', 'nanoflann/1.3.2', 'nlopt/2.7.1', + 'smelt/1.2.0@simcenter/stable', + 'kissfft/131.1.0', ] # Custom attributes for Bincrafters recipe conventions diff --git a/modules/createEVENT/CMakeLists.txt b/modules/createEVENT/CMakeLists.txt index 8b37e584f..6106ae001 100644 --- a/modules/createEVENT/CMakeLists.txt +++ b/modules/createEVENT/CMakeLists.txt @@ -12,8 +12,8 @@ add_subdirectory(pointWindSpeed) add_subdirectory(LLNL_SW4) add_subdirectory(SimCenterEvent) add_subdirectory(ASCE7_WindSpeed) -#add_subdirectory(stochasticGroundMotion) -#add_subdirectory(stochasticWind) +add_subdirectory(stochasticGroundMotion) +add_subdirectory(stochasticWind) add_subdirectory(groundMotionIM) add_subdirectory(uniformPEER) add_subdirectory(experimentalWindForces) diff --git a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt index d489f5b65..ecd3ae58d 100644 --- a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt +++ b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt @@ -1,4 +1,4 @@ simcenter_add_executable(NAME StochasticGM - DEPENDS CONAN_PKG::smelt CONAN_PKG::ipp-static CONAN_PKG::mkl-static) + DEPENDS CONAN_PKG::smelt CONAN_PKG::kissfft) set_property(TARGET StochasticGM PROPERTY CXX_STANDARD 17) diff --git a/modules/createEVENT/stochasticWind/CMakeLists.txt b/modules/createEVENT/stochasticWind/CMakeLists.txt index 7e14fe704..a47587260 100644 --- a/modules/createEVENT/stochasticWind/CMakeLists.txt +++ b/modules/createEVENT/stochasticWind/CMakeLists.txt @@ -1,3 +1,4 @@ 
simcenter_add_executable(NAME StochasticWind - DEPENDS CONAN_PKG::smelt CONAN_PKG::ipp-static - CONAN_PKG::mkl-static common) + DEPENDS CONAN_PKG::smelt CONAN_PKG::kissfft common) + +set_property(TARGET StochasticWind PROPERTY CXX_STANDARD 17) From cd0ef8a6b79d68f9e3179ac4200f201957665ded Mon Sep 17 00:00:00 2001 From: yisangriB Date: Mon, 9 Sep 2024 18:19:51 -0700 Subject: [PATCH 40/59] sy - fixing a bug in surrogate prediction --- modules/performFEM/surrogateGP/gpPredict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/performFEM/surrogateGP/gpPredict.py b/modules/performFEM/surrogateGP/gpPredict.py index 5582a5414..1d6ff956c 100644 --- a/modules/performFEM/surrogateGP/gpPredict.py +++ b/modules/performFEM/surrogateGP/gpPredict.py @@ -324,7 +324,7 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803 not name_values[1] .replace('.', '', 1) .replace('e', '', 1) - .replace('-', '', 1) + .replace('-', '', 2) .replace('+', '', 1) .isdigit() ): From 2a68fec43e5e70d3e034041fa772d5f54e64a0ee Mon Sep 17 00:00:00 2001 From: yisangriB Date: Mon, 9 Sep 2024 18:23:57 -0700 Subject: [PATCH 41/59] sy - fixing a bug in surrogate prediction --- modules/performFEM/surrogateGP/gpPredict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/performFEM/surrogateGP/gpPredict.py b/modules/performFEM/surrogateGP/gpPredict.py index 5582a5414..1d6ff956c 100644 --- a/modules/performFEM/surrogateGP/gpPredict.py +++ b/modules/performFEM/surrogateGP/gpPredict.py @@ -324,7 +324,7 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803 not name_values[1] .replace('.', '', 1) .replace('e', '', 1) - .replace('-', '', 1) + .replace('-', '', 2) .replace('+', '', 1) .isdigit() ): From c0acede072e6db55d6c65176ce2db2cf6876e5f9 Mon Sep 17 00:00:00 2001 From: yisangriB Date: Tue, 10 Sep 2024 14:47:38 -0700 Subject: [PATCH 42/59] sy - fixing PLoM for quoFEM and EE-UQ examples --- modules/performUQ/SimCenterUQ/runPLoM.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/modules/performUQ/SimCenterUQ/runPLoM.py b/modules/performUQ/SimCenterUQ/runPLoM.py index bc3f494a6..7d833cb18 100644 --- a/modules/performUQ/SimCenterUQ/runPLoM.py +++ b/modules/performUQ/SimCenterUQ/runPLoM.py @@ -673,14 +673,17 @@ def _load_hyperparameter(self): run_flag = 0 try: # load constraints first - constr_file = Path(self.constraintsFile).resolve() # noqa: F405 - sys.path.insert(0, str(constr_file.parent) + '/') - constr_script = importlib.__import__( # noqa: F405 - constr_file.name[:-3], globals(), locals(), [], 0 - ) - self.beta_c = constr_script.beta_c() - print('beta_c = ', self.beta_c) # noqa: T201 - # if smootherKDE + if ( + self.constraintsFlag + ): # sy - added because quoFEM/EE-UQ example failed 09/10/2024 + constr_file = Path(self.constraintsFile).resolve() # noqa: F405 + sys.path.insert(0, str(constr_file.parent) + '/') + constr_script = importlib.__import__( # noqa: F405 + constr_file.name[:-3], globals(), locals(), [], 0 + ) + self.beta_c = constr_script.beta_c() + print('beta_c = ', self.beta_c) # noqa: T201 + # if smootherKDE if self.smootherKDE_Customize: kde_file = Path(self.smootherKDE_file).resolve() # noqa: F405 sys.path.insert(0, str(kde_file.parent) + '/') From ac7bd571c33c8d8d92c429964f650c58e20aa72c Mon Sep 17 00:00:00 2001 From: fmckenna Date: Wed, 11 Sep 2024 12:29:34 -0700 Subject: [PATCH 43/59] fmk - moving smelt lib generation to backend, updating corresponding stochastic EQ and Wind applications, sadly had to add boost to 
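The gpPredict.py change repeated in patches 40 and 41 above widens a string test that decides whether a token is numeric: after stripping one '.', one 'e', one '+', and now up to two '-' characters, the remainder must be all digits. Two '-' removals are needed because a negative number in scientific notation carries a sign in both the mantissa and the exponent, as this small illustration (not repository code) shows:

    def looks_numeric(token):
        return (token.replace('.', '', 1)
                     .replace('e', '', 1)
                     .replace('-', '', 2)  # two: leading sign plus exponent sign
                     .replace('+', '', 1)
                     .isdigit())

    print(looks_numeric('-1.5e-3'))  # True; with only one '-' removed this was False
    print(looks_numeric('abc'))      # False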
conanfile --- conanfile.py | 7 +- modules/createEVENT/CMakeLists.txt | 1 + modules/createEVENT/common/CMakeLists.txt | 1 + .../createEVENT/common/smelt/CMakeLists.txt | 22 + modules/createEVENT/common/smelt/beta_dist.cc | 32 + modules/createEVENT/common/smelt/beta_dist.h | 67 + modules/createEVENT/common/smelt/configure.cc | 122 ++ modules/createEVENT/common/smelt/configure.h | 17 + .../common/smelt/dabaghi_der_kiureghian.cc | 1510 +++++++++++++++++ .../common/smelt/dabaghi_der_kiureghian.h | 418 +++++ .../createEVENT/common/smelt/distribution.h | 61 + modules/createEVENT/common/smelt/factory.h | 153 ++ modules/createEVENT/common/smelt/filter.cc | 224 +++ modules/createEVENT/common/smelt/filter.h | 48 + .../common/smelt/function_dispatcher.h | 111 ++ .../common/smelt/inv_gauss_dist.cc | 35 + .../createEVENT/common/smelt/inv_gauss_dist.h | 67 + .../createEVENT/common/smelt/json_object.cc | 74 + .../createEVENT/common/smelt/json_object.h | 134 ++ .../createEVENT/common/smelt/json_object.tcc | 94 + .../common/smelt/lognormal_dist.cc | 32 + .../createEVENT/common/smelt/lognormal_dist.h | 66 + .../createEVENT/common/smelt/nelder_mead.cc | 206 +++ .../createEVENT/common/smelt/nelder_mead.h | 128 ++ .../createEVENT/common/smelt/normal_dist.cc | 32 + .../createEVENT/common/smelt/normal_dist.h | 66 + .../common/smelt/normal_multivar.cc | 73 + .../common/smelt/normal_multivar.h | 66 + .../createEVENT/common/smelt/numeric_utils.cc | 521 ++++++ .../createEVENT/common/smelt/numeric_utils.h | 227 +++ .../common/smelt/stochastic_model.h | 71 + .../common/smelt/students_t_dist.cc | 36 + .../common/smelt/students_t_dist.h | 68 + .../createEVENT/common/smelt/uniform_dist.cc | 32 + .../createEVENT/common/smelt/uniform_dist.h | 66 + .../createEVENT/common/smelt/vlachos_et_al.cc | 919 ++++++++++ .../createEVENT/common/smelt/vlachos_et_al.h | 277 +++ .../createEVENT/common/smelt/wind_profile.cc | 58 + .../createEVENT/common/smelt/wind_profile.h | 30 + modules/createEVENT/common/smelt/window.h | 35 + .../createEVENT/common/smelt/wittig_sinha.cc | 335 ++++ .../createEVENT/common/smelt/wittig_sinha.h | 182 ++ .../stochasticGroundMotion/CMakeLists.txt | 9 +- .../createEVENT/stochasticWind/CMakeLists.txt | 10 +- 44 files changed, 6735 insertions(+), 8 deletions(-) create mode 100644 modules/createEVENT/common/CMakeLists.txt create mode 100644 modules/createEVENT/common/smelt/CMakeLists.txt create mode 100644 modules/createEVENT/common/smelt/beta_dist.cc create mode 100644 modules/createEVENT/common/smelt/beta_dist.h create mode 100644 modules/createEVENT/common/smelt/configure.cc create mode 100644 modules/createEVENT/common/smelt/configure.h create mode 100644 modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc create mode 100644 modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h create mode 100644 modules/createEVENT/common/smelt/distribution.h create mode 100644 modules/createEVENT/common/smelt/factory.h create mode 100644 modules/createEVENT/common/smelt/filter.cc create mode 100644 modules/createEVENT/common/smelt/filter.h create mode 100644 modules/createEVENT/common/smelt/function_dispatcher.h create mode 100644 modules/createEVENT/common/smelt/inv_gauss_dist.cc create mode 100644 modules/createEVENT/common/smelt/inv_gauss_dist.h create mode 100644 modules/createEVENT/common/smelt/json_object.cc create mode 100644 modules/createEVENT/common/smelt/json_object.h create mode 100644 modules/createEVENT/common/smelt/json_object.tcc create mode 100644 modules/createEVENT/common/smelt/lognormal_dist.cc 
create mode 100644 modules/createEVENT/common/smelt/lognormal_dist.h create mode 100644 modules/createEVENT/common/smelt/nelder_mead.cc create mode 100644 modules/createEVENT/common/smelt/nelder_mead.h create mode 100644 modules/createEVENT/common/smelt/normal_dist.cc create mode 100644 modules/createEVENT/common/smelt/normal_dist.h create mode 100644 modules/createEVENT/common/smelt/normal_multivar.cc create mode 100644 modules/createEVENT/common/smelt/normal_multivar.h create mode 100644 modules/createEVENT/common/smelt/numeric_utils.cc create mode 100644 modules/createEVENT/common/smelt/numeric_utils.h create mode 100644 modules/createEVENT/common/smelt/stochastic_model.h create mode 100644 modules/createEVENT/common/smelt/students_t_dist.cc create mode 100644 modules/createEVENT/common/smelt/students_t_dist.h create mode 100644 modules/createEVENT/common/smelt/uniform_dist.cc create mode 100644 modules/createEVENT/common/smelt/uniform_dist.h create mode 100644 modules/createEVENT/common/smelt/vlachos_et_al.cc create mode 100644 modules/createEVENT/common/smelt/vlachos_et_al.h create mode 100644 modules/createEVENT/common/smelt/wind_profile.cc create mode 100644 modules/createEVENT/common/smelt/wind_profile.h create mode 100644 modules/createEVENT/common/smelt/window.h create mode 100644 modules/createEVENT/common/smelt/wittig_sinha.cc create mode 100644 modules/createEVENT/common/smelt/wittig_sinha.h diff --git a/conanfile.py b/conanfile.py index f2c6ab9e9..772baeaeb 100644 --- a/conanfile.py +++ b/conanfile.py @@ -2,7 +2,6 @@ from conans import CMake, ConanFile - class simCenterBackendApps(ConanFile): # noqa: D101 name = 'SimCenterBackendApplications' version = '1.2.2' @@ -25,14 +24,14 @@ class simCenterBackendApps(ConanFile): # noqa: D101 requires = [ # noqa: RUF012 'jansson/2.13.1', 'zlib/1.2.11', - 'libcurl/8.1.1', + 'libcurl/8.6.0', 'eigen/3.3.7', 'clara/1.1.5', 'jsonformoderncpp/3.7.0', 'nanoflann/1.3.2', 'nlopt/2.7.1', - 'smelt/1.2.0@simcenter/stable', - 'kissfft/131.1.0', + "boost/1.71.0", + 'kissfft/131.1.0' ] # Custom attributes for Bincrafters recipe conventions diff --git a/modules/createEVENT/CMakeLists.txt b/modules/createEVENT/CMakeLists.txt index 6106ae001..fcb573ff4 100644 --- a/modules/createEVENT/CMakeLists.txt +++ b/modules/createEVENT/CMakeLists.txt @@ -1,3 +1,4 @@ +add_subdirectory(common) add_subdirectory(CFDEvent) add_subdirectory(GeoClawOpenFOAM) add_subdirectory(DEDM_HRP) diff --git a/modules/createEVENT/common/CMakeLists.txt b/modules/createEVENT/common/CMakeLists.txt new file mode 100644 index 000000000..f34499b35 --- /dev/null +++ b/modules/createEVENT/common/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(smelt) diff --git a/modules/createEVENT/common/smelt/CMakeLists.txt b/modules/createEVENT/common/smelt/CMakeLists.txt new file mode 100644 index 000000000..fc6eb80d9 --- /dev/null +++ b/modules/createEVENT/common/smelt/CMakeLists.txt @@ -0,0 +1,22 @@ +add_library(smelt STATIC + numeric_utils.cc + normal_multivar.cc + normal_dist.cc + lognormal_dist.cc + beta_dist.cc + inv_gauss_dist.cc + students_t_dist.cc + json_object.cc + vlachos_et_al.cc + configure.cc + wittig_sinha.cc + filter.cc + wind_profile.cc + uniform_dist.cc + dabaghi_der_kiureghian.cc + nelder_mead.cc) + +# Include directories +target_include_directories(smelt PUBLIC ${CONAN_INCLUDE_DIRS}) +#target_include_directories(smelt PUBLIC ${CONAN_INCLUDE_DIRS_JANSSON}) + diff --git a/modules/createEVENT/common/smelt/beta_dist.cc b/modules/createEVENT/common/smelt/beta_dist.cc new file mode 100644 
index 000000000..ba86f0f92 --- /dev/null +++ b/modules/createEVENT/common/smelt/beta_dist.cc @@ -0,0 +1,32 @@ +#include +#include +#include "beta_dist.h" + +stochastic::BetaDistribution::BetaDistribution(double alpha, double beta) + : Distribution(), + alpha_{alpha}, + beta_{beta}, + distribution_{alpha, beta_} +{} + +std::vector stochastic::BetaDistribution::cumulative_dist_func( + const std::vector& locations) const { + std::vector evaluations(locations.size()); + + for (unsigned int i = 0; i < locations.size(); ++i) { + evaluations[i] = cdf(distribution_, locations[i]); + } + + return evaluations; +} + +std::vector stochastic::BetaDistribution::inv_cumulative_dist_func( + const std::vector& probabilities) const { + std::vector evaluations(probabilities.size()); + + for (unsigned int i = 0; i < probabilities.size(); ++i) { + evaluations[i] = quantile(distribution_, probabilities[i]); + } + + return evaluations; +} diff --git a/modules/createEVENT/common/smelt/beta_dist.h b/modules/createEVENT/common/smelt/beta_dist.h new file mode 100644 index 000000000..16d47f714 --- /dev/null +++ b/modules/createEVENT/common/smelt/beta_dist.h @@ -0,0 +1,67 @@ +#ifndef _BETA_DIST_H_ +#define _BETA_DIST_H_ + +#include +#include +#include +#include "distribution.h" + +namespace stochastic { +/** + * Beta distribution + */ +class BetaDistribution : public Distribution { + public: + /** + * @constructor Delete default constructor + */ + BetaDistribution() = delete; + + /** + * @constructor Construct beta distribution with specified mean and + * standard deviation + * @param[in] alpha Shape parameter + * @param[in] beta Shape parameter + */ + BetaDistribution(double alpha, double beta); + + /** + * @destructor Virtual destructor + */ + virtual ~BetaDistribution(){}; + + /** + * Get the name of the distribution model + * @return Model name as a string + */ + std::string name() const override { return "BetaDist"; }; + + /** + * Compute the cumulative distribution function (CDF) of the distribution at + * specified input locations + * @param[in] locations Vector containing locations at which to + * calculate CDF + * @return Vector of evaluated values of CDF at input locations + */ + std::vector cumulative_dist_func( + const std::vector& locations) const override; + + /** + * Compute the inverse cumulative distribution function (ICDF) of the + * distribution at specified input locations + * @param[in] probabilities Vector containing probabilities at which to + * calculate ICDF + * @return Vector of evaluated values of ICDF at input locations + */ + std::vector inv_cumulative_dist_func( + const std::vector& probabilities) const override; + + protected: + double alpha_; /**< Shape parameter */ + double beta_; /**< Shape parameter */ + boost::math::beta_distribution + distribution_; /**< Beta distribution */ +}; +} // namespace stochastic + +#endif // _BETA_DIST_H_ diff --git a/modules/createEVENT/common/smelt/configure.cc b/modules/createEVENT/common/smelt/configure.cc new file mode 100644 index 000000000..72ce20e2e --- /dev/null +++ b/modules/createEVENT/common/smelt/configure.cc @@ -0,0 +1,122 @@ +#include +#include "beta_dist.h" +#include "configure.h" +#include "dabaghi_der_kiureghian.h" +#include "factory.h" +#include "filter.h" +#include "function_dispatcher.h" +#include "inv_gauss_dist.h" +#include "lognormal_dist.h" +#include "numeric_utils.h" +#include "normal_dist.h" +#include "normal_multivar.h" +#include "students_t_dist.h" +#include "uniform_dist.h" +#include "vlachos_et_al.h" +#include 
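beta_dist.cc above evaluates the CDF and the inverse CDF (quantile) of a Beta(alpha, beta) distribution at vectors of points via boost::math. The equivalent behavior can be checked with SciPy (a reference sketch only; the backend itself uses the Boost implementation):

    from scipy.stats import beta

    alpha, b = 2.0, 5.0
    locations = [0.1, 0.5, 0.9]
    probabilities = [0.05, 0.5, 0.95]

    cdf_vals = [beta.cdf(x, alpha, b) for x in locations]        # cumulative_dist_func
    icdf_vals = [beta.ppf(p, alpha, b) for p in probabilities]   # inv_cumulative_dist_func
    print(cdf_vals, icdf_vals)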
"wind_profile.h" +#include "window.h" +#include "wittig_sinha.h" + +void config::initialize() { + // RANDOM VARIABLE GENERATION + // Register multivariate normal distribution random number generator + static Register + normal_multivar_default("MultivariateNormal"); + static Register + normal_multivar("MultivariateNormal"); + + // DISTRIBUTION TYPES + // Register normal distribution + static Register + normal_dist("NormalDist"); + // Register lognormal distribution + static Register + lognormal_dist("LognormalDist"); + // Register inverse Gaussian distribution + static Register + inv_gauss_dist("InverseGaussianDist"); + // Register beta distribution + static Register + beta_dist("BetaDist"); + // Register Student's t distribution + static Register + student_t_dist("StudentstDist"); + // Register uniform distribution + static Register + uniform_dist("UniformDist"); + + // STOCHASTIC MODELS + // Earthquake + static Register + vlachos_et_al("VlachosSiteSpecificEQ"); + static Register + vlachos_et_al_seed("VlachosSiteSpecificEQ"); + static Register + dabaghi_der_kiureghian("DabaghiDerKiureghianNFGM"); + static Register + dabaghi_der_kiureghian_seed("DabaghiDerKiureghianNFGM"); + + // Wind + static Register + wittig_sinha_equal_floors("WittigSinhaDiscreteFreqWind"); + static Register + wittig_sinha_equal_floors_seed("WittigSinhaDiscreteFreqWind"); + static Register&, + const std::vector&, const std::vector&, + double> + wittig_sinha_unequal_floors("WittigSinhaDiscreteFreqWind"); + static Register&, + const std::vector&, const std::vector&, + double, int> + wittig_sinha_unequal_floors_seed("WittigSinhaDiscreteFreqWind"); + + // WINDOW FUNCTIONS + // Register Hann window + static DispatchRegister hann_window_function( + "HannWindow", signal_processing::hann_window); + + // FILTER FUNCTIONS + // Register highpass Butterworth filter + static DispatchRegister>, int, double> + hp_butterworth_function("HighPassButter", + signal_processing::hp_butterworth()); + + // Register filter impulse response + static DispatchRegister, std::vector, + std::vector, int, int> + filter_impulse_response("ImpulseResponse", + signal_processing::impulse_response()); + + // Register acausal highpass Butterwork filter + static DispatchRegister, double, double, unsigned int, + unsigned int> + acausal_highpass_filter("AcausalHighpassButterworth", + signal_processing::acausal_highpass_filter()); + + // WIND VELOCITY PROFILES + // Exposure category-based velocity profile using power law + static DispatchRegister&, double, double, + std::vector&> + exposure_category_vel("ExposureCategoryVel", + wind::exposure_category_velocity()); +} diff --git a/modules/createEVENT/common/smelt/configure.h b/modules/createEVENT/common/smelt/configure.h new file mode 100644 index 000000000..bdfa1db94 --- /dev/null +++ b/modules/createEVENT/common/smelt/configure.h @@ -0,0 +1,17 @@ +#ifndef _CONFIGURE_H_ +#define _CONFIGURE_H_ + +/** + * Namespace for configuration and settings + */ +namespace config { + +/** + * This function registers classes with the factory methods so that clients + * using the library will have access to them + */ +void initialize(); + +} // namespace config + +#endif // _CONFIGURE_H_ diff --git a/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc new file mode 100644 index 000000000..0ae2b30b2 --- /dev/null +++ b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc @@ -0,0 +1,1510 @@ +#define _USE_MATH_DEFINES +#include +#include +#include +#include 
+#include +#include +#include +#include +// Boost random generator +#include +#include +#include +#include +// Eigen dense matrices +#include + +#include "beta_dist.h" +#include "dabaghi_der_kiureghian.h" +#include "factory.h" +#include "function_dispatcher.h" +#include "json_object.h" +#include "nelder_mead.h" +#include "normal_dist.h" +#include "normal_multivar.h" +#include "numeric_utils.h" + +stochastic::DabaghiDerKiureghian::DabaghiDerKiureghian( + stochastic::FaultType faulting, stochastic::SimulationType simulation_type, + double moment_magnitude, double depth_to_rupt, double rupture_distance, + double vs30, double s_or_d, double theta_or_phi, unsigned int num_sims, + unsigned int num_realizations, bool truncate) + : StochasticModel(), + faulting_{faulting}, + sim_type_{simulation_type}, + moment_magnitude_{moment_magnitude}, + depth_to_rupt_{depth_to_rupt}, + rupture_dist_{rupture_distance}, + vs30_{vs30}, + s_or_d_{s_or_d}, + theta_or_phi_{theta_or_phi}, + truncate_{truncate}, + num_realizations_{num_realizations}, + seed_value_{std::numeric_limits::infinity()}, + time_step_{0.005} +{ + model_name_ = "DabaghiDerKiureghian"; + + switch (sim_type_) { + case stochastic::SimulationType::NoPulse: + num_sims_pulse_ = 0; + break; + + case stochastic::SimulationType::Pulse: + num_sims_pulse_ = num_sims; + break; + + case stochastic::SimulationType::PulseAndNoPulse: + num_sims_pulse_ = simulate_pulse_type(num_sims); + break; + } + + num_sims_nopulse_ = num_sims - num_sims_pulse_; + + // Initialize multivariate normal generator without seed + sample_generator_ = + Factory::instance()->create( + "MultivariateNormal"); + + // Set regression constants + std_dev_pulse_.resize(19); + std_dev_nopulse_.resize(14); + corr_matrix_pulse_.resize(19, 19); + corr_matrix_nopulse_.resize(14, 14); + beta_distribution_pulse_.resize(19, 8); + beta_distribution_nopulse_.resize(14, 8); + params_lower_bound_.resize(19); + params_upper_bound_.resize(19); + params_fitted1_.resize(19); + params_fitted2_.resize(19); + params_fitted3_.resize(19); + + // clang-format off + std_dev_pulse_ << + 0.385316782551070, 0.580604607504062, 1.000000000000000, + 1.000000000000000, 0.468600626648626, 0.781462017439926, + 0.371951335342951, 0.442124880737131, 0.393548709857826, + 0.409988222892834, 0.820134132413354, 1.096436125406160, + 0.746552904533869, 0.402440900041447, 0.461424491954810, + 0.407724539607907, 0.440166826670740, 0.824632603897275, + 0.961997443697123; + std_dev_nopulse_ << + 1.052723262620090, 0.398427668080412, 0.456828618083131, + 0.305727090125879, 0.447517114210032, 0.941288665677244, + 1.007680943597980, 1.028226030318770, 0.375877126809162, + 0.458413470215522, 0.294118965466636, 0.399966941388161, + 0.831550984874095, 0.887870513796394; + + corr_matrix_pulse_ << + 1, -0.175836500638571, -0.0203457508302324, 0.173589795302921, 0.191081284058678, 0.446601858355920, 0.0422292189436895, 0.0268120615665615, 0.120003074985399, -0.382241273128019, 0.0573276270760638, 0.155115849407343, 0.407222009805941, -0.0405527073664880, 0.0164573906036360, 0.0415103476380082, -0.282855958356553, 0.106173613413053, 0.0486136094283240, + -0.175836500638571, 1, 0.183359484001117, 0.00131202858838653, 0.431552926960257, -0.0802776119560438, 0.0983744630339078, 0.310456055209018, 0.368157334142620, 0.0509849248659661, 0.0223164167958617, 0.175239604674009, -0.0979434412969958, 0.143078236501468, 0.295214504980143, 0.370873286184142, 0.0409704140512328, -0.0681917932721656, 0.243299650091412, + -0.0203457508302324, 
0.183359484001117, 1, -0.190019984820145, 0.242770958876810, 0.178170099097870, 0.107202069371700, 0.150512140384424, 0.236433714526518, -0.114026379004051, 0.0535511100783777, 0.0642193542887409, 0.0688913090123055, 0.127858468767928, 0.0893601270266779, 0.213748741406994, -0.0697188946975121, 0.0212082250410815, 0.123098089235859, + 0.173589795302921, 0.00131202858838653, -0.190019984820145, 1, 0.119159474433286, -0.0812496120969922, 0.0911626190533515, 0.0653720081018262, 0.0718151835177181, -0.133641807530098, -0.0920397447082436, 0.0299407456292105, -0.0242282623085006, -0.0172855366304964, 0.0663348465393307, 0.0729217102589892, -0.146245197915904, 0.0999563418090508, -0.0409433584122388, + 0.191081284058678, 0.431552926960257, 0.242770958876810, 0.119159474433286, 1, 0.0597439686298471, 0.163724290728144, 0.733127256394521, 0.788656583220928, -0.0312901152364670, -0.153794052040040, 0.126072923696987, 0.0157717534215698, 0.186868291148981, 0.680765575675777, 0.749289248752773, 0.00752109582582447, -0.165776964800244, 0.192516732945902, + 0.446601858355920, -0.0802776119560438, 0.178170099097870, -0.0812496120969922, 0.0597439686298471, 1, -0.0244049816649646, 0.0584250580150467, 0.0856617566986387, 0.0783654782068241, 0.0990943110343599, 0.0264806656713516, 0.837794567493788, 0.0235744551632278, 0.00722635162195151, 0.0801743456052000, 0.125480219427063, 0.0813027074983168, 0.0454360407125637, + 0.0422292189436895, 0.0983744630339078, 0.107202069371700, 0.0911626190533515, 0.163724290728144, -0.0244049816649646, 1, 0.0612360822187756, 0.245411252666782, -0.0247015935697729, -0.224809121780326, 0.0459354003374228, 0.0364365286784528, 0.760546075583923, 0.0370009049319755, 0.205658174145097, -0.0837872744722257, -0.0338684418879410, -0.0274880324550587, + 0.0268120615665615, 0.310456055209018, 0.150512140384424, 0.0653720081018262, 0.733127256394521, 0.0584250580150467, 0.0612360822187756, 1, 0.855219194729329, 0.0120435632525462, -0.0489647334211007, 0.199955997824984, 0.0212627853872894, 0.146801029873847, 0.931624963947970, 0.850430406207709, 0.0232039506387295, -0.00394570150177181, 0.244390432559234, + 0.120003074985399, 0.368157334142620, 0.236433714526518, 0.0718151835177181, 0.788656583220928, 0.0856617566986387, 0.245411252666782, 0.855219194729329, 1, 0.0298843738756568, -0.0618147333807716, 0.197760496777865, 0.104861038989822, 0.262606914312613, 0.795537444898935, 0.906602423826413, 0.0381266607245457, -0.0143594270096637, 0.226001111347866, + -0.382241273128019, 0.0509849248659661, -0.114026379004051, -0.133641807530098, -0.0312901152364670, 0.0783654782068241, -0.0247015935697729, 0.0120435632525462, 0.0298843738756568, 1, -0.241519621897699, 0.112473855187119, 0.171776282660699, -0.0492035952139502, 0.0493755848461730, 0.112703656481410, 0.864715714588433, -0.286206615545551, 0.157882561174870, + 0.0573276270760638, 0.0223164167958617, 0.0535511100783777, -0.0920397447082436, -0.153794052040040, 0.0990943110343599, -0.224809121780326, -0.0489647334211007, -0.0618147333807716, -0.241519621897699, 1, 0.112365366315021, -0.0106706754632641, -0.0488287220881385, -0.0635241398373312, -0.0911374530847290, -0.0885687207002145, 0.421522224993818, 0.239492035016932, + 0.155115849407343, 0.175239604674009, 0.0642193542887409, 0.0299407456292105, 0.126072923696987, 0.0264806656713516, 0.0459354003374228, 0.199955997824984, 0.197760496777865, 0.112473855187119, 0.112365366315021, 1, -0.0274158393051499, 0.0871234642667006, 0.160919524797887, 0.256368904115107, 0.275537783357955, 
-0.175867301345906, 0.792491388890200, + 0.407222009805941, -0.0979434412969958, 0.0688913090123055, -0.0242282623085006, 0.0157717534215698, 0.837794567493788, 0.0364365286784528, 0.0212627853872894, 0.104861038989822, 0.171776282660699, -0.0106706754632641, -0.0274158393051499, 1, -0.168436208860883, 0.0139228196755138, 0.0606940022435021, 0.0950730856215810, 0.166344473008140, -0.0377746475967110, + -0.0405527073664880, 0.143078236501468, 0.127858468767928, -0.0172855366304964, 0.186868291148981, 0.0235744551632278, 0.760546075583923, 0.146801029873847, 0.262606914312613, -0.0492035952139502, -0.0488287220881385, 0.0871234642667006, -0.168436208860883, 1, 0.0584697041090115, 0.243068478704018, 0.0101378284587601, -0.134310788077535, 0.0858141211009980, + 0.0164573906036360, 0.295214504980143, 0.0893601270266779, 0.0663348465393307, 0.680765575675777, 0.00722635162195151, 0.0370009049319755, 0.931624963947970, 0.795537444898935, 0.0493755848461730, -0.0635241398373312, 0.160919524797887, 0.0139228196755138, 0.0584697041090115, 1, 0.841226866362572, 0.0319804533198723, 0.0395868643324250, 0.183336741524880, + 0.0415103476380082, 0.370873286184142, 0.213748741406994, 0.0729217102589892, 0.749289248752773, 0.0801743456052000, 0.205658174145097, 0.850430406207709, 0.906602423826413, 0.112703656481410, -0.0911374530847290, 0.256368904115107, 0.0606940022435021, 0.243068478704018, 0.841226866362572, 1, 0.0999775444207582, -0.0848763381048173, 0.277349661980356, + -0.282855958356553, 0.0409704140512328, -0.0697188946975121, -0.146245197915904, 0.00752109582582447, 0.125480219427063, -0.0837872744722257, 0.0232039506387295, 0.0381266607245457, 0.864715714588433, -0.0885687207002145, 0.275537783357955, 0.0950730856215810, 0.0101378284587601, 0.0319804533198723, 0.0999775444207582, 1, -0.432951535761925, 0.262376572921422, + 0.106173613413053, -0.0681917932721656, 0.0212082250410815, 0.0999563418090508, -0.165776964800244, 0.0813027074983168, -0.0338684418879410, -0.00394570150177181, -0.0143594270096637, -0.286206615545551, 0.421522224993818, -0.175867301345906, 0.166344473008140, -0.134310788077535, 0.0395868643324250, -0.0848763381048173, -0.432951535761925, 1, -0.184861197592255, + 0.0486136094283240, 0.243299650091412, 0.123098089235859, -0.0409433584122388, 0.192516732945902, 0.0454360407125637, -0.0274880324550587, 0.244390432559234, 0.226001111347866, 0.157882561174870, 0.239492035016932, 0.792491388890200, -0.0377746475967110, 0.0858141211009980, 0.183336741524880, 0.277349661980356, 0.262376572921422, -0.184861197592255, 1; + + corr_matrix_nopulse_ << + 1, -0.183620641202513, 0.0890171218487119, 0.104132896092390, 0.0143281984142704, 0.202871723469377, -0.151909317725644, 0.945163870283100, -0.0778432911362303, 0.0495683691288216, 0.0966843496273208, 0.0917721771113965, 0.103741261286614, -0.121195511065596, + -0.183620641202513, 1, 0.0854423655718587, 0.307373593686928, -0.0150490152853094, -0.149397471797000, 0.0888348816281089, -0.0794047082384602, 0.848433024094089, 0.0950830201555768, 0.288741920713302, -0.0596971902001111, -0.0160895956989375, 0.113905908782625, + 0.0890171218487119, 0.0854423655718587, 1, 0.813213357087557, -0.225438606252670, 0.000735703328121357, -0.0840944291284059, 0.0562899783045387, 0.137192754102300, 0.907605550316775, 0.788263163970441, -0.189167012249831, -0.0219422025252862, -0.0931560965857281, + 0.104132896092390, 0.307373593686928, 0.813213357087557, 1, -0.162877782549727, -0.0946212742252604, -0.0192432432422716, 0.131014027317413, 0.289495591671556, 
0.752836252137069, 0.908489822222397, -0.151488045845577, -0.0637984858287899, -0.0498615936932623, + 0.0143281984142704, -0.0150490152853094, -0.225438606252670, -0.162877782549727, 1, -0.187527904866745, -0.163661457269968, 0.0720174301613615, -0.0805658506819631, -0.173027594836660, -0.165135405413428, 0.897082085156820, -0.0778400029130002, -0.00420375269007833, + 0.202871723469377, -0.149397471797000, 0.000735703328121357, -0.0946212742252604, -0.187527904866745, 1, -0.0853760806838177, 0.151981779909482, -0.0282514238500333, 0.00217240817323823, -0.0879254146125414, -0.0874961774514421, 0.647468312996265, -0.157070775414507, + -0.151909317725644, 0.0888348816281089, -0.0840944291284059, -0.0192432432422716, -0.163661457269968, -0.0853760806838177, 1, -0.109821275189057, 0.0605607584025393, -0.0674419385042876, -0.0178734643092523, -0.0879585887923949, -0.105931812299965, 0.761324011431321, + 0.945163870283100, -0.0794047082384602, 0.0562899783045387, 0.131014027317413, 0.0720174301613615, 0.151981779909482, -0.109821275189057, 1, -0.0744735914486929, 0.0477307773362732, 0.116976058986177, 0.104731056969540, 0.137507782979518, -0.107158677162180, + -0.0778432911362303, 0.848433024094089, 0.137192754102300, 0.289495591671556, -0.0805658506819631, -0.0282514238500333, 0.0605607584025393, -0.0744735914486929, 1, 0.0782297693189997, 0.294723991917604, -0.0895932216480470, -0.0501878780366773, 0.0967842822751738, + 0.0495683691288216, 0.0950830201555768, 0.907605550316775, 0.752836252137069, -0.173027594836660, 0.00217240817323823, -0.0674419385042876, 0.0477307773362732, 0.0782297693189997, 1, 0.786122088469745, -0.177521125226899, 0.00868592321024064, -0.0671981184080449, + 0.0966843496273208, 0.288741920713302, 0.788263163970441, 0.908489822222397, -0.165135405413428, -0.0879254146125414, -0.0178734643092523, 0.116976058986177, 0.294723991917604, 0.786122088469745, 1, -0.168356869639510, -0.0773913106180435, -0.0274804568206274, + 0.0917721771113965, -0.0596971902001111, -0.189167012249831, -0.151488045845577, 0.897082085156820, -0.0874961774514421, -0.0879585887923949, 0.104731056969540, -0.0895932216480470, -0.177521125226899, -0.168356869639510, 1, -0.183773515755380, 0.00693211686662153, + 0.103741261286614, -0.0160895956989375, -0.0219422025252862, -0.0637984858287899, -0.0778400029130002, 0.647468312996265, -0.105931812299965, 0.137507782979518, -0.0501878780366773, 0.00868592321024064, -0.0773913106180435, -0.183773515755380, 1, -0.110874872058067, + -0.121195511065596, 0.113905908782625, -0.0931560965857281, -0.0498615936932623, -0.00420375269007833, -0.157070775414507, 0.761324011431321, -0.107158677162180, 0.0967842822751738, -0.0671981184080449, -0.0274804568206274, 0.00693211686662153, -0.110874872058067, 1; + + beta_distribution_pulse_ << + 1.69862554416145, 0.608190177030033, -0.608190177030033, -0.576217471720854, 0, 0.183071013159864, -0.0939319189357984, 0.00657091132855174, + -2.47924338395758, 0.670395625327187, 0, 0, 0, -0.263957799575519, -0.232548659903406, 0.00791988432530859, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + -4.24873260256537, 0.852185710838635, 0, 0.389602053633798, 0, -0.380323064996537, -0.0880126751081291, 0, + -2.11599237942931, 1.47405211856417, -1.37810504103118, -1.07311968166742, 0, 0.336513504829882, 0, 0, + -0.381092000896248, 0.732824259094057, 0, 0.216502277780155, 0, -0.162653108888503, -0.426573570884097, 0, + -5.56310544580757, 0.905239391642438, 0, 0.385150957140062, 0, -0.282428134685156, 0, 0, + -4.77682417817789, 
0.879981585446539, 0, 0.310609998745732, 0, -0.339225914155431, 0, 0, + 0.966712608298821, -0.110938996751065, 0, 0, 0, 0, 0.183289269601829, 0, + -2.16587686889173, 0.321501356495567, 0, 0, 0, 0, 0, 0, + -1.70734873800232, 0.433032709755610, 0, -0.412648447269525, 0, 0, 0, 0, + -0.263198077701693, 1.13060571952101, -1.16957372638359, -1.65164476300789, 0.104746885300915, 0.404058507352901, 0, 0, + -0.515969600508314, 0.754135122993588, 0, 0.191575083960373, 0, -0.121665118341219, -0.423844926774291, 0, + -5.77208004012831, 0.923144661954273, 0, 0.402940883581593, 0, -0.238200773846646, 0, 0, + -5.01588271867143, 0.905027154000921, 0, 0.326841161038211, 0, -0.328283913521590, 0, 0, + 0.434339606308037, -0.125225415996048, 0, 0, 0, 0, 0.301631247865572, 0, + -2.87544520181323, 0.415682222485807, 0, 0, 0, 0, 0, 0, + -1.86755738290362, 0.457448335779201, 0, -0.501103981295545, 0, 0, 0, 0; + + beta_distribution_nopulse_ << + 8.09695881287823, 1.00609515629221, -1.39347614723327, -4.85869770683701, 0.472644100309933, 0.434550762616159, -0.862562872197509, 0, + -1.03473761679032, 0.769091178587874, 0, 0.412237308297152, 0, -0.377739650769220, -0.424234099315427, 0, + -4.72728279119446, 0.709717476708319, 0, 0.470974168011549, 0, -0.123518047425648, 0, 0, + -4.44400222195478, 0.798093247074753, 0, 0.345405210060350, 0, -0.230823340141895, 0, 0, + 0.247133528936450, -0.149209862203390, 0, 0, 0, 0, 0.377202902904920, 0, + -1.44302935447839, 0.223053706671624, 0, 0, 0, 0, 0, 0, + -0.380413278316438, 0.159342468070527, 0, -0.298208438215333, 0, 0, 0, 0, + 7.30682757526241, 0.999256668956432, -1.33082594407524, -4.95306361630276, 0.490554994733579, 0.442502068793772, -0.835310070621911, 0, + -0.403711730133755, 0.672375321924977, 0, 0.335372498461681, 0, -0.330322239630250, -0.366700025738387, 0, + -4.79820204505010, 0.709160958437296, 0, 0.472560804537015, 0, -0.0755764830052928, 0, 0, + -4.35041760661412, 0.785290791385159, 0, 0.325462132630085, 0, -0.221525656800750, 0, 0, + 0.424849811725595, -0.181204207590470, 0, 0, 0, 0, 0.401549107903204, 0, + -2.97911606394595, 0.420016455603546, 0, 0, 0, 0, 0, 0, + -0.703694160589291, 0.160571013696218, 0, -0.145792047865653, 0, 0, 0, 0; + + params_lower_bound_ << 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, -3.5, -4.7, 0, 0, 0, 0, 0, -3.5, -4.7; + + params_upper_bound_ << 0, 0, 3.2, 2, 0, 0, 0, 0, 0, 0, 1.5, 0, 0, 0, 0, 0, 0, 1.5, 0; + + params_fitted1_ << 0, 0, 1.30326178289206, 0, 0, 0, 0, 0, 0, 0, 14.2935537214223, 5.33551936215137, 0, 0, 0, 0, 0, 14.2935537214223, 5.33551936215137; + + params_fitted2_ << 0, 0, 3.96858083951547, 0, 0, 0, 0, 0, 0, 0, 6.40242376475815, 3.82954843573707, 0, 0, 0, 0, 0, 6.40242376475815, 3.82954843573707; + + params_fitted3_ << 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.42179588354923, 0, 0, 0, 0, 0, 0, 4.42179588354923, 0; + // clang-format on +} + +stochastic::DabaghiDerKiureghian::DabaghiDerKiureghian( + stochastic::FaultType faulting, stochastic::SimulationType simulation_type, + double moment_magnitude, double depth_to_rupt, double rupture_distance, + double vs30, double s_or_d, double theta_or_phi, unsigned int num_sims, + unsigned int num_realizations, bool truncate, int seed_value) + : StochasticModel(), + faulting_{faulting}, + sim_type_{simulation_type}, + moment_magnitude_{moment_magnitude}, + depth_to_rupt_{depth_to_rupt}, + rupture_dist_{rupture_distance}, + vs30_{vs30}, + s_or_d_{s_or_d}, + theta_or_phi_{theta_or_phi}, + truncate_{truncate}, + num_realizations_{num_realizations}, + seed_value_{seed_value}, + time_step_{0.005} +{ + 
model_name_ = "DabaghiDerKiureghian"; + + switch (sim_type_) { + case stochastic::SimulationType::NoPulse: + num_sims_pulse_ = 0; + break; + + case stochastic::SimulationType::Pulse: + num_sims_pulse_ = num_sims; + break; + + case stochastic::SimulationType::PulseAndNoPulse: + num_sims_pulse_ = simulate_pulse_type(num_sims); + break; + } + + num_sims_nopulse_ = num_sims - num_sims_pulse_; + + // Initialize multivariate normal generator without seed + sample_generator_ = + Factory::instance()->create( + "MultivariateNormal", std::move(seed_value_)); + + // Set regression constants + std_dev_pulse_.resize(19); + std_dev_nopulse_.resize(14); + corr_matrix_pulse_.resize(19, 19); + corr_matrix_nopulse_.resize(14, 14); + beta_distribution_pulse_.resize(19, 8); + beta_distribution_nopulse_.resize(14, 8); + params_lower_bound_.resize(19); + params_upper_bound_.resize(19); + params_fitted1_.resize(19); + params_fitted2_.resize(19); + params_fitted3_.resize(19); + + // clang-format off + std_dev_pulse_ << + 0.385316782551070, 0.580604607504062, 1.000000000000000, + 1.000000000000000, 0.468600626648626, 0.781462017439926, + 0.371951335342951, 0.442124880737131, 0.393548709857826, + 0.409988222892834, 0.820134132413354, 1.096436125406160, + 0.746552904533869, 0.402440900041447, 0.461424491954810, + 0.407724539607907, 0.440166826670740, 0.824632603897275, + 0.961997443697123; + std_dev_nopulse_ << + 1.052723262620090, 0.398427668080412, 0.456828618083131, + 0.305727090125879, 0.447517114210032, 0.941288665677244, + 1.007680943597980, 1.028226030318770, 0.375877126809162, + 0.458413470215522, 0.294118965466636, 0.399966941388161, + 0.831550984874095, 0.887870513796394; + + corr_matrix_pulse_ << + 1, -0.175836500638571, -0.0203457508302324, 0.173589795302921, 0.191081284058678, 0.446601858355920, 0.0422292189436895, 0.0268120615665615, 0.120003074985399, -0.382241273128019, 0.0573276270760638, 0.155115849407343, 0.407222009805941, -0.0405527073664880, 0.0164573906036360, 0.0415103476380082, -0.282855958356553, 0.106173613413053, 0.0486136094283240, + -0.175836500638571, 1, 0.183359484001117, 0.00131202858838653, 0.431552926960257, -0.0802776119560438, 0.0983744630339078, 0.310456055209018, 0.368157334142620, 0.0509849248659661, 0.0223164167958617, 0.175239604674009, -0.0979434412969958, 0.143078236501468, 0.295214504980143, 0.370873286184142, 0.0409704140512328, -0.0681917932721656, 0.243299650091412, + -0.0203457508302324, 0.183359484001117, 1, -0.190019984820145, 0.242770958876810, 0.178170099097870, 0.107202069371700, 0.150512140384424, 0.236433714526518, -0.114026379004051, 0.0535511100783777, 0.0642193542887409, 0.0688913090123055, 0.127858468767928, 0.0893601270266779, 0.213748741406994, -0.0697188946975121, 0.0212082250410815, 0.123098089235859, + 0.173589795302921, 0.00131202858838653, -0.190019984820145, 1, 0.119159474433286, -0.0812496120969922, 0.0911626190533515, 0.0653720081018262, 0.0718151835177181, -0.133641807530098, -0.0920397447082436, 0.0299407456292105, -0.0242282623085006, -0.0172855366304964, 0.0663348465393307, 0.0729217102589892, -0.146245197915904, 0.0999563418090508, -0.0409433584122388, + 0.191081284058678, 0.431552926960257, 0.242770958876810, 0.119159474433286, 1, 0.0597439686298471, 0.163724290728144, 0.733127256394521, 0.788656583220928, -0.0312901152364670, -0.153794052040040, 0.126072923696987, 0.0157717534215698, 0.186868291148981, 0.680765575675777, 0.749289248752773, 0.00752109582582447, -0.165776964800244, 0.192516732945902, + 0.446601858355920, -0.0802776119560438, 
0.178170099097870, -0.0812496120969922, 0.0597439686298471, 1, -0.0244049816649646, 0.0584250580150467, 0.0856617566986387, 0.0783654782068241, 0.0990943110343599, 0.0264806656713516, 0.837794567493788, 0.0235744551632278, 0.00722635162195151, 0.0801743456052000, 0.125480219427063, 0.0813027074983168, 0.0454360407125637, + 0.0422292189436895, 0.0983744630339078, 0.107202069371700, 0.0911626190533515, 0.163724290728144, -0.0244049816649646, 1, 0.0612360822187756, 0.245411252666782, -0.0247015935697729, -0.224809121780326, 0.0459354003374228, 0.0364365286784528, 0.760546075583923, 0.0370009049319755, 0.205658174145097, -0.0837872744722257, -0.0338684418879410, -0.0274880324550587, + 0.0268120615665615, 0.310456055209018, 0.150512140384424, 0.0653720081018262, 0.733127256394521, 0.0584250580150467, 0.0612360822187756, 1, 0.855219194729329, 0.0120435632525462, -0.0489647334211007, 0.199955997824984, 0.0212627853872894, 0.146801029873847, 0.931624963947970, 0.850430406207709, 0.0232039506387295, -0.00394570150177181, 0.244390432559234, + 0.120003074985399, 0.368157334142620, 0.236433714526518, 0.0718151835177181, 0.788656583220928, 0.0856617566986387, 0.245411252666782, 0.855219194729329, 1, 0.0298843738756568, -0.0618147333807716, 0.197760496777865, 0.104861038989822, 0.262606914312613, 0.795537444898935, 0.906602423826413, 0.0381266607245457, -0.0143594270096637, 0.226001111347866, + -0.382241273128019, 0.0509849248659661, -0.114026379004051, -0.133641807530098, -0.0312901152364670, 0.0783654782068241, -0.0247015935697729, 0.0120435632525462, 0.0298843738756568, 1, -0.241519621897699, 0.112473855187119, 0.171776282660699, -0.0492035952139502, 0.0493755848461730, 0.112703656481410, 0.864715714588433, -0.286206615545551, 0.157882561174870, + 0.0573276270760638, 0.0223164167958617, 0.0535511100783777, -0.0920397447082436, -0.153794052040040, 0.0990943110343599, -0.224809121780326, -0.0489647334211007, -0.0618147333807716, -0.241519621897699, 1, 0.112365366315021, -0.0106706754632641, -0.0488287220881385, -0.0635241398373312, -0.0911374530847290, -0.0885687207002145, 0.421522224993818, 0.239492035016932, + 0.155115849407343, 0.175239604674009, 0.0642193542887409, 0.0299407456292105, 0.126072923696987, 0.0264806656713516, 0.0459354003374228, 0.199955997824984, 0.197760496777865, 0.112473855187119, 0.112365366315021, 1, -0.0274158393051499, 0.0871234642667006, 0.160919524797887, 0.256368904115107, 0.275537783357955, -0.175867301345906, 0.792491388890200, + 0.407222009805941, -0.0979434412969958, 0.0688913090123055, -0.0242282623085006, 0.0157717534215698, 0.837794567493788, 0.0364365286784528, 0.0212627853872894, 0.104861038989822, 0.171776282660699, -0.0106706754632641, -0.0274158393051499, 1, -0.168436208860883, 0.0139228196755138, 0.0606940022435021, 0.0950730856215810, 0.166344473008140, -0.0377746475967110, + -0.0405527073664880, 0.143078236501468, 0.127858468767928, -0.0172855366304964, 0.186868291148981, 0.0235744551632278, 0.760546075583923, 0.146801029873847, 0.262606914312613, -0.0492035952139502, -0.0488287220881385, 0.0871234642667006, -0.168436208860883, 1, 0.0584697041090115, 0.243068478704018, 0.0101378284587601, -0.134310788077535, 0.0858141211009980, + 0.0164573906036360, 0.295214504980143, 0.0893601270266779, 0.0663348465393307, 0.680765575675777, 0.00722635162195151, 0.0370009049319755, 0.931624963947970, 0.795537444898935, 0.0493755848461730, -0.0635241398373312, 0.160919524797887, 0.0139228196755138, 0.0584697041090115, 1, 0.841226866362572, 0.0319804533198723, 
0.0395868643324250, 0.183336741524880, + 0.0415103476380082, 0.370873286184142, 0.213748741406994, 0.0729217102589892, 0.749289248752773, 0.0801743456052000, 0.205658174145097, 0.850430406207709, 0.906602423826413, 0.112703656481410, -0.0911374530847290, 0.256368904115107, 0.0606940022435021, 0.243068478704018, 0.841226866362572, 1, 0.0999775444207582, -0.0848763381048173, 0.277349661980356, + -0.282855958356553, 0.0409704140512328, -0.0697188946975121, -0.146245197915904, 0.00752109582582447, 0.125480219427063, -0.0837872744722257, 0.0232039506387295, 0.0381266607245457, 0.864715714588433, -0.0885687207002145, 0.275537783357955, 0.0950730856215810, 0.0101378284587601, 0.0319804533198723, 0.0999775444207582, 1, -0.432951535761925, 0.262376572921422, + 0.106173613413053, -0.0681917932721656, 0.0212082250410815, 0.0999563418090508, -0.165776964800244, 0.0813027074983168, -0.0338684418879410, -0.00394570150177181, -0.0143594270096637, -0.286206615545551, 0.421522224993818, -0.175867301345906, 0.166344473008140, -0.134310788077535, 0.0395868643324250, -0.0848763381048173, -0.432951535761925, 1, -0.184861197592255, + 0.0486136094283240, 0.243299650091412, 0.123098089235859, -0.0409433584122388, 0.192516732945902, 0.0454360407125637, -0.0274880324550587, 0.244390432559234, 0.226001111347866, 0.157882561174870, 0.239492035016932, 0.792491388890200, -0.0377746475967110, 0.0858141211009980, 0.183336741524880, 0.277349661980356, 0.262376572921422, -0.184861197592255, 1; + + corr_matrix_nopulse_ << + 1, -0.183620641202513, 0.0890171218487119, 0.104132896092390, 0.0143281984142704, 0.202871723469377, -0.151909317725644, 0.945163870283100, -0.0778432911362303, 0.0495683691288216, 0.0966843496273208, 0.0917721771113965, 0.103741261286614, -0.121195511065596, + -0.183620641202513, 1, 0.0854423655718587, 0.307373593686928, -0.0150490152853094, -0.149397471797000, 0.0888348816281089, -0.0794047082384602, 0.848433024094089, 0.0950830201555768, 0.288741920713302, -0.0596971902001111, -0.0160895956989375, 0.113905908782625, + 0.0890171218487119, 0.0854423655718587, 1, 0.813213357087557, -0.225438606252670, 0.000735703328121357, -0.0840944291284059, 0.0562899783045387, 0.137192754102300, 0.907605550316775, 0.788263163970441, -0.189167012249831, -0.0219422025252862, -0.0931560965857281, + 0.104132896092390, 0.307373593686928, 0.813213357087557, 1, -0.162877782549727, -0.0946212742252604, -0.0192432432422716, 0.131014027317413, 0.289495591671556, 0.752836252137069, 0.908489822222397, -0.151488045845577, -0.0637984858287899, -0.0498615936932623, + 0.0143281984142704, -0.0150490152853094, -0.225438606252670, -0.162877782549727, 1, -0.187527904866745, -0.163661457269968, 0.0720174301613615, -0.0805658506819631, -0.173027594836660, -0.165135405413428, 0.897082085156820, -0.0778400029130002, -0.00420375269007833, + 0.202871723469377, -0.149397471797000, 0.000735703328121357, -0.0946212742252604, -0.187527904866745, 1, -0.0853760806838177, 0.151981779909482, -0.0282514238500333, 0.00217240817323823, -0.0879254146125414, -0.0874961774514421, 0.647468312996265, -0.157070775414507, + -0.151909317725644, 0.0888348816281089, -0.0840944291284059, -0.0192432432422716, -0.163661457269968, -0.0853760806838177, 1, -0.109821275189057, 0.0605607584025393, -0.0674419385042876, -0.0178734643092523, -0.0879585887923949, -0.105931812299965, 0.761324011431321, + 0.945163870283100, -0.0794047082384602, 0.0562899783045387, 0.131014027317413, 0.0720174301613615, 0.151981779909482, -0.109821275189057, 1, -0.0744735914486929, 
0.0477307773362732, 0.116976058986177, 0.104731056969540, 0.137507782979518, -0.107158677162180, + -0.0778432911362303, 0.848433024094089, 0.137192754102300, 0.289495591671556, -0.0805658506819631, -0.0282514238500333, 0.0605607584025393, -0.0744735914486929, 1, 0.0782297693189997, 0.294723991917604, -0.0895932216480470, -0.0501878780366773, 0.0967842822751738, + 0.0495683691288216, 0.0950830201555768, 0.907605550316775, 0.752836252137069, -0.173027594836660, 0.00217240817323823, -0.0674419385042876, 0.0477307773362732, 0.0782297693189997, 1, 0.786122088469745, -0.177521125226899, 0.00868592321024064, -0.0671981184080449, + 0.0966843496273208, 0.288741920713302, 0.788263163970441, 0.908489822222397, -0.165135405413428, -0.0879254146125414, -0.0178734643092523, 0.116976058986177, 0.294723991917604, 0.786122088469745, 1, -0.168356869639510, -0.0773913106180435, -0.0274804568206274, + 0.0917721771113965, -0.0596971902001111, -0.189167012249831, -0.151488045845577, 0.897082085156820, -0.0874961774514421, -0.0879585887923949, 0.104731056969540, -0.0895932216480470, -0.177521125226899, -0.168356869639510, 1, -0.183773515755380, 0.00693211686662153, + 0.103741261286614, -0.0160895956989375, -0.0219422025252862, -0.0637984858287899, -0.0778400029130002, 0.647468312996265, -0.105931812299965, 0.137507782979518, -0.0501878780366773, 0.00868592321024064, -0.0773913106180435, -0.183773515755380, 1, -0.110874872058067, + -0.121195511065596, 0.113905908782625, -0.0931560965857281, -0.0498615936932623, -0.00420375269007833, -0.157070775414507, 0.761324011431321, -0.107158677162180, 0.0967842822751738, -0.0671981184080449, -0.0274804568206274, 0.00693211686662153, -0.110874872058067, 1; + + beta_distribution_pulse_ << + 1.69862554416145, 0.608190177030033, -0.608190177030033, -0.576217471720854, 0, 0.183071013159864, -0.0939319189357984, 0.00657091132855174, + -2.47924338395758, 0.670395625327187, 0, 0, 0, -0.263957799575519, -0.232548659903406, 0.00791988432530859, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + -4.24873260256537, 0.852185710838635, 0, 0.389602053633798, 0, -0.380323064996537, -0.0880126751081291, 0, + -2.11599237942931, 1.47405211856417, -1.37810504103118, -1.07311968166742, 0, 0.336513504829882, 0, 0, + -0.381092000896248, 0.732824259094057, 0, 0.216502277780155, 0, -0.162653108888503, -0.426573570884097, 0, + -5.56310544580757, 0.905239391642438, 0, 0.385150957140062, 0, -0.282428134685156, 0, 0, + -4.77682417817789, 0.879981585446539, 0, 0.310609998745732, 0, -0.339225914155431, 0, 0, + 0.966712608298821, -0.110938996751065, 0, 0, 0, 0, 0.183289269601829, 0, + -2.16587686889173, 0.321501356495567, 0, 0, 0, 0, 0, 0, + -1.70734873800232, 0.433032709755610, 0, -0.412648447269525, 0, 0, 0, 0, + -0.263198077701693, 1.13060571952101, -1.16957372638359, -1.65164476300789, 0.104746885300915, 0.404058507352901, 0, 0, + -0.515969600508314, 0.754135122993588, 0, 0.191575083960373, 0, -0.121665118341219, -0.423844926774291, 0, + -5.77208004012831, 0.923144661954273, 0, 0.402940883581593, 0, -0.238200773846646, 0, 0, + -5.01588271867143, 0.905027154000921, 0, 0.326841161038211, 0, -0.328283913521590, 0, 0, + 0.434339606308037, -0.125225415996048, 0, 0, 0, 0, 0.301631247865572, 0, + -2.87544520181323, 0.415682222485807, 0, 0, 0, 0, 0, 0, + -1.86755738290362, 0.457448335779201, 0, -0.501103981295545, 0, 0, 0, 0; + + beta_distribution_nopulse_ << + 8.09695881287823, 1.00609515629221, -1.39347614723327, -4.85869770683701, 0.472644100309933, 0.434550762616159, -0.862562872197509, 0, + 
-1.03473761679032, 0.769091178587874, 0, 0.412237308297152, 0, -0.377739650769220, -0.424234099315427, 0,
+      -4.72728279119446, 0.709717476708319, 0, 0.470974168011549, 0, -0.123518047425648, 0, 0,
+      -4.44400222195478, 0.798093247074753, 0, 0.345405210060350, 0, -0.230823340141895, 0, 0,
+      0.247133528936450, -0.149209862203390, 0, 0, 0, 0, 0.377202902904920, 0,
+      -1.44302935447839, 0.223053706671624, 0, 0, 0, 0, 0, 0,
+      -0.380413278316438, 0.159342468070527, 0, -0.298208438215333, 0, 0, 0, 0,
+      7.30682757526241, 0.999256668956432, -1.33082594407524, -4.95306361630276, 0.490554994733579, 0.442502068793772, -0.835310070621911, 0,
+      -0.403711730133755, 0.672375321924977, 0, 0.335372498461681, 0, -0.330322239630250, -0.366700025738387, 0,
+      -4.79820204505010, 0.709160958437296, 0, 0.472560804537015, 0, -0.0755764830052928, 0, 0,
+      -4.35041760661412, 0.785290791385159, 0, 0.325462132630085, 0, -0.221525656800750, 0, 0,
+      0.424849811725595, -0.181204207590470, 0, 0, 0, 0, 0.401549107903204, 0,
+      -2.97911606394595, 0.420016455603546, 0, 0, 0, 0, 0, 0,
+      -0.703694160589291, 0.160571013696218, 0, -0.145792047865653, 0, 0, 0, 0;
+
+  params_lower_bound_ << 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, -3.5, -4.7, 0, 0, 0, 0, 0, -3.5, -4.7;
+
+  params_upper_bound_ << 0, 0, 3.2, 2, 0, 0, 0, 0, 0, 0, 1.5, 0, 0, 0, 0, 0, 0, 1.5, 0;
+
+  params_fitted1_ << 0, 0, 1.30326178289206, 0, 0, 0, 0, 0, 0, 0, 14.2935537214223, 5.33551936215137, 0, 0, 0, 0, 0, 14.2935537214223, 5.33551936215137;
+
+  params_fitted2_ << 0, 0, 3.96858083951547, 0, 0, 0, 0, 0, 0, 0, 6.40242376475815, 3.82954843573707, 0, 0, 0, 0, 0, 6.40242376475815, 3.82954843573707;
+
+  params_fitted3_ << 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.42179588354923, 0, 0, 0, 0, 0, 0, 4.42179588354923, 0;
+  // clang-format on
+}
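With the regression constants in place, client code would normally obtain a model instance through the factory key registered in configure.cc rather than by direct construction. A minimal sketch under that assumption; the numeric arguments are illustrative only, with their meanings read off the constructor signature above:

    #include "dabaghi_der_kiureghian.h"
    #include "factory.h"

    auto model =
        Factory<stochastic::StochasticModel, stochastic::FaultType,
                stochastic::SimulationType, double, double, double, double,
                double, double, unsigned int, unsigned int, bool>::instance()
            ->create("DabaghiDerKiureghianNFGM",
                     stochastic::FaultType::StrikeSlip,
                     stochastic::SimulationType::PulseAndNoPulse,
                     7.0,    // moment magnitude
                     0.0,    // depth to top of rupture plane (km)
                     10.0,   // closest distance to rupture plane (km)
                     400.0,  // Vs30 (m/s)
                     20.0,   // s_or_d: directivity position parameter (km)
                     30.0,   // theta_or_phi: source-to-site angle (degrees)
                     2u,     // number of simulated parameter sets
                     1u,     // realizations per parameter set
                     true);  // truncate and baseline correct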
+utilities::JsonObject stochastic::DabaghiDerKiureghian::generate(
+    const std::string& event_name, bool units) {
+
+  // Create vectors for pulse-like and non-pulse-like motions
+  std::vector<std::vector<std::vector<double>>> pulse_motions_comp1(
+      num_sims_pulse_,
+      std::vector<std::vector<double>>(num_realizations_,
+                                       std::vector<double>()));
+
+  std::vector<std::vector<std::vector<double>>> pulse_motions_comp2(
+      num_sims_pulse_,
+      std::vector<std::vector<double>>(num_realizations_,
+                                       std::vector<double>()));
+
+  std::vector<std::vector<std::vector<double>>> nopulse_motions_comp1(
+      num_sims_nopulse_,
+      std::vector<std::vector<double>>(num_realizations_,
+                                       std::vector<double>()));
+
+  std::vector<std::vector<std::vector<double>>> nopulse_motions_comp2(
+      num_sims_nopulse_,
+      std::vector<std::vector<double>>(num_realizations_,
+                                       std::vector<double>()));
+
+  // Generate simulated acceleration time histories
+  try {
+    // Simulate model parameters
+    Eigen::MatrixXd parameters_pulse =
+        simulate_model_parameters(true, num_sims_pulse_);
+    Eigen::MatrixXd parameters_nopulse =
+        simulate_model_parameters(false, num_sims_nopulse_);
+
+    // Simulate pulse-like motions
+    for (unsigned int i = 0; i < num_sims_pulse_; ++i) {
+      simulate_near_fault_ground_motion(
+          true, parameters_pulse.row(i), pulse_motions_comp1[i],
+          pulse_motions_comp2[i], num_realizations_);
+    }
+
+    // Simulate non-pulse-like motions
+    for (unsigned int i = 0; i < num_sims_nopulse_; ++i) {
+      simulate_near_fault_ground_motion(
+          false, parameters_nopulse.row(i), nopulse_motions_comp1[i],
+          nopulse_motions_comp2[i], num_realizations_);
+    }
+
+    // If requested, truncate and baseline correct time histories
+    double gfactor = 981;
+    unsigned int fit_order = 5;
+    if (truncate_) {
+      // First truncate motions
+      for (unsigned int i = 0; i < num_sims_pulse_; ++i) {
+        truncate_time_histories(pulse_motions_comp1[i], pulse_motions_comp2[i],
+                                gfactor);
+      }
+
+      for (unsigned int i = 0; i < num_sims_nopulse_; ++i) {
+        truncate_time_histories(nopulse_motions_comp1[i],
+                                nopulse_motions_comp2[i], gfactor);
+      }
+
+      // Baseline correct truncated pulse-like motions
+      for (unsigned int i = 0; i < num_sims_pulse_; ++i) {
+        for (unsigned int j = 0; j < num_realizations_; ++j) {
+          baseline_correct_time_history(pulse_motions_comp1[i][j], gfactor,
+                                        fit_order);
+          baseline_correct_time_history(pulse_motions_comp2[i][j], gfactor,
+                                        fit_order);
+        }
+      }
+
+      // Baseline correct truncated non-pulse-like motions
+      for (unsigned int i = 0; i < num_sims_nopulse_; ++i) {
+        for (unsigned int j = 0; j < num_realizations_; ++j) {
+          baseline_correct_time_history(nopulse_motions_comp1[i][j], gfactor,
+                                        fit_order);
+          baseline_correct_time_history(nopulse_motions_comp2[i][j], gfactor,
+                                        fit_order);
+        }
+      }
+    }
+  } catch (const std::exception& e) {
+    std::cerr << e.what();
+    throw;
+  }
+
+  // Create JsonObject for events
+  auto events = utilities::JsonObject();
+  std::vector<utilities::JsonObject> events_array(
+      num_realizations_ * (num_sims_pulse_ + num_sims_nopulse_));
+
+  // Add pattern information for JSON
+  auto pattern_x = utilities::JsonObject();
+  auto pattern_y = utilities::JsonObject();
+  pattern_x.add_value("type", "UniformAcceleration");
+  pattern_x.add_value("timeSeries", "accel_x");
+  pattern_x.add_value("dof", 1);
+  pattern_y.add_value("type", "UniformAcceleration");
+  pattern_y.add_value("timeSeries", "accel_y");
+  pattern_y.add_value("dof", 2);
+
+  // Create JSON for specific event
+  auto event_data = utilities::JsonObject();
+  // Loop over simulations for different parameter sets for pulse-like
+  // motions
+  for (unsigned int i = 0; i < num_sims_pulse_; ++i) {
+    // Loop over number of realizations per parameter set realization
+    for (unsigned int j = 0; j < num_realizations_; ++j) {
+      event_data.add_value("name", event_name + "_ParameterSetPulse" +
+                                       std::to_string(i) + "_Sim" +
+                                       std::to_string(j));
+      event_data.add_value("type", "Seismic");
+      event_data.add_value("dT", time_step_);
+      event_data.add_value("numSteps", pulse_motions_comp1[i][j].size());
+      event_data.add_value(
+          "pattern", std::vector<utilities::JsonObject>{pattern_x, pattern_y});
+
+      // Convert time history units, if necessary
+      std::vector<double> x_accels(pulse_motions_comp1[i][j].size());
+      std::vector<double> y_accels(pulse_motions_comp2[i][j].size());
+      convert_time_history_units(pulse_motions_comp1[i][j], units);
+      convert_time_history_units(pulse_motions_comp2[i][j], units);
+
+      // Add time histories for x and y directions to event
+      auto time_history_x = utilities::JsonObject();
+      auto time_history_y = utilities::JsonObject();
+      time_history_x.add_value("name", "accel_x");
+      time_history_x.add_value("type", "Value");
+      time_history_x.add_value("dT", time_step_);
+      time_history_x.add_value("data", pulse_motions_comp1[i][j]);
+      time_history_y.add_value("name", "accel_y");
+      time_history_y.add_value("type", "Value");
+      time_history_y.add_value("dT", time_step_);
+      time_history_y.add_value("data", pulse_motions_comp2[i][j]);
+      event_data.add_value("timeSeries", std::vector<utilities::JsonObject>{
+                                             time_history_x, time_history_y});
+      events_array[i * num_realizations_ + j] = event_data;
+      event_data.clear();
+    }
+  }
+
+  // Loop over different simulations for parameter sets non-pulse-like
+  // motions
+  for (unsigned int i = 0; i < num_sims_nopulse_; ++i) {
+    // Loop over number of realizations per parameter set realization
+    for (unsigned int j = 0; j < num_realizations_; ++j) {
+      event_data.add_value("name", event_name + "_ParameterSetNoPulse" +
+                                       std::to_string(i) + "_Sim" +
+                                       std::to_string(j + num_sims_pulse_));
+      event_data.add_value("type", "Seismic");
+      event_data.add_value("dT", time_step_);
+      event_data.add_value("numSteps", nopulse_motions_comp1[i][j].size());
+      event_data.add_value(
+          "pattern", std::vector<utilities::JsonObject>{pattern_x, pattern_y});
+
+      // Convert time history units, if necessary
+      std::vector<double> x_accels(nopulse_motions_comp1[i][j].size());
+      std::vector<double> y_accels(nopulse_motions_comp2[i][j].size());
+      convert_time_history_units(nopulse_motions_comp1[i][j], units);
+      convert_time_history_units(nopulse_motions_comp2[i][j], units);
+
+      // Add time histories for x and y directions to event
+      auto time_history_x = utilities::JsonObject();
+      auto time_history_y = utilities::JsonObject();
+      time_history_x.add_value("name", "accel_x");
+      time_history_x.add_value("type", "Value");
+      time_history_x.add_value("dT", time_step_);
+      time_history_x.add_value("data", nopulse_motions_comp1[i][j]);
+      time_history_y.add_value("name", "accel_y");
+      time_history_y.add_value("type", "Value");
+      time_history_y.add_value("dT", time_step_);
+      time_history_y.add_value("data", nopulse_motions_comp2[i][j]);
+      event_data.add_value("timeSeries", std::vector<utilities::JsonObject>{
+                                             time_history_x, time_history_y});
+      events_array[i * num_realizations_ + j +
+                   num_realizations_ * num_sims_pulse_] = event_data;
+      event_data.clear();
+    }
+  }
+
+  events.add_value("Events", events_array);
+
+  return events;
+}
+
+bool stochastic::DabaghiDerKiureghian::generate(
+    const std::string& event_name, const std::string& output_location,
+    bool units) {
+  bool status = true;
+
+  // Generate pool of acceleration time histories
+  try {
+    auto json_output = generate(event_name, units);
+    json_output.write_to_file(output_location);
+  } catch (const std::exception& e) {
+    std::cerr << e.what();
+    status = false;
+    throw;
+  }
+
+  return status;
+}
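For reference, each entry of the returned "Events" array has the shape sketched below; the field names come directly from the add_value calls above, while the numeric values are placeholders:

    {
      "name": "<event_name>_ParameterSetPulse0_Sim0",
      "type": "Seismic",
      "dT": 0.005,
      "numSteps": 4000,
      "pattern": [
        {"type": "UniformAcceleration", "timeSeries": "accel_x", "dof": 1},
        {"type": "UniformAcceleration", "timeSeries": "accel_y", "dof": 2}
      ],
      "timeSeries": [
        {"name": "accel_x", "type": "Value", "dT": 0.005, "data": [...]},
        {"name": "accel_y", "type": "Value", "dT": 0.005, "data": [...]}
      ]
    }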
event_data.add_value("type", "Seismic"); + event_data.add_value("dT", time_step_); + event_data.add_value("numSteps", nopulse_motions_comp1[i][j].size()); + event_data.add_value( + "pattern", std::vector{pattern_x, pattern_y}); + + // Rotate accelerations, if necessary + std::vector x_accels(nopulse_motions_comp1[i][j].size()); + std::vector y_accels(nopulse_motions_comp2[i][j].size()); + convert_time_history_units(nopulse_motions_comp1[i][j], units); + convert_time_history_units(nopulse_motions_comp2[i][j], units); + + // Add time histories for x and y directions to event + auto time_history_x = utilities::JsonObject(); + auto time_history_y = utilities::JsonObject(); + time_history_x.add_value("name", "accel_x"); + time_history_x.add_value("type", "Value"); + time_history_x.add_value("dT", time_step_); + time_history_x.add_value("data", nopulse_motions_comp1[i][j]); + time_history_y.add_value("name", "accel_y"); + time_history_y.add_value("type", "Value"); + time_history_y.add_value("dT", time_step_); + time_history_y.add_value("data", nopulse_motions_comp2[i][j]); + event_data.add_value("timeSeries", std::vector{ + time_history_x, time_history_y}); + events_array[i * num_realizations_ + j + + num_realizations_ * num_sims_pulse_] = event_data; + event_data.clear(); + } + } + + events.add_value("Events", events_array); + + return events; +} + +bool stochastic::DabaghiDerKiureghian::generate( + const std::string& event_name, const std::string& output_location, + bool units) { + bool status = true; + + // Generate pool of acceleration time histories + try{ + auto json_output = generate(event_name, units); + json_output.write_to_file(output_location); + } catch (const std::exception& e) { + std::cerr << e.what(); + status = false; + throw; + } + + return status; +} + +unsigned int stochastic::DabaghiDerKiureghian::simulate_pulse_type( + unsigned int num_sims) const { + double pulse_probability = 0.0; + + // Calculate pulse probability for any type of pulse + if (faulting_ == stochastic::FaultType::StrikeSlip) { + pulse_probability = 1.0 / (1.0 + std::exp(0.457 + 0.126 * rupture_dist_ - + 0.244 * std::sqrt(s_or_d_) + + 0.013 * theta_or_phi_)); + } else { + pulse_probability = 1.0 / (1.0 + std::exp(0.304 + 0.072 * rupture_dist_ - + 0.208 * std::sqrt(s_or_d_) + + 0.021 * theta_or_phi_)); + } + + // Create random generator for uniform distribution between 0.0 and 1.0 + auto generator = + seed_value_ != std::numeric_limits::infinity() + ? boost::random::mt19937(static_cast(seed_value_)) + : boost::random::mt19937( + static_cast(std::time(nullptr))); + + boost::random::uniform_real_distribution<> distribution(0.0, 1.0); + boost::random::variate_generator> + pulse_gen(generator, distribution); + + unsigned int number_of_pulses = 0; + + for (unsigned int i = 0; i < num_sims; ++i) { + if (pulse_gen() < pulse_probability) { + number_of_pulses++; + } + } + + return number_of_pulses; +} + +Eigen::MatrixXd stochastic::DabaghiDerKiureghian::simulate_model_parameters( + bool pulse_like, unsigned int num_sims) { + // Calculate covariance matrix + Eigen::MatrixXd error_cov = + pulse_like + ? numeric_utils::corr_to_cov(corr_matrix_pulse_, std_dev_pulse_) + : numeric_utils::corr_to_cov(corr_matrix_nopulse_, std_dev_nopulse_); + + Eigen::MatrixXd simulated_params = pulse_like + ? Eigen::MatrixXd::Zero(num_sims, 19) + : Eigen::MatrixXd::Zero(num_sims, 14); + + Eigen::VectorXd error_mean = + pulse_like ? 
+Eigen::MatrixXd stochastic::DabaghiDerKiureghian::simulate_model_parameters(
+    bool pulse_like, unsigned int num_sims) {
+  // Calculate covariance matrix
+  Eigen::MatrixXd error_cov =
+      pulse_like
+          ? numeric_utils::corr_to_cov(corr_matrix_pulse_, std_dev_pulse_)
+          : numeric_utils::corr_to_cov(corr_matrix_nopulse_, std_dev_nopulse_);
+
+  Eigen::MatrixXd simulated_params = pulse_like
+                                         ? Eigen::MatrixXd::Zero(num_sims, 19)
+                                         : Eigen::MatrixXd::Zero(num_sims, 14);
+
+  Eigen::VectorXd error_mean =
+      pulse_like ? Eigen::VectorXd::Zero(19) : Eigen::VectorXd::Zero(14);
+
+  // Compute conditional mean values of transformed model parameters using
+  // regression coefficients
+  Eigen::VectorXd predicted_model_params =
+      compute_transformed_model_parameters(pulse_like);
+
+  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> parameter_realizations;
+  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> epsilon;
+
+  // Create simulated model parameters for specified number of motions
+  double test;
+  Eigen::VectorXd model_params(error_mean.size());
+
+  // Loop over number of simulations requested, generating parameter realizations
+  for (unsigned int i = 0; i < num_sims; ++i) {
+    test = -1.0;
+
+    // Keep sampling while the generated pulse-like parameters are unsatisfactory
+    while (test < 0.0) {
+      sample_generator_->generate(parameter_realizations, error_mean, error_cov,
+                                  1);
+      epsilon = pulse_like
+                    ? parameter_realizations.cwiseQuotient(std_dev_pulse_)
+                    : parameter_realizations.cwiseQuotient(std_dev_nopulse_);
+      double max_epsilon = epsilon.cwiseAbs().maxCoeff();
+
+      // Reject realizations with any standardized residual beyond 2 sigma
+      while (max_epsilon > 2.0) {
+        sample_generator_->generate(parameter_realizations, error_mean,
+                                    error_cov, 1);
+        epsilon = pulse_like
+                      ? parameter_realizations.cwiseQuotient(std_dev_pulse_)
+                      : parameter_realizations.cwiseQuotient(std_dev_nopulse_);
+        max_epsilon = epsilon.cwiseAbs().maxCoeff();
+      }
+
+      // Random realization of model parameters in normal space
+      model_params = predicted_model_params + parameter_realizations;
+      // Transform random realization to real space
+      transform_parameters_from_normal_space(pulse_like, model_params);
+
+      // Additional check on pulse-like parameters
+      if (pulse_like) {
+        test = model_params(4) - 0.5 * model_params(1) * model_params(2);
+      } else {
+        test = 1.0;
+      }
+    }
+
+    // Set current row of parameter realizations to generated model parameters
+    simulated_params.row(i) = model_params;
+  }
+
+  return simulated_params;
+}
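The inner loop above is plain rejection sampling: correlated error draws are discarded until every standardized residual lies within plus or minus two standard deviations, truncating the joint normal to a central box before the nonlinear marginal transforms are applied. Stripped of the Eigen plumbing, the per-component pattern reduces to the following standalone sketch (not part of this class's API):

    #include <cmath>
    #include <random>

    // Draw a standard-normal value truncated to [-2, 2] by rejection
    double truncated_normal_draw(std::mt19937& rng) {
      std::normal_distribution<double> dist(0.0, 1.0);
      double draw = dist(rng);
      while (std::fabs(draw) > 2.0) {  // reject tail samples and redraw
        draw = dist(rng);
      }
      return draw;
    }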
+Eigen::VectorXd
+    stochastic::DabaghiDerKiureghian::compute_transformed_model_parameters(
+        bool pulse_like) const {
+  // Calculate parameters and create parameter vector
+  double depth_parameter = depth_to_rupt_ < 1.0 ? depth_to_rupt_ : 1.0;
+  double site_parameter = vs30_ <= 1100.0 ? std::log(vs30_) : std::log(1100.0);
+  double fault_parameter =
+      faulting_ == stochastic::FaultType::StrikeSlip ? 0.0 : 1.0;
+
+  Eigen::VectorXd params_vector(8);
+  params_vector << 1.0, moment_magnitude_,
+      std::pow(moment_magnitude_ - magnitude_baseline_,
+               static_cast<double>(moment_magnitude_ > magnitude_baseline_)),
+      std::log(std::sqrt(rupture_dist_ * rupture_dist_ + c6_ * c6_)),
+      moment_magnitude_ *
+          std::log(std::sqrt(rupture_dist_ * rupture_dist_ + c6_ * c6_)),
+      fault_parameter * depth_parameter, site_parameter, s_or_d_;
+
+  if (std::abs(moment_magnitude_ - magnitude_baseline_) < 1e-16) {
+    params_vector(2) = 0.0;
+  }
+
+  // Calculate the mean predicted model parameters in normal space
+  if (pulse_like) {
+    return beta_distribution_pulse_ * params_vector;
+  } else {
+    return beta_distribution_nopulse_ * params_vector;
+  }
+}
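Restating the code in equation form: each transformed parameter mean is the same linear combination of eight source/site predictors, so for row i of the coefficient matrix B (with M the moment magnitude, M_0 the magnitude baseline, R the rupture distance, F the fault-mechanism indicator, Z the depth to rupture, and s the directivity parameter):

    \hat{\theta}_i = \beta_{i1} + \beta_{i2} M
                   + \beta_{i3}\,(M - M_0)^{\mathbf{1}[M > M_0]}
                   + \beta_{i4} \ln\sqrt{R^2 + c_6^2}
                   + \beta_{i5}\, M \ln\sqrt{R^2 + c_6^2}
                   + \beta_{i6}\, F \min(Z, 1)
                   + \beta_{i7} \ln\big(\min(V_{s30}, 1100)\big)
                   + \beta_{i8}\, s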
+void stochastic::DabaghiDerKiureghian::transform_parameters_from_normal_space(
+    bool pulse_like, Eigen::VectorXd& parameters) {
+  Eigen::VectorXd transformed_params(parameters.size());
+  auto standard_normal =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.0), std::move(1.0));
+
+  if (pulse_like) {
+    std::vector<unsigned int> indices = {0, 1, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16};
+    for (auto const& index : indices) {
+      transformed_params(index) = std::exp(parameters(index));
+    }
+
+    // Calculate gamma
+    auto beta_dist =
+        Factory<stochastic::Distribution, double, double>::instance()->create(
+            "BetaDist", std::move(params_fitted1_(2)),
+            std::move(params_fitted2_(2)));
+
+    transformed_params(2) =
+        ((beta_dist->inv_cumulative_dist_func(
+             standard_normal->cumulative_dist_func(
+                 std::vector<double>{parameters(2)})))
+             .at(0)) *
+            (params_upper_bound_(2) - params_lower_bound_(2)) +
+        params_lower_bound_(2);
+
+    // Calculate nu
+    auto uniform_dist =
+        Factory<stochastic::Distribution, double, double>::instance()->create(
+            "UniformDist", std::move(params_lower_bound_(3)),
+            std::move(params_upper_bound_(3)));
+
+    transformed_params(3) = (uniform_dist->inv_cumulative_dist_func(
+                                 standard_normal->cumulative_dist_func(
+                                     std::vector<double>{parameters(3)})))
+                                .at(0);
+
+    // Calculate f' residual
+    transformed_params(10) =
+        inv_double_exp(standard_normal->cumulative_dist_func(
+                           std::vector<double>{parameters(10)})[0],
+                       params_fitted1_(10), params_fitted2_(10),
+                       params_fitted3_(10), params_lower_bound_(10));
+
+    // Calculate depth_to_rupt residual
+    beta_dist =
+        Factory<stochastic::Distribution, double, double>::instance()->create(
+            "BetaDist", std::move(params_fitted1_(11)),
+            std::move(params_fitted2_(11)));
+
+    transformed_params(11) =
+        std::exp(((beta_dist->inv_cumulative_dist_func(
+                      standard_normal->cumulative_dist_func(
+                          std::vector<double>{parameters(11)})))
+                      .at(0)) *
+                     (params_upper_bound_(11) - params_lower_bound_(11)) +
+                 params_lower_bound_(11));
+
+    // Calculate f' pulse-only
+    transformed_params(17) =
+        inv_double_exp(standard_normal->cumulative_dist_func(
+                           std::vector<double>{parameters(17)})[0],
+                       params_fitted1_(17), params_fitted2_(17),
+                       params_fitted3_(17), params_lower_bound_(17));
+
+    // Calculate depth_to_rupt pulse-only
+    beta_dist =
+        Factory<stochastic::Distribution, double, double>::instance()->create(
+            "BetaDist", std::move(params_fitted1_(18)),
+            std::move(params_fitted2_(18)));
+
+    transformed_params(18) =
+        std::exp(((beta_dist->inv_cumulative_dist_func(
+                      standard_normal->cumulative_dist_func(
+                          std::vector<double>{parameters(18)})))
+                      .at(0)) *
+                     (params_upper_bound_(18) - params_lower_bound_(18)) +
+                 params_lower_bound_(18));
+  } else {
+    std::vector<unsigned int> indices = {0, 1, 2, 3, 4, 7, 8, 9, 10, 11};
+    for (auto const& index : indices) {
+      transformed_params(index) = std::exp(parameters(index));
+    }
+
+    // Calculate f' component 1
+    transformed_params(5) =
+        inv_double_exp(standard_normal->cumulative_dist_func(
+                           std::vector<double>{parameters(5)})[0],
+                       params_fitted1_(10), params_fitted2_(10),
+                       params_fitted3_(10), params_lower_bound_(10));
+
+    // Calculate depth_to_rupture component 1
+    auto beta_dist =
+        Factory<stochastic::Distribution, double, double>::instance()->create(
+            "BetaDist", std::move(params_fitted1_(11)),
+            std::move(params_fitted2_(11)));
+
+    transformed_params(6) =
+        std::exp(((beta_dist->inv_cumulative_dist_func(
+                      standard_normal->cumulative_dist_func(
+                          std::vector<double>{parameters(6)})))
+                      .at(0)) *
+                     (params_upper_bound_(11) - params_lower_bound_(11)) +
+                 params_lower_bound_(11));
+
+    // Calculate f' component 2
+    transformed_params(12) =
+        inv_double_exp(standard_normal->cumulative_dist_func(
+                           std::vector<double>{parameters(12)})[0],
+                       params_fitted1_(17), params_fitted2_(17),
+                       params_fitted3_(17), params_lower_bound_(17));
+
+    // Calculate depth_to_rupture component 2
+    beta_dist =
+        Factory<stochastic::Distribution, double, double>::instance()->create(
+            "BetaDist", std::move(params_fitted1_(18)),
+            std::move(params_fitted2_(18)));
+
+    transformed_params(13) =
+        std::exp(((beta_dist->inv_cumulative_dist_func(
+                      standard_normal->cumulative_dist_func(
+                          std::vector<double>{parameters(13)})))
+                      .at(0)) *
+                     (params_upper_bound_(18) - params_lower_bound_(18)) +
+                 params_lower_bound_(18));
+  }
+
+  // Set input vector of parameters to transformed parameters
+  parameters = transformed_params;
+}
+
+double stochastic::DabaghiDerKiureghian::inv_double_exp(
+    double probability, double param_a, double param_b, double param_c,
+    double lower_bound) const {
+  if (probability < 0.0 || probability > 1.0) {
+    throw std::runtime_error(
+        "\nERROR: in stochastic::DabaghiDerKiureghian::inv_double_exp: "
+        "Probability argument less than 0.0 or greater than 1.0\n");
+  }
+
+  double location_inv =
+      (1.0 / param_b) * std::log((param_b / param_c) * probability +
+                                 std::exp(param_b * lower_bound));
+
+  if (location_inv < lower_bound || location_inv > 0.0) {
+    location_inv =
+        -(1.0 / param_a) *
+        std::log((param_a / param_b) * (1.0 - std::exp(param_b * lower_bound)) -
+                 (param_a / param_c) * probability + 1.0);
+  }
+
+  return location_inv;
+}
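All of these marginal transforms follow one inverse-CDF pattern: push the standard-normal realization through the normal CDF to get a probability, then through the target marginal's inverse CDF, rescaled to its bounds where applicable. A compact standalone sketch of the idea, using std::erf instead of the library's Distribution classes:

    #include <cmath>

    // Standard normal CDF via the error function
    double normal_cdf(double z) {
      return 0.5 * (1.0 + std::erf(z / std::sqrt(2.0)));
    }

    // Map a standard-normal draw z to a uniform marginal on [lower, upper]
    double to_uniform_marginal(double z, double lower, double upper) {
      // Inverse CDF of U[lower, upper] evaluated at p = normal_cdf(z)
      return lower + (upper - lower) * normal_cdf(z);
    }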
+void stochastic::DabaghiDerKiureghian::simulate_near_fault_ground_motion(
+    bool pulse_like, const Eigen::VectorXd& parameters,
+    std::vector<std::vector<double>>& accel_comp_1,
+    std::vector<std::vector<double>>& accel_comp_2,
+    unsigned int num_gms) const {
+
+  // Extract parameters for two components of ground motion
+  Eigen::VectorXd alpha_1(7);
+  Eigen::VectorXd alpha_2(7);
+  if (pulse_like) {
+    alpha_1 = parameters.segment(5, 7);
+    alpha_2 = parameters.segment(12, 7);
+  } else {
+    alpha_1 = parameters.segment(0, 7);
+    alpha_2 = parameters.segment(7, 7);
+  }
+
+  // Set modulating and filter parameters
+  Eigen::VectorXd modulating_params_1 =
+      backcalculate_modulating_params(alpha_1.segment(0, 4), start_time_);
+  Eigen::VectorXd modulating_params_2 =
+      backcalculate_modulating_params(alpha_2.segment(0, 4), start_time_);
+
+  Eigen::VectorXd filter_params_1 = alpha_1.segment(4, 3);
+  Eigen::VectorXd filter_params_2 = alpha_2.segment(4, 3);
+
+  // Determine length of time for simulation
+  double t95 = start_time_ + alpha_1[1] + alpha_1[2] >
+                       start_time_ + alpha_2[1] + alpha_2[2]
+                   ? start_time_ + alpha_1[1] + alpha_1[2]
+                   : start_time_ + alpha_2[1] + alpha_2[2];
+
+  unsigned int num_steps =
+      static_cast<unsigned int>(std::ceil(2.5 * t95 / time_step_));
+
+  num_steps = num_steps % 2 == 1 ? num_steps + 1 : num_steps;
+
+  // Generate modulated filtered white noise
+  auto white_noise_1 = simulate_white_noise(
+      modulating_params_1, filter_params_1, num_steps, num_gms);
+  auto white_noise_2 = simulate_white_noise(
+      modulating_params_2, filter_params_2, num_steps, num_gms);
+
+  // Calculate high-pass filter and padding
+  double freq_corner = std::pow(10.0, 1.4071 - 0.3452 * moment_magnitude_);
+  unsigned int filter_order = 4;
+  double padding_duration = 0.5 * 1.5 * filter_order / freq_corner;
+  unsigned int num_pads =
+      static_cast<unsigned int>(std::ceil(padding_duration / time_step_));
+
+  // Add zero-padding
+  Eigen::MatrixXd accel_padded_1 =
+      Eigen::MatrixXd::Zero(num_gms, num_pads + num_steps + num_pads);
+  Eigen::MatrixXd accel_padded_2 =
+      Eigen::MatrixXd::Zero(num_gms, num_pads + num_steps + num_pads);
+
+  for (unsigned int i = 0; i < num_gms; ++i) {
+    // Pad component 1
+    accel_padded_1.block(i, 0, 1, num_pads) =
+        Eigen::RowVectorXd::Zero(num_pads);
+    accel_padded_1.block(i, num_pads - 1, 1, num_steps) = white_noise_1.row(i);
+    accel_padded_1.block(i, num_pads + num_steps - 1, 1, num_pads) =
+        Eigen::RowVectorXd::Zero(num_pads);
+
+    // Pad component 2
+    accel_padded_2.block(i, 0, 1, num_pads) =
+        Eigen::RowVectorXd::Zero(num_pads);
+    accel_padded_2.block(i, num_pads - 1, 1, num_steps) = white_noise_2.row(i);
+    accel_padded_2.block(i, num_pads + num_steps - 1, 1, num_pads) =
+        Eigen::RowVectorXd::Zero(num_pads);
+  }
+
+  // Apply filter to padded acceleration time histories
+  accel_comp_1.resize(num_gms);
+  accel_comp_2.resize(num_gms);
+  for (unsigned int i = 0; i < num_gms; ++i) {
+    accel_comp_1[i] =
+        filter_acceleration(accel_padded_1.row(i), freq_corner, filter_order);
+    accel_comp_2[i] =
+        filter_acceleration(accel_padded_2.row(i), freq_corner, filter_order);
+  }
+
+  // Rescale time histories for energy consistency:
+  // Target Arias intensity for rescaling after high-pass filter in g-sec
+  double target_ai_1 = alpha_1(0) / 981;
+  double target_ai_2 = alpha_2(0) / 981;
+
+  std::vector<std::vector<double>> arias_intensity_1(
+      num_gms, std::vector<double>(accel_comp_1[0].size()));
+  std::vector<std::vector<double>> arias_intensity_2(
+      num_gms, std::vector<double>(accel_comp_2[0].size()));
+
+  // Calculate Arias intensity
+  for (unsigned int i = 0; i < num_gms; ++i) {
+    std::transform(accel_comp_1[i].begin(), accel_comp_1[i].end(),
+                   arias_intensity_1[i].begin(),
+                   [this](double value) -> double {
+                     return value * value * time_step_ * M_PI / 2.0;
+                   });
+
+    std::partial_sum(arias_intensity_1[i].begin(), arias_intensity_1[i].end(),
+                     arias_intensity_1[i].begin());
+
+    std::transform(accel_comp_2[i].begin(), accel_comp_2[i].end(),
+                   arias_intensity_2[i].begin(),
+                   [this](double value) -> double {
+                     return value * value * time_step_ * M_PI / 2.0;
+                   });
+
+    std::partial_sum(arias_intensity_2[i].begin(), arias_intensity_2[i].end(),
+                     arias_intensity_2[i].begin());
+  }
+
+  // Calculate scaling factors and scale accelerations to match Arias intensity
+  for (unsigned int i = 0; i < num_gms; ++i) {
+    double scale_factor_1 = std::sqrt(
+        target_ai_1 / arias_intensity_1[i][arias_intensity_1[i].size() - 1]);
+    double scale_factor_2 = std::sqrt(
+        target_ai_2 / arias_intensity_2[i][arias_intensity_2[i].size() - 1]);
+
+    std::transform(accel_comp_1[i].begin(), accel_comp_1[i].end(),
+                   accel_comp_1[i].begin(),
+                   [&scale_factor_1](double value) -> double {
+                     return value * scale_factor_1;
+                   });
+
+    std::transform(accel_comp_2[i].begin(), accel_comp_2[i].end(),
+                   accel_comp_2[i].begin(),
+                   [&scale_factor_2](double value) -> double {
+                     return value * scale_factor_2;
+                   });
+  }
+
+  // If pulse-like, add pulse acceleration to component 1 direction
+  if (pulse_like) {
+    // Calculate pulse acceleration
+    auto pulse_accel = calc_pulse_acceleration(num_steps, parameters);
+
+    // Add pulse motion to component 1
+    for (unsigned int i = 0; i < num_gms; ++i) {
+      for (unsigned int j = 0; j < pulse_accel.size(); ++j) {
+        accel_comp_1[i][j + num_pads - 1] =
+            accel_comp_1[i][j + num_pads - 1] + pulse_accel[j];
+      }
+    }
+  }
+}
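The rescaling step above enforces energy consistency: each filtered realization is scaled so that its Arias intensity matches the target implied by the model parameters. In the discrete, g-normalized form the code uses:

    I_a \approx \frac{\pi}{2} \sum_k a_k^2\,\Delta t,
    \qquad
    a_k^{\mathrm{scaled}} = a_k \sqrt{\frac{I_a^{\mathrm{target}}}{I_a^{\mathrm{realized}}}}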
+Eigen::VectorXd
+    stochastic::DabaghiDerKiureghian::backcalculate_modulating_params(
+        const Eigen::VectorXd& q_params, double t0) const {
+  double arias_intensity = q_params(0) / 981,  // Convert from cm/s to g-s
+         d595 = q_params(1), d05 = q_params(2),
+         d030 = q_params(3), d095 = d05 + d595,
+         t30 = t0 + d030;
+
+  // Search for local minimum by trying several starting points
+  optimization::NelderMead minimizer(1e-10);
+  std::vector<double> diffs(6);
+  std::function<double(const std::vector<double>&)> error_function =
+      std::bind(&stochastic::DabaghiDerKiureghian::calc_parameter_error, this,
+                std::placeholders::_1, d05, d030, d095, t0);
+
+  // Iterate over starting points
+  std::vector<std::vector<double>> starting_points = {
+      {1.0, 0.2, t30}, {2.0, 0.2, t30}, {5.0, 0.2, t30},
+      {1.0, 1.0, t30}, {2.0, 1.0, t30}, {5.0, 1.0, t30}};
+  std::vector<double> deltas(starting_points[0].size());
+
+  unsigned int point_counter = 0;
+  for (auto& point : starting_points) {
+    for (unsigned int i = 0; i < deltas.size(); ++i) {
+      deltas[i] = std::abs(point[i]) < 1.0e-6 ? 0.00025 : 0.05 * std::abs(point[i]);
+    }
+
+    point = minimizer.minimize(point, deltas, error_function);
+    diffs[point_counter] = error_function(point);
+    ++point_counter;
+  }
+
+  // Penalize solutions with negative alpha by inflating their cost
+  for (unsigned int i = 0; i < starting_points.size(); ++i) {
+    if (starting_points[i][0] < 0.0) {
+      diffs[i] *= 10000.0;
+    }
+  }
+
+  // Find solution that matches target values best
+  auto min_index = std::distance(
+      std::begin(diffs), std::min_element(std::begin(diffs), std::end(diffs)));
+
+  Eigen::VectorXd parameters(4);
+  parameters << starting_points[min_index][0], starting_points[min_index][1],
+      starting_points[min_index][2], std::numeric_limits<double>::infinity();
+
+  parameters[3] = std::sqrt(
+      arias_intensity /
+      ((M_PI / 2.0) * ((parameters[2] - t0) / (2.0 * parameters[0] + 1.0) +
+                       1.0 / (2.0 * parameters[1]))));
+
+  return parameters;
+}
+
+double stochastic::DabaghiDerKiureghian::calc_parameter_error(
+    const std::vector<double>& parameters, double d05_target,
+    double d030_target, double d095_target, double t0) const {
+  // Modulating function parameters
+  double alpha = parameters[0], beta = parameters[1], t_max_q = parameters[2];
+
+  // Arias intensity times corresponding to the selected modulating function
+  // and parameters
+  double t5_fit =
+      t0 + std::pow((5.0 / 100.0) * std::pow(t_max_q - t0, 2.0 * alpha) *
+                        ((t_max_q - t0) + (2.0 * alpha + 1.0) / (2.0 * beta)),
+                    1.0 / (2.0 * alpha + 1.0));
+
+  double t30_fit =
+      t0 + std::pow((30.0 / 100.0) * std::pow(t_max_q - t0, 2.0 * alpha) *
+                        ((t_max_q - t0) + (2.0 * alpha + 1.0) / (2.0 * beta)),
+                    1.0 / (2.0 * alpha + 1.0));
+
+  double t95_fit =
+      t0 + std::pow((95.0 / 100.0) * std::pow(t_max_q - t0, 2.0 * alpha) *
+                        ((t_max_q - t0) + (2.0 * alpha + 1.0) / (2.0 * beta)),
+                    1.0 / (2.0 * alpha + 1.0));
+
+  if (t5_fit > t_max_q) {
+    t5_fit = t_max_q -
+             (1.0 / (2.0 * beta)) *
+                 std::log(((100.0 - 5.0) / 100.0) *
+                          ((t_max_q - t0) * (2.0 * beta) / (2.0 * alpha + 1.0) +
+                           1.0));
+  }
+
+  if (t30_fit > t_max_q) {
+    t30_fit =
+        t_max_q -
+        (1.0 / (2.0 * beta)) *
+            std::log(
+                ((100.0 - 30.0) / 100.0) *
+                ((t_max_q - t0) * (2.0 * beta) / (2.0 * alpha + 1.0) + 1.0));
+  }
+
+  if (t95_fit > t_max_q) {
+    t95_fit =
+        t_max_q -
+        (1.0 / (2.0 * beta)) *
+            std::log(
+                ((100.0 - 95.0) / 100.0) *
+                ((t_max_q - t0) * (2.0 * beta) / (2.0 * alpha + 1.0) + 1.0));
+  }
+
+  // Duration parameters of corresponding modulating function
+  double d05_fit = t5_fit - t0;
+  double d030_fit = t30_fit - t0;
+  double d095_fit = t95_fit - t0;
+
+  // Error measure
+  // NOTE: It doesn't matter whether the terms are normalized or not
+  return std::pow(d05_target - d05_fit, 2) +
+         std::pow(d030_target - d030_fit, 2) +
+         std::pow(d095_target - d095_fit, 2);
+}
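Once the Nelder-Mead search has fixed the shape parameters (alpha_1, alpha_2, t_max_q), the amplitude is recovered in closed form from the target Arias intensity, which is exactly what the final assignment to parameters[3] evaluates:

    \alpha_4 = \sqrt{\dfrac{I_a}{\frac{\pi}{2}\left(\dfrac{t_{max,q} - t_0}{2\alpha_1 + 1} + \dfrac{1}{2\alpha_2}\right)}}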
+Eigen::MatrixXd stochastic::DabaghiDerKiureghian::simulate_white_noise(
+    const Eigen::VectorXd& modulating_params,
+    const Eigen::VectorXd& filter_params, unsigned int num_steps,
+    unsigned int num_gms) const {
+  // CALCULATE MODULATING FUNCTION:
+  auto modulating_func =
+      calc_modulating_func(num_steps, start_time_, modulating_params);
+
+  // CALCULATE FREQUENCY FUNCTION:
+  // For any general modulating function, get the discretized times of interest
+  // Lower bound before t01
+  double t01 = calc_time_to_intensity(modulating_func, 1.0);
+  // Middle set to t30
+  double tmid = calc_time_to_intensity(modulating_func, 30.0);
+  // Upper bound after t99
+  double t99 = calc_time_to_intensity(modulating_func, 99.0);
+
+  // Define the filter frequency and bandwidth
+  auto frequency_filter =
+      calc_linear_filter(num_steps, filter_params, t01, tmid, t99);
+
+  // Generate white noise
+  auto generator =
+      seed_value_ != std::numeric_limits<int>::infinity()
+          ? boost::random::mt19937(static_cast<unsigned int>(seed_value_))
+          : boost::random::mt19937(
+                static_cast<unsigned int>(std::time(nullptr)));
+
+  boost::random::normal_distribution<> distribution(0.0, 1.0);
+  boost::random::variate_generator<boost::random::mt19937&,
+                                   boost::random::normal_distribution<>>
+      noise_gen(generator, distribution);
+
+  Eigen::MatrixXd white_noise(num_gms, num_steps);
+  for (unsigned int i = 0; i < num_gms; ++i) {
+    for (unsigned int j = 0; j < num_steps; ++j) {
+      white_noise(i, j) = noise_gen();
+    }
+  }
+
+  // Calculate impulse response
+  Eigen::MatrixXd impulse_response = calc_impulse_response_filter(
+      num_steps, frequency_filter, filter_params(2));
+
+  auto freq_func = white_noise * impulse_response;
+
+  Eigen::MatrixXd filtered_white_noise(num_gms, num_steps);
+  // Convert modulating function to Eigen::VectorXd
+  Eigen::VectorXd mod_func_vec = Eigen::Map<Eigen::VectorXd>(
+      modulating_func.data(), modulating_func.size());
+
+  for (unsigned int i = 0; i < num_gms; ++i) {
+    filtered_white_noise.row(i) =
+        freq_func.row(i).cwiseProduct(mod_func_vec.transpose());
+  }
+
+  return filtered_white_noise;
+}
+
+std::vector<double> stochastic::DabaghiDerKiureghian::calc_modulating_func(
+    unsigned int num_steps, double t0,
+    const Eigen::VectorXd& parameters) const {
+
+  std::vector<double> mod_func_vals(num_steps);
+
+  for (unsigned int i = 0; i < num_steps; ++i) {
+    double time = static_cast<double>(i) * time_step_;
+
+    if (time < t0) {
+      mod_func_vals[i] = 0.0;
+    } else if (time < parameters(2)) {
+      mod_func_vals[i] =
+          parameters(3) *
+          std::pow((time - t0) / (parameters(2) - t0), parameters[0]);
+    } else {
+      mod_func_vals[i] =
+          parameters(3) * std::exp(-parameters(1) * (time - parameters[2]));
+    }
+  }
+
+  return mod_func_vals;
+}
+
+double stochastic::DabaghiDerKiureghian::calc_time_to_intensity(
+    const std::vector<double>& acceleration, double percentage) const {
+  // Calculate cumulative energy in acceleration time series, which is
+  // proportional to Arias intensity
+  std::vector<double> t01_sum(acceleration.size());
+
+  std::transform(acceleration.begin(), acceleration.end(), t01_sum.begin(),
+                 [](double value) -> double { return value * value; });
+
+  std::partial_sum(t01_sum.begin(), t01_sum.end(), t01_sum.begin());
+
+  // Calculate normalized cumulative Arias intensity in percent
+  std::transform(t01_sum.begin(), t01_sum.end(), t01_sum.begin(),
+                 [&t01_sum](double value) -> double {
+                   return value / t01_sum[t01_sum.size() - 1] * 100.0;
+                 });
+
+  return time_step_ *
+         static_cast<double>(
+             std::distance(t01_sum.begin(),
+                           std::find_if(t01_sum.begin(), t01_sum.end(),
+                                        [percentage](double value) {
+                                          return value >= percentage;
+                                        })) +
+             1);
+}
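calc_modulating_func above is the deterministic time-domain envelope q(t) that shapes the white noise; written as a piecewise function of its four parameters (alpha_1 = parameters(0), alpha_2 = parameters(1), t_max_q = parameters(2), alpha_4 = parameters(3)):

    q(t) = \begin{cases}
      0, & t < t_0 \\
      \alpha_4 \left(\dfrac{t - t_0}{t_{max,q} - t_0}\right)^{\alpha_1}, & t_0 \le t < t_{max,q} \\
      \alpha_4\, e^{-\alpha_2 (t - t_{max,q})}, & t \ge t_{max,q}
    \end{cases}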
std::vector<double> t01_sum(acceleration.size());
+
+  std::transform(acceleration.begin(), acceleration.end(), t01_sum.begin(),
+                 [](double value) -> double { return value * value; });
+
+  std::partial_sum(t01_sum.begin(), t01_sum.end(), t01_sum.begin());
+
+  // Calculate normalized cumulative Arias intensity in percent
+  std::transform(t01_sum.begin(), t01_sum.end(), t01_sum.begin(),
+                 [&t01_sum](double value) -> double {
+                   return value / t01_sum[t01_sum.size() - 1] * 100.0;
+                 });
+
+  return time_step_ *
+         static_cast<double>(
+             std::distance(t01_sum.begin(),
+                           std::find_if(t01_sum.begin(), t01_sum.end(),
+                                        [percentage](double value) {
+                                          return value >= percentage;
+                                        })) +
+             1);
+}
+
+std::vector<double> stochastic::DabaghiDerKiureghian::calc_linear_filter(
+    unsigned int num_steps, const Eigen::VectorXd& filter_params, double t01,
+    double tmid, double t99) const {
+  // Minimum frequency in Hz
+  double min_freq = 0.3;
+  std::vector<double> filter_func(num_steps);
+  // Frequency at tmid, in Hz
+  double mid_freq = filter_params(0);
+  // Slope of frequency assumed constant
+  double freq_slope = filter_params(1);
+
+  for (unsigned int i = 0; i < num_steps; ++i) {
+    double current_time = i * time_step_;
+    if (current_time < t01) {
+      filter_func[i] =
+          min_freq > mid_freq + freq_slope * (t01 - tmid)
+              ? min_freq * 2.0 * M_PI
+              : (mid_freq + freq_slope * (t01 - tmid)) * 2.0 * M_PI;
+    } else if (current_time <= t99) {
+      filter_func[i] =
+          min_freq > mid_freq + freq_slope * (current_time - tmid)
+              ? min_freq * 2.0 * M_PI
+              : (mid_freq + freq_slope * (current_time - tmid)) * 2.0 * M_PI;
+    } else {
+      filter_func[i] =
+          min_freq > mid_freq + freq_slope * (t99 - tmid)
+              ? min_freq * 2.0 * M_PI
+              : (mid_freq + freq_slope * (t99 - tmid)) * 2.0 * M_PI;
+    }
+  }
+
+  return filter_func;
+}
+
+Eigen::MatrixXd stochastic::DabaghiDerKiureghian::calc_impulse_response_filter(
+    unsigned int num_steps, const std::vector<double>& input_filter,
+    double zeta) const {
+  Eigen::MatrixXd impulse_response =
+      Eigen::MatrixXd::Zero(num_steps, num_steps);
+
+  for (unsigned int i = 0; i < num_steps; ++i) {
+    double omega = input_filter[i];
+    Eigen::VectorXd times(num_steps - i);
+
+    for (unsigned int j = 0; j < times.size(); ++j) {
+      times(j) = static_cast<double>(j) * time_step_;
+    }
+
+    impulse_response.block(i, i, 1, times.size()) =
+        ((omega / std::sqrt(1.0 - zeta * zeta)) *
+         ((-zeta * omega * times).array().exp()) *
+         ((omega * std::sqrt(1.0 - zeta * zeta) * times).array().sin()))
+            .matrix()
+            .transpose();
+  }
+
+  Eigen::VectorXd denominator =
+      ((impulse_response.array().pow(2.0)).matrix().colwise().sum())
+          .array()
+          .sqrt();
+  denominator(0) = 0.1;
+
+  for (unsigned int i = 0; i < impulse_response.rows(); ++i) {
+    impulse_response.row(i) =
+        impulse_response.row(i).cwiseQuotient(denominator.transpose());
+  }
+
+  return impulse_response;
+}
+
+std::vector<double> stochastic::DabaghiDerKiureghian::filter_acceleration(
+    const Eigen::VectorXd& accel_history, double freq_corner,
+    unsigned int filter_order) const {
+
+  // Calculate normalized cutoff frequency
+  std::vector<std::complex<double>> accel_fft(accel_history.size());
+
+  // Compute FFT of acceleration history
+  numeric_utils::fft(accel_history, accel_fft);
+
+  // Get filter coefficients
+  auto filter = Dispatcher<std::vector<double>, double, double, unsigned int,
+                           unsigned int>::instance()
+                    ->dispatch("AcausalHighpassButterworth", freq_corner,
+                               time_step_, filter_order, accel_fft.size());
+
+  // Filter acceleration in frequency domain
+  for (unsigned int i = 0; i < accel_fft.size(); ++i) {
+    accel_fft[i] = accel_fft[i] * filter[i];
+  }
+
+  // 
Compute inverse FFT of filtered transformed acceleration + std::vector filtered_acc(accel_fft.size()); + numeric_utils::inverse_fft(accel_fft, filtered_acc); + + return filtered_acc; +} + +std::vector stochastic::DabaghiDerKiureghian::calc_pulse_acceleration( + unsigned int num_steps, const Eigen::VectorXd& parameters) const { + double pulse_velocity = parameters(0); + double pulse_frequency = 1.0 / parameters(1); + double oscillation_param = parameters(2); + double phase_angle = parameters(3) * M_PI; + double peak_time = start_time_ + parameters(4); + + double resp_disp = pulse_velocity / (4.0 * M_PI * pulse_frequency) * + std::sin(phase_angle + oscillation_param * M_PI) / + (1 - oscillation_param * oscillation_param) - + pulse_velocity / (4.0 * M_PI * pulse_frequency) * + std::sin(phase_angle - oscillation_param * M_PI) / + (1.0 - oscillation_param * oscillation_param); + + // Calculate pulse velocity time history + std::vector velocity_history(num_steps); + + for (unsigned int i = 0; i < num_steps; ++i) { + double time = static_cast(i) * time_step_; + if (time > (peak_time - 0.5 * oscillation_param / pulse_frequency) && + time <= (peak_time + 0.5 * oscillation_param / pulse_frequency)) { + velocity_history[i] = + (0.5 * pulse_velocity * + std::cos(2.0 * M_PI * pulse_frequency * (time - peak_time) + + phase_angle) - + resp_disp * pulse_frequency / oscillation_param) * + (1.0 + std::cos(2.0 * M_PI * pulse_frequency * (time - peak_time) / + oscillation_param)); + } + else { + velocity_history[i] = 0.0; + } + } + + // Calculate pulse acceleration time history + return numeric_utils::derivative(velocity_history, 1.0 / (981.0 * time_step_), true); +} + +void stochastic::DabaghiDerKiureghian::truncate_time_histories( + std::vector>& accel_comp_1, + std::vector>& accel_comp_2, double gfactor, + double amplitude_lim, double pgd_lim) const { + + // Iterate over time histories + for (unsigned int i = 0; i < accel_comp_1.size(); ++i) { + // Calculate peak ground displacement (PGD): + // Component 1 + std::vector vel_comp_1(accel_comp_1[i].size()); + std::vector disp_comp_1(accel_comp_1[i].size()); + + std::transform(accel_comp_1[i].begin(), accel_comp_1[i].end(), + vel_comp_1.begin(), + [&gfactor, this](double value) -> double { + return value * gfactor * time_step_; + }); + + std::partial_sum(vel_comp_1.begin(), vel_comp_1.end(), + vel_comp_1.begin()); + + std::transform( + vel_comp_1.begin(), vel_comp_1.end(), disp_comp_1.begin(), + [this](double value) -> double { return value * time_step_; }); + + std::partial_sum(disp_comp_1.begin(), disp_comp_1.end(), + disp_comp_1.begin()); + + double pgd_1 = *std::max_element(disp_comp_1.begin(), disp_comp_1.end()); + + double disp_limit_1 = + amplitude_lim < pgd_1 * pgd_lim ? amplitude_lim : pgd_1 * pgd_lim; + + // Component 2 + std::vector vel_comp_2(accel_comp_2[i].size()); + std::vector disp_comp_2(accel_comp_2[i].size()); + + std::transform(accel_comp_2[i].begin(), accel_comp_2[i].end(), + vel_comp_2.begin(), + [&gfactor, this](double value) -> double { + return value * gfactor * time_step_; + }); + + std::partial_sum(vel_comp_2.begin(), vel_comp_2.end(), + vel_comp_2.begin()); + + std::transform( + vel_comp_2.begin(), vel_comp_2.end(), disp_comp_2.begin(), + [this](double value) -> double { return value * time_step_; }); + + std::partial_sum(disp_comp_2.begin(), disp_comp_2.end(), + disp_comp_2.begin()); + + double pgd_2 = *std::max_element(disp_comp_2.begin(), disp_comp_2.end()); + + double disp_limit_2 = + amplitude_lim < pgd_2 * pgd_lim ? 
amplitude_lim : pgd_2 * pgd_lim;
+
+    // Calculate displacement limit indices:
+    // Component 1
+    unsigned int initial_index_1 =
+        static_cast<unsigned int>(
+            std::distance(disp_comp_1.begin(),
+                          std::find_if(disp_comp_1.begin(), disp_comp_1.end(),
+                                       [&disp_limit_1](double value) {
+                                         return value > disp_limit_1;
+                                       }))) -
+        1;
+
+    unsigned int final_index_1 =
+        static_cast<unsigned int>(disp_comp_1.size()) -
+        static_cast<unsigned int>(
+            std::distance(disp_comp_1.rbegin(),
+                          std::find_if(disp_comp_1.rbegin(), disp_comp_1.rend(),
+                                       [&disp_limit_1](double value) {
+                                         return value > disp_limit_1;
+                                       })));
+
+    // Component 2
+    unsigned int initial_index_2 =
+        static_cast<unsigned int>(
+            std::distance(disp_comp_2.begin(),
+                          std::find_if(disp_comp_2.begin(), disp_comp_2.end(),
+                                       [&disp_limit_2](double value) {
+                                         return value > disp_limit_2;
+                                       }))) -
+        1;
+
+    unsigned int final_index_2 =
+        static_cast<unsigned int>(disp_comp_2.size()) -
+        static_cast<unsigned int>(
+            std::distance(disp_comp_2.rbegin(),
+                          std::find_if(disp_comp_2.rbegin(), disp_comp_2.rend(),
+                                       [&disp_limit_2](double value) {
+                                         return value > disp_limit_2;
+                                       })));
+
+    // Truncate acceleration
+    unsigned int initial_index =
+        initial_index_1 <= initial_index_2 ? initial_index_1 : initial_index_2;
+    unsigned int final_index =
+        final_index_1 >= final_index_2 ? final_index_1 : final_index_2;
+
+    if (final_index == disp_comp_1.size() - 1) {
+      final_index -= 1;
+    }
+
+    accel_comp_1[i] =
+        std::vector<double>(accel_comp_1[i].begin() + initial_index,
+                            accel_comp_1[i].begin() + final_index);
+    accel_comp_2[i] =
+        std::vector<double>(accel_comp_2[i].begin() + initial_index,
+                            accel_comp_2[i].begin() + final_index);
+
+  }
+}
+
+void stochastic::DabaghiDerKiureghian::baseline_correct_time_history(
+    std::vector<double>& time_history, double gfactor,
+    unsigned int order) const {
+
+  // Calculate velocity and displacement time histories
+  std::vector<double> vel_series(time_history.size());
+  std::vector<double> disp_series(time_history.size());
+
+  std::transform(time_history.begin(), time_history.end(), vel_series.begin(),
+                 [&gfactor, this](double value) -> double {
+                   return value * gfactor * time_step_;
+                 });
+
+  std::partial_sum(vel_series.begin(), vel_series.end(), vel_series.begin());
+
+  std::transform(vel_series.begin(), vel_series.end(), disp_series.begin(),
+                 [this](double value) -> double { return value * time_step_; });
+
+  std::partial_sum(disp_series.begin(), disp_series.end(), disp_series.begin());
+
+  Eigen::VectorXd times(time_history.size());
+
+  for (unsigned int i = 0; i < times.size(); ++i) {
+    times(i) = i * time_step_;
+  }
+
+  // Convert input time history from std vector to Eigen vector
+  Eigen::VectorXd disp_vector =
+      Eigen::Map<Eigen::VectorXd>(disp_series.data(), disp_series.size());
+
+  // Fit zero-intercept polynomial to displacement time history
+  auto displacement_poly =
+      numeric_utils::polyfit_intercept(times, disp_vector, 0.0, 5);
+  auto velocity_poly = numeric_utils::polynomial_derivative(displacement_poly);
+  auto accel_poly = numeric_utils::polynomial_derivative(velocity_poly);
+
+  // Calculate acceleration correction based on polynomial
+  auto accel_correction =
+      numeric_utils::evaluate_polynomial(accel_poly, times) / gfactor;
+
+  // Correct time series based on acceleration correction
+  for (unsigned int i = 0; i < accel_correction.size(); ++i) {
+    time_history[i] = time_history[i] - accel_correction(i);
+  }
+}
+
+void stochastic::DabaghiDerKiureghian::convert_time_history_units(
+    std::vector<double>& time_history, bool units) const {
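+  // Illustrative note (added commentary; assumes the histories are held in
+  // units of g internally, consistent with the 1/981 cm/s^2-to-g factors used
+  // elsewhere in this file): units == true keeps the history in g (factor
+  // 1.0), while units == false scales by 9.81 to obtain m/s^2. For a
+  // hypothetical sample of 0.10 g, the converted value would be
+  // 0.10 * 9.81 = 0.981 m/s^2.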
+  double conversion_factor = units ? 1.0 : 9.81;
+
+  for (auto& val : time_history) {
+    val = val * conversion_factor;
+  }
+}
diff --git a/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h
new file mode 100644
index 000000000..26f5bb04a
--- /dev/null
+++ b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h
@@ -0,0 +1,418 @@
+#ifndef _DABAGHI_DER_KIUREGHIAN_H_
+#define _DABAGHI_DER_KIUREGHIAN_H_
+
+#include
+#include
+#include
+#include
+#include
+#include "distribution.h"
+#include "json_object.h"
+#include "numeric_utils.h"
+#include "stochastic_model.h"
+
+namespace stochastic {
+/** @enum stochastic::FaultType
+ * @brief is a strongly typed enum class representing the type of faulting
+ */
+enum class FaultType {
+  StrikeSlip, /**< Strike-slip fault */
+  ReverseAndRevObliq /**< Reverse or reverse-oblique fault */
+};
+
+/** @enum stochastic::SimulationType
+ * @brief is a strongly typed enum class representing pulse-like proportion
+ * of ground motion
+ */
+enum class SimulationType {
+  PulseAndNoPulse, /**< pulse-like and non-pulse-like motions in proportion predicted by Shahi and Baker (2014) */
+  Pulse, /**< only pulse-like */
+  NoPulse /**< only non-pulse-like */
+};
+
+/**
+ * Stochastic model for simulating near-fault ground motions. Based on the following
+ * references:
+ * 1. Dabaghi and Der Kiureghian (2014 PEER report) "Stochastic Modeling and Simulation of Near-Fault Ground Motions for Performance-Based Earthquake Engineering"
+ * 2. Dabaghi and Der Kiureghian (2017 EESD) "Stochastic model for simulation of NF GMs"
+ * 3. Dabaghi and Der Kiureghian (2018 EESD) "Simulation of orthogonal horizontal components of near-fault ground motion for specified EQ source and site characteristics"
+ */
+class DabaghiDerKiureghian : public StochasticModel {
+ public:
+  /**
+   * @constructor Default constructor
+   */
+  DabaghiDerKiureghian() = default;
+
+  /**
+   * @constructor Construct near-fault ground motion model based on input
+   * parameters
+   * @param[in] faulting Type of faulting
+   * @param[in] simulation_type Pulse-type of simulation
+   * @param[in] moment_magnitude Moment magnitude of earthquake
+   * @param[in] depth_to_rupt Depth to the top of the rupture plane in
+   * kilometers
+   * @param[in] rupture_distance Closest distance from the site to the fault
+   * rupture in kilometers
+   * @param[in] vs30 Soil shear wave velocity averaged over top 30 meters in
+   * meters per second
+   * @param[in] s_or_d Directivity parameter s or d (km)--input the larger of
+   * the two
+   * @param[in] theta_or_phi Directivity angle parameter theta or phi
+   * (degrees)--input corresponding value to s or d
+   * @param[in] num_sims Number of simulated ground motion time histories that
+   * should be generated (number of different model parameter realizations)
+   * @param[in] num_realizations Number of realizations of non-stationary, modulated, filtered
+   * white noise per set of model parameters
+   * @param[in] truncate Boolean indicating whether to truncate and baseline correct
+   * synthetic motion
+   */
+  DabaghiDerKiureghian(FaultType faulting, SimulationType simulation_type,
+                       double moment_magnitude, double depth_to_rupt,
+                       double rupture_distance, double vs30, double s_or_d,
+                       double theta_or_phi, unsigned int num_sims,
+                       unsigned int num_realizations, bool truncate);
+
+  /**
+   * @constructor Construct near-fault ground motion model based on input
+   * parameters
+   * @param[in] faulting Type of faulting
+   * @param[in] simulation_type Pulse-type of simulation
+   * @param[in] moment_magnitude Moment magnitude of earthquake
+   * @param[in] depth_to_rupt Depth to the top of the rupture plane in
+   * kilometers
+   * @param[in] rupture_distance Closest distance from the site to the fault
+   * rupture in kilometers
+   * @param[in] vs30 Soil shear wave velocity averaged over top 30 meters in
+   * meters per second
+   * @param[in] s_or_d Directivity parameter s or d (km)--input the larger of
+   * the two
+   * @param[in] theta_or_phi Directivity angle parameter theta or phi
+   * (degrees)--input corresponding value to s or d
+   * @param[in] num_sims Number of simulated ground motion time histories that
+   * should be generated (number of different model parameter realizations)
+   * @param[in] num_realizations Number of realizations of non-stationary, modulated, filtered
+   * white noise per set of model parameters
+   * @param[in] truncate Boolean indicating whether to truncate and baseline correct
+   * synthetic motion
+   * @param[in] seed_value Value to seed random variables with to ensure
+   * repeatability
+   */
+  DabaghiDerKiureghian(FaultType faulting, SimulationType simulation_type,
+                       double moment_magnitude, double depth_to_rupt,
+                       double rupture_distance, double vs30, double s_or_d,
+                       double theta_or_phi, unsigned int num_sims,
+                       unsigned int num_realizations, bool truncate, int seed_value);
+
+  /**
+   * @destructor Virtual destructor
+   */
+  virtual ~DabaghiDerKiureghian() {};
+
+  /**
+   * Delete copy constructor
+   */
+  DabaghiDerKiureghian(const DabaghiDerKiureghian&) = delete;
+
+  /**
+   * Delete assignment operator
+   */
+  DabaghiDerKiureghian& operator=(const DabaghiDerKiureghian&) = delete;
+
+  /**
+   * Generate ground motion time histories based on input parameters
+   * and store outputs as JSON object. Throws exception if errors
+   * are encountered during time history generation.
+   * @param[in] event_name Name to assign to event
+   * @param[in] units Indicates that time histories should be returned in
+   *                  units of g. Defaults to false where time histories
+   *                  are returned in units of m/s^2
+   * @return JsonObject containing time histories
+   */
+  utilities::JsonObject generate(const std::string& event_name,
+                                 bool units = false) override;
+
+  /**
+   * Generate ground motion time histories based on input parameters
+   * and write results to file in JSON format. Throws exception if
+   * errors are encountered during time history generation.
+   * @param[in] event_name Name to assign to event
+   * @param[in, out] output_location Location to write outputs to
+   * @param[in] units Indicates that time histories should be returned in
+   *                  units of g. Defaults to false where time histories
+   *                  are returned in units of m/s^2
+   * @return Returns true if successful, false otherwise
+   */
+  bool generate(const std::string& event_name,
+                const std::string& output_location,
+                bool units = false) override;
+
+  /**
+   * Generates proportion of motions that should be pulse-like based on total
+   * number of simulations and probability of those motions containing a pulse
+   * following pulse probability model developed by Shahi & Baker (2014)
+   * @param[in] num_sims Total number of simulations that should be generated
+   * @return Total number of pulse-like motions
+   */
+  unsigned int simulate_pulse_type(unsigned num_sims) const;
+
+  /**
+   * Simulate model parameters for ground motions based on either pulse-like
+   * or non-pulse-like behavior
+   * @param[in] pulse_like Boolean indicating whether ground motions are
+   * pulse-like
+   * @param[in] num_sims Number of simulations to simulate model parameters for
+   * @return Model parameters for ground motions
+   */
+  Eigen::MatrixXd simulate_model_parameters(bool pulse_like,
+                                            unsigned int num_sims);
+
+  /**
+   * Compute the conditional mean values of the transformed model parameters
+   * using regression coefficients and Equation 12 from Dabaghi & Der
+   * Kiureghian (2017)
+   * @param[in] pulse_like Boolean indicating whether ground motions are
+   * pulse-like
+   * @return Vector containing predicted model parameters
+   */
+  Eigen::VectorXd compute_transformed_model_parameters(bool pulse_like) const;
+
+  /**
+   * Transforms model parameters from normal space back to real space
+   * @param[in] pulse_like Boolean indicating whether ground motions are
+   * pulse-like
+   * @param[in, out] parameters Vector of parameters in normal space. Transformed variables will be
+   * stored in this vector.
+   */
+  void transform_parameters_from_normal_space(bool pulse_like, Eigen::VectorXd& parameters);
+
+  /**
+   * Calculate the inverse of double-exponential distribution
+   * @param[in] probability Probability at which to evaluate inverse CDF
+   * @param[in] param_a Distribution parameter
+   * @param[in] param_b Distribution parameter
+   * @param[in] param_c Distribution parameter
+   * @param[in] lower_bound Lower bound for location
+   * @return Value of the inverse CDF at the input probability
+   */
+  double inv_double_exp(double probability, double param_a, double param_b,
+                        double param_c, double lower_bound) const;
+
+  /**
+   * Simulate near-fault ground motion given model parameters and whether motion
+   * is pulse-like or not.
+   * @param[in] pulse_like Boolean indicating whether ground motions are
+   * pulse-like
+   * @param[in] parameters Vector of model parameters to use for ground motion
+   * simulation
+   * @param[in,out] accel_comp_1 Simulated near-fault ground motion components
+   * in direction 1. Outputs are written here.
+   * @param[in,out] accel_comp_2 Simulated near-fault ground motion components
+   * in direction 2. Outputs are written here.
+   * @param[in] num_gms Number of ground motions that should be generated.
+   * Defaults to 1.
+   */
+  void simulate_near_fault_ground_motion(
+      bool pulse_like, const Eigen::VectorXd& parameters,
+      std::vector<std::vector<double>>& accel_comp_1,
+      std::vector<std::vector<double>>& accel_comp_2,
+      unsigned int num_gms = 1) const;
+
+  /**
+   * Backcalculate modulating parameters given Arias Intensity and duration parameters
+   * @param[in] q_params Vector containing Ia, D595, D05, and D030
+   * @param[in] t0 Initial time. Defaults to 0.0.
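+   * As implemented in calc_modulating_func() and in this function's body, the
+   * modulating function is
+   *   q(t) = c * ((t - t0) / (tmaxq - t0))^alpha   for t0 <= t < tmaxq
+   *   q(t) = c * exp(-beta * (t - tmaxq))          for t >= tmaxq
+   * and c is recovered from the target Arias intensity Ia as
+   *   c = sqrt(Ia / ((pi/2) * ((tmaxq - t0) / (2*alpha + 1) + 1 / (2*beta)))),
+   * which is the expression evaluated at the end of the implementation.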
+   * @return Vector containing parameters alpha, beta, tmaxq, and c
+   */
+  Eigen::VectorXd backcalculate_modulating_params(
+      const Eigen::VectorXd& q_params, double t0 = 0.0) const;
+
+  /**
+   * Simulate modulated filtered white noise process
+   * @param[in] modulating_params Modulating parameters
+   * @param[in] filter_params Filtering parameters
+   * @param[in] num_steps Total number of time steps to be taken
+   * @param[in] num_gms Number of ground motions that should be generated.
+   * Defaults to 1.
+   * @return Vector of vectors containing time history of simulated modulated
+   * filtered white noise
+   */
+  Eigen::MatrixXd simulate_white_noise(const Eigen::VectorXd& modulating_params,
+                                       const Eigen::VectorXd& filter_params,
+                                       unsigned int num_steps,
+                                       unsigned int num_gms = 1) const;
+
+  /**
+   * This function defines an error measure based on matching times of the 5%,
+   * 30%, and 95% Arias intensity of the target ground motion and corresponding
+   * modulating function q(t) with parameters alpha_q_sub as defined in Eq. 4
+   * of Reference 2. This is used to back-calculate the modulating function
+   * parameters by minimizing the corresponding error measure. The input is a
+   * vector containing alpha, beta, and t_max_q.
+   * @param[in] parameters Modulating function parameters: alpha, beta, and t_max_q
+   * @param[in] d05_target Time from t0 to time of 5% Arias intensity of target
+   * motion
+   * @param[in] d030_target Time from t0 to time of 30% Arias intensity of
+   * target motion
+   * @param[in] d095_target Time from t0 to time of 95% Arias intensity of
+   * target motion
+   * @param[in] t0 Start time of modulating function and of target ground motion
+   * @return Error measure for the modulating function fit
+   */
+  double calc_parameter_error(const std::vector<double>& parameters,
+                              double d05_target, double d030_target,
+                              double d095_target, double t0) const;
+
+  /**
+   * Calculate values of modulating function given function parameters
+   * @param[in] num_steps Total number of time steps to be taken
+   * @param[in] t0 Initial time
+   * @param[in] parameters Modulating function parameters
+   * @return Vector containing time series of modulating function values
+   */
+  std::vector<double> calc_modulating_func(
+      unsigned int num_steps, double t0,
+      const Eigen::VectorXd& parameters) const;
+
+  /**
+   * Calculate the time at which the input percentage of the Arias intensity
+   * is reached
+   * @param[in] acceleration Acceleration time history
+   * @param[in] percentage Percentage of Arias intensity to be reached
+   * @return Time at which input percentage of Arias intensity is reached
+   */
+  double calc_time_to_intensity(const std::vector<double>& acceleration,
+                                double percentage) const;
+
+  /**
+   * Calculate the linearly varying filter function (in rad/sec) given the
+   * filter function parameters (in Hz) and the times of 1%, 30% (mid) and 99%
+   * Arias Intensity (AI)
+   * @param[in] num_steps Number of time steps in time history
+   * @param[in] filter_params Filter function parameters [fmid, f_slope] (in
+   * Hz), tmid is defined as the time of 30% AI
+   * @param[in] t01 Time of 1% of AI of the modulating function (and in an
+   * average sense of the simulated GM)
+   * @param[in] tmid Time of 30% of AI of the modulating function (and in an
+   * average sense of the simulated GM)
+   * @param[in] t99 Time of 99% of AI of the modulating function (and in an
+   * average sense of the simulated GM)
+   * @return Vector containing the filter frequency (in rad/sec) at each time step
+   */
+  std::vector<double> calc_linear_filter(unsigned int num_steps,
+                                         const Eigen::VectorXd& filter_params,
+                                         double t01, double tmid,
+                                         double t99) const;
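+
+  // Illustrative sketch (added commentary, not part of the original header):
+  // simulate_white_noise() in the implementation file chains the helpers
+  // declared above roughly as follows:
+  //   auto q = calc_modulating_func(num_steps, start_time_, modulating_params);
+  //   double t01 = calc_time_to_intensity(q, 1.0);   // 1% Arias intensity
+  //   double tmid = calc_time_to_intensity(q, 30.0); // 30% Arias intensity
+  //   double t99 = calc_time_to_intensity(q, 99.0);  // 99% Arias intensity
+  //   auto freq = calc_linear_filter(num_steps, filter_params, t01, tmid, t99);
+  // The filter is then expanded into an impulse-response matrix by
+  // calc_impulse_response_filter() and applied to modulated Gaussian white
+  // noise.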
+
+  /**
+   * Calculate impulse response filter based on time series, input filter,
+   * and filter parameter zeta
+   * @param[in] num_steps Number of time steps in time history
+   * @param[in] input_filter Input filter coefficients to use in impulse
+   * response
+   * @param[in] zeta Filter parameter
+   * @return Impulse response filter
+   */
+  Eigen::MatrixXd calc_impulse_response_filter(
+      unsigned int num_steps, const std::vector<double>& input_filter,
+      double zeta) const;
+
+  /**
+   * Filters input acceleration time history in frequency domain using
+   * acausal high-pass Butterworth filter
+   * @param[in] accel_history Acceleration time history to filter
+   * @param[in] freq_corner Corner frequency
+   * @param[in] filter_order Order of filter
+   * @return Filtered time history
+   */
+  std::vector<double> filter_acceleration(const Eigen::VectorXd& accel_history,
+                                          double freq_corner,
+                                          unsigned int filter_order) const;
+
+  /**
+   * Calculate the pulse acceleration based on the modified Mavroeidis and
+   * Papageorgiou model
+   * @param[in] num_steps Number of time steps in time history
+   * @param[in] parameters Vector of model parameters to use for ground motion
+   * simulation
+   * @return Time history of pulse acceleration
+   */
+  std::vector<double> calc_pulse_acceleration(
+      unsigned int num_steps, const Eigen::VectorXd& parameters) const;
+
+  /**
+   * Truncate acceleration time histories at the beginning and/or end where
+   * displacement amplitudes are effectively zero
+   * @param[in, out] accel_comp_1 Component 1 of acceleration time history to
+   * truncate
+   * @param[in, out] accel_comp_2 Component 2 of acceleration time history to
+   * truncate
+   * @param[in] gfactor Factor to convert acceleration to cm/s^2
+   * @param[in] amplitude_lim Displacement amplitude limit in cm below which to
+   * apply truncation. Defaults to 0.2 cm
+   * @param[in] pgd_lim Ratio of peak ground displacement below which to
+   * truncate. Defaults to 0.01.
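+   * As implemented, each motion is integrated to displacement, the limit is
+   * taken as min(amplitude_lim, PGD * pgd_lim) per component, and samples
+   * before the first and after the last exceedance of that limit (considering
+   * both components) are dropped.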
+ * + */ + void truncate_time_histories(std::vector>& accel_comp_1, + std::vector>& accel_comp_2, + double gfactor, double amplitude_lim = 0.2, + double pgd_lim = 0.01) const; + + /** + * Baseline correct acceleration time histories by fitting a polynomial + * starting from the 2nd degree of the displacement time series + * @param[in, out] time_history Acceleration time history to truncate + * @param[in] gfactor Factor to convert acceleration to cm/s^2 + * @param[in] order Order of the polynomial fitted to the displacement time + * series + */ + void baseline_correct_time_history(std::vector& time_history, + double gfactor, unsigned int order) const; + + /** + * Convert input time history to units of g or m/s^2 + * @param[in, out] time_history Time history to convert units for + * @param[in] units If true, converts to units of g, otherwise to m/s^2 + */ + void convert_time_history_units(std::vector& time_history, + bool units) const; + + private: + FaultType faulting_; /**< Enum for type of faulting for scenario */ + SimulationType sim_type_; /**< Enum for pulse-like nature of ground motion */ + double moment_magnitude_; /**< Moment magnitude for scenario */ + double depth_to_rupt_; /**< Depth to the top of the rupture plane (km) */ + double rupture_dist_; /**< Closest-to-site rupture distance in kilometers */ + double vs30_; /**< Soil shear wave velocity averaged over top 30 meters in + meters per second */ + double s_or_d_; /**< Directivity parameter s or d (km) */ + double theta_or_phi_; /**< Directivity angle parameter theta or phi */ + bool truncate_; /**< Indicates whether to truncate and baseline correct motion */ + unsigned int num_sims_pulse_; /**< Number of pulse-like simulated ground + motion time histories that should be generated */ + unsigned int num_sims_nopulse_; /**< Number of no-pulse-like simulated ground + motion time histories that should be generated */ + unsigned int num_realizations_; /**< Number of realizations of model parameters */ + int seed_value_; /**< Integer to seed random distributions with */ + double time_step_; /**< Temporal discretization. 
Set to 0.005 seconds */
+  double start_time_ = 0.0; /**< Start time of ground motion */
+  Eigen::VectorXd std_dev_pulse_; /**< Pulse-like parameter standard deviation */
+  Eigen::VectorXd std_dev_nopulse_; /**< No-pulse-like parameter standard deviation */
+  Eigen::MatrixXd corr_matrix_pulse_; /**< Pulse-like parameter correlation matrix */
+  Eigen::MatrixXd corr_matrix_nopulse_; /**< No-pulse-like parameter correlation matrix */
+  Eigen::MatrixXd beta_distribution_pulse_; /**< Beta distribution parameters for pulse-like motion */
+  Eigen::MatrixXd beta_distribution_nopulse_; /**< Beta distribution parameters for no-pulse-like motion */
+  Eigen::VectorXd params_lower_bound_; /**< Lower bound for marginal distributions fitted to params
+                                          (Table 5 in Dabaghi & Der Kiureghian, 2017) */
+  Eigen::VectorXd params_upper_bound_; /**< Upper bound for marginal distributions fitted to params
+                                          (Table 5 in Dabaghi & Der Kiureghian, 2017) */
+  Eigen::VectorXd params_fitted1_; /**< Fitted distribution parameters from Table 5 (Dabaghi & Der Kiureghian, 2017) */
+  Eigen::VectorXd params_fitted2_; /**< Fitted distribution parameters from Table 5 (Dabaghi & Der Kiureghian, 2017) */
+  Eigen::VectorXd params_fitted3_; /**< Fitted distribution parameters from Table 5 (Dabaghi & Der Kiureghian, 2017) */
+  const double magnitude_baseline_ = 6.5; /**< Baseline regression factor for magnitude */
+  const double c6_ = 6.0; /**< This factor is set to avoid non-linearity in regression */
+  std::shared_ptr
+      sample_generator_; /**< Multivariate normal random number generator */
+};
+} // namespace stochastic
+
+#endif // _DABAGHI_DER_KIUREGHIAN_H_
diff --git a/modules/createEVENT/common/smelt/distribution.h b/modules/createEVENT/common/smelt/distribution.h
new file mode 100644
index 000000000..911335f57
--- /dev/null
+++ b/modules/createEVENT/common/smelt/distribution.h
@@ -0,0 +1,61 @@
+#ifndef _DISTRIBUTION_H_
+#define _DISTRIBUTION_H_
+
+#include
+#include
+
+namespace stochastic {
+
+/**
+ * Abstract base class for distribution models
+ */
+class Distribution {
+ public:
+  /**
+   * @constructor Default constructor
+   */
+  Distribution() = default;
+
+  /**
+   * @destructor Virtual destructor
+   */
+  virtual ~Distribution(){};
+
+  /**
+   * Delete copy constructor
+   */
+  Distribution(const Distribution&) = delete;
+
+  /**
+   * Delete assignment operator
+   */
+  Distribution& operator=(const Distribution&) = delete;
+
+  /**
+   * Get the name of the distribution model
+   * @return Model name as a string
+   */
+  virtual std::string name() const = 0;
+
+  /**
+   * Compute the cumulative distribution function (CDF) of the distribution at
+   * specified input locations
+   * @param[in] locations Vector containing locations at which to calculate CDF
+   * @return Vector of evaluated values of CDF at input locations
+   */
+  virtual std::vector<double> cumulative_dist_func(
+      const std::vector<double>& locations) const = 0;
+
+  /**
+   * Compute the inverse cumulative distribution function (ICDF) of the
+   * distribution at specified input locations
+   * @param[in] probabilities Vector containing probabilities at which to
+   * calculate ICDF
+   * @return Vector of evaluated values of ICDF at input locations
+   */
+  virtual std::vector<double> inv_cumulative_dist_func(
+      const std::vector<double>& probabilities) const = 0;
+};
+} // namespace stochastic
+
+#endif // _DISTRIBUTION_H_
diff --git a/modules/createEVENT/common/smelt/factory.h b/modules/createEVENT/common/smelt/factory.h
new file mode 100644
index 000000000..a6f39a5b3
--- /dev/null
+++ 
b/modules/createEVENT/common/smelt/factory.h @@ -0,0 +1,153 @@ +/* THIS LICENSE ONLY APPLIES TO THIS FILE, FACTORY.H. THE REMAINDER OF THE FILES */ +/* IN THIS PROJECT ARE RELEASED UNDER THE GNU GENERAL PUBLIC LICENSE VERSION 3 */ + +/* # MIT License */ + +/* Copyright (c) 2018 NHERI SimCenter */ +/* Copyright (c) 2017 CB-Geo MPM */ + +/* Permission is hereby granted, free of charge, to any person obtaining a copy */ +/* of this software and associated documentation files (the "Software"), to deal */ +/* in the Software without restriction, including without limitation the rights */ +/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */ +/* copies of the Software, and to permit persons to whom the Software is */ +/* furnished to do so, subject to the following conditions: */ + +/* The above copyright notice and this permission notice shall be included in all */ +/* copies or substantial portions of the Software. */ + +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */ +/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */ +/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */ +/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */ +/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */ +/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */ +/* SOFTWARE. */ + + +#ifndef _FACTORY_H_ +#define _FACTORY_H_ + +#include +#include +#include +#include +#include +#include + +/** + * Singleton factory implementation + * @tparam Tbaseclass Base class + * @tparam Targs variadic template arguments + */ +template +class Factory { + public: + /** + * Get the single instance of the factory + */ + static Factory* instance() { + static Factory factory; + return &factory; + } + + /** + * Register a factory function to create an instance of classname + * @param[in] key Register key + * @tparam Tderivedclass Derived class + */ + template + void register_factory(const std::string& key) { + registry[key].reset(new Creator); + } + + /** + * Create an instance of a registered class + * @param[in] key Key to item in registry + * @param[in] args Variadic template arguments + * @return shared_ptr Shared pointer to a base class + */ + std::shared_ptr create(const std::string& key, Targs&&... args) { + if (!this->check(key)) + throw std::runtime_error("Invalid key: " + key + + ", not found in the factory register!"); + return registry.at(key)->create(std::forward(args)...); + } + + /** + * Check if an element is registered + * @param[in] key Key to be checked in registry + * @return status Return if key is in registry or not + */ + bool check(const std::string& key) const { + bool status = false; + for (const auto& keyvalue : registry) + if (keyvalue.first == key) status = true; + return status; + } + + /** + * List registered elements + * @return factory_items Return list of items in the registry + */ + std::vector list() const { + std::vector factory_items; + for (const auto& keyvalue : registry) + factory_items.push_back(keyvalue.first); + return factory_items; + } + + private: + /** + * Private constructor + */ + Factory() = default; + + /** + * A base class creator struct + */ + struct CreatorBase { + // A virtual create function + virtual std::shared_ptr create(Targs&&...) 
= 0; + }; + + /** + * Creator class + * @tparam Tderivedclass Derived class + */ + template + struct Creator : public CreatorBase { + /** + * Create instance of object + */ + std::shared_ptr create(Targs&&... args) override { + return std::make_shared(std::forward(args)...); + } + }; + + std::map> registry; /**< Register of + factory + functions */ +}; + +/** + * A helper class to register a factory function + * @tparam Tbaseclass Base class + * @tparam Tderivedclass Derived class + * @tparam Targs Variadic template arguments + */ +template +class Register { + public: + /** + * Register with a given key + * @param[in] key Key to item in registry + */ + explicit Register(const std::string& key) { + // register the class factory function + Factory::instance() + ->template register_factory(key); + } +}; + +#endif // _FACTORY_H_ diff --git a/modules/createEVENT/common/smelt/filter.cc b/modules/createEVENT/common/smelt/filter.cc new file mode 100644 index 000000000..175f772eb --- /dev/null +++ b/modules/createEVENT/common/smelt/filter.cc @@ -0,0 +1,224 @@ +#include +#include +#include +#include +#include +// #include +#include +#include +#include // For std::setprecision + + +// Eigen dense matrices +#include + +namespace signal_processing { + +std::function>(int, double)> hp_butterworth() { + return [](int filter_order, + double cutoff_freq) -> std::vector> { + + // // Allocated memory for coefficients + // std::vector taps(2 * (filter_order + 1)); + // IppStatus status = ippStsNoErr; + // int internal_buffer_size; + + // // Calculate required buffer size for internal calculations + // status = ippsIIRGenGetBufferSize(filter_order, &internal_buffer_size); + // if (status != ippStsNoErr) { + // throw std::runtime_error( + // "\nERROR: in signal_processing::hp_butterworth: Error in buffer size " + // "calculations\n"); + // } + + // // Divide by 2 to make cutoff frequency match the definition given in MATLAB + // Ipp8u* internal_calcs = ippsMalloc_8u(internal_buffer_size); + // status = + // ippsIIRGenHighpass_64f(cutoff_freq / 2.0, 0, filter_order, taps.data(), + // ippButterworth, internal_calcs); + + // // Check if filter computation succeeded + // if (status != ippStsNoErr) { + // throw std::runtime_error( + // "\nERROR: in signal_processing::hp_butterworth: Error in coefficient " + // "calculations\n"); + // } + + // std::vector numerator(filter_order + 1); + // std::vector denominator(filter_order + 1); + + // for (int i = 0; i < filter_order + 1; ++i) { + // numerator[i] = taps[i]; + // denominator[i] = taps[i + filter_order + 1]; + // } + + // // Free memory associated with internal calcs + // ippsFree(internal_calcs); + + + if ((filter_order==4)&&(cutoff_freq==0.004)) { + // all good + } else { + throw std::runtime_error("We only support filter_order==4 and cutoff_freq==0.004. 
Other setups require manual computation of butterworth highpass filter parameters by changing the source code"); + } + + std::vector numerator{ 0.983715174129757,-3.934860696519026,5.902291044778539,-3.934860696519026,0.983715174129757 }; + std::vector denominator{ 1.000000000000000, -3.967162595948848, 5.902025861490879, -3.902558784823240, 0.967695543813137 }; + + return std::vector>{numerator, denominator}; + }; +} + +std::function(std::vector, std::vector, int, + int)> + impulse_response() { + + return [](const std::vector& numerator_coeffs, + const std::vector& denominator_coeffs, int order, + int num_samples) -> std::vector { + + std::cout << "impulse_response needs update" << std::endl; + if (numerator_coeffs.size() != denominator_coeffs.size()) { + throw std::runtime_error( + "\nERROR: in signal_processing::impulse_response: Inputs for " + "numerator " + "and denominator coefficients not same length\n"); + } + + // IppStatus status = ippStsNoErr; + // int internal_buffer_size; + // IppsIIRState_64f* filter_state = nullptr; + // Ipp64f *samples = ippsMalloc_64f(num_samples), + // *impulse = ippsMalloc_64f(num_samples); + // std::vector taps(numerator_coeffs.size() + + // denominator_coeffs.size()); + + // // Set all values to zero except first one for impulse + // impulse[0] = 1.0; + // for (int i = 1; i < num_samples; ++i) { + // impulse[i] = 0.0; + // } + + // // Put filter coefficients into single stack array + // for (unsigned int i = 0; i < numerator_coeffs.size(); ++i) { + // taps[i] = numerator_coeffs[i]; + // taps[i + numerator_coeffs.size()] = denominator_coeffs[i]; + // } + + // // Get buffer size required for internal calcs + // status = ippsIIRGetStateSize_64f(order, &internal_buffer_size); + // if (status != ippStsNoErr) { + // throw std::runtime_error( + // "\nERROR: in signal_processing::impulse_response: Error in buffer " + // "size " + // "calculations\n"); + // } + + // // Allocate memory for internal calcs + // Ipp8u* internal_calcs = ippsMalloc_8u(internal_buffer_size); + + // // Initialize filter state + // status = ippsIIRInit_64f(&filter_state, taps.data(), order, nullptr, + // internal_calcs); + // if (status != ippStsNoErr) { + // throw std::runtime_error( + // "\nERROR: in signal_processing::impulse_response: Error in filter " + // "initialization\n"); + // } + + // // Apply filter to impulse + // status = ippsIIR_64f(impulse, samples, num_samples, filter_state); + // if (status != ippStsNoErr) { + // throw std::runtime_error( + // "\nERROR: in signal_processing::impulse_response: Error in filter " + // "application\n"); + // } + + // std::vector sample_vec(num_samples); + // for (int i = 0; i < num_samples; ++i) { + // sample_vec[i] = samples[i]; + // } + + // // Free memory used for filtering + // ippsFree(samples); + // ippsFree(impulse); + // ippsFree(internal_calcs); + + + // Initialize impulse and samples vectors + std::vector impulse(num_samples, 0.0); + impulse[0] = 1.0; // Set first value to 1 for impulse + std::vector samples(num_samples, 0.0); + + // Convert the coefficient vectors to Eigen objects + Eigen::VectorXd b = Eigen::Map(numerator_coeffs.data(), numerator_coeffs.size()); + Eigen::VectorXd a = Eigen::Map(denominator_coeffs.data(), denominator_coeffs.size()); + + if (a[0] == 0) { + throw std::runtime_error("Denominator's first coefficient (a[0]) cannot be zero"); + } + + // Normalize the coefficients + b /= a[0]; + a /= a[0]; + + // Apply the filter (difference equation) + for (int n = 0; n < num_samples; ++n) { + samples[n] = b[0] * 
impulse[n];
+      for (int i = 1; i <= order; ++i) {
+        if (n - i >= 0) {
+          samples[n] += b[i] * impulse[n - i] - a[i] * samples[n - i];
+        }
+      }
+    }
+
+    std::vector<double> sample_vec(num_samples);
+    for (int i = 0; i < num_samples; ++i) {
+      sample_vec[i] = samples[i];
+    }
+    return sample_vec;
+  };
+}
+
+std::function<std::vector<double>(double, double, unsigned int, unsigned int)>
+    acausal_highpass_filter() {
+  return [](double freq_corner, double time_step, unsigned int order,
+            unsigned int num_samples) -> std::vector<double> {
+
+    // Calculate normalized frequency
+    double freq_cutoff_norm = 1.0 / (2.0 * time_step);
+
+    // Initialize filter and frequencies
+    Eigen::VectorXd filter(num_samples);
+    std::vector<double> freq_steps(static_cast<unsigned int>(num_samples / 2) +
+                                   1);
+    double step_freq = freq_cutoff_norm / static_cast<double>(num_samples / 2);
+
+    // Create vector of frequencies ranging from 0 to normalized cutoff
+    // frequency
+    for (unsigned int i = 0; i < freq_steps.size(); ++i) {
+      freq_steps[i] = static_cast<double>(i) * step_freq;
+    }
+
+    // Calculate first half of filter coefficients
+    for (unsigned int i = 0; i < freq_steps.size(); ++i) {
+      filter(i) =
+          std::sqrt(1.0 / (1.0 + std::pow(freq_corner / freq_steps[i],
+                                          2.0 * order)));
+    }
+
+    // Mirror coefficients
+    Eigen::VectorXd highpass_filter(2 * filter.size() - 2);
+    highpass_filter.head(filter.size()) = filter;
+    highpass_filter.segment(filter.size(), filter.size() - 2) =
+        filter.segment(1, filter.size() - 2).reverse();
+
+    // Place filter coefficients in STL vector
+    std::vector<double> filter_vector(highpass_filter.size());
+    Eigen::VectorXd::Map(&filter_vector[0], highpass_filter.size()) =
+        highpass_filter;
+
+    return filter_vector;
+  };
+}
+} // namespace signal_processing
diff --git a/modules/createEVENT/common/smelt/filter.h b/modules/createEVENT/common/smelt/filter.h
new file mode 100644
index 000000000..d237527ff
--- /dev/null
+++ b/modules/createEVENT/common/smelt/filter.h
@@ -0,0 +1,48 @@
+#ifndef _FILTER_H_
+#define _FILTER_H_
+
+#include
+#include
+
+/**
+ * Signal processing functionality
+ */
+namespace signal_processing {
+
+/**
+ * Function for calculating the coefficients of the highpass Butterworth filter
+ * @param[in] filter_order Order of the Butterworth filter
+ * @param[in] cutoff_freq Normalized cutoff frequency
+ * @return Returns a vector containing two vectors where the first vector
+ *         contains the numerator coefficients and the second vector contains
+ *         the denominator coefficients.
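+ *         Note that this build hard-codes the coefficients for
+ *         filter_order == 4 and cutoff_freq == 0.004 (the IPP-based
+ *         computation is commented out in filter.cc): numerator
+ *         {0.98372, -3.93486, 5.90229, -3.93486, 0.98372} and denominator
+ *         {1.0, -3.96716, 5.90203, -3.90256, 0.96770}, with any other
+ *         arguments raising a std::runtime_error.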
+ */ +std::function>(int, double)> hp_butterworth(); + +/** + * Function that calculates the impulse response of a filter defined by the + * input numerator and denominator coefficients for the input number of samples + * @param[in] numerator_coeffs Numerator coefficients for filter + * @param[in] denominator_coeffs Denominator coefficients for filter + * @param[in] order Order of the filter + * @param[in] num_samples Number of samples desired + * @return Vector containing impulse response for requested number of samples + */ +std::function(std::vector, std::vector, int, + int)> + impulse_response(); + +/** + * Function that calculates the acausal Butterworth filter for the requested + * number of samples at input cutoff frequency + * @param[in] freq_corner Corner frequency + * @param[in] time_step Time step between observations + * @param[in] order Order of the filter + * @param[in] num_samples Number of samples desired + * @return Vector containing filter coefficients for requested number of samples + */ +std::function(double, double, unsigned int, unsigned int)> + acausal_highpass_filter(); +} // namespace signal_processing + +#endif // _FILTER_H_ diff --git a/modules/createEVENT/common/smelt/function_dispatcher.h b/modules/createEVENT/common/smelt/function_dispatcher.h new file mode 100644 index 000000000..9c7cad747 --- /dev/null +++ b/modules/createEVENT/common/smelt/function_dispatcher.h @@ -0,0 +1,111 @@ +#ifndef _FUNCTION_DISPATCHER_H_ +#define _FUNCTION_DISPATCHER_H_ + +#include +#include +#include +#include +#include + +/** + * Singleton function dispatcher + * @tparam Treturntype Function return type + * @tparam Targs Function arguments + */ +template +class Dispatcher { + public: + /** + * Get the single instance of the dispatcher + */ + static Dispatcher* instance() { + static Dispatcher dispatcher; + return &dispatcher; + } + + /** + * Register a function with the dispatcher + * @param[in] key Register key + * @param[in] new_function New function to register with the dispatcher + */ + void register_function(const std::string& key, + std::function new_function) { + if (!this->check(key)) { + registry[key] = std::move(new_function); + } else { + throw std::runtime_error("Duplicate key: " + key + + ", already registered. Verify key choice to " + "ensure function has not already been added"); + } + } + + /** + * Dispatch function with input arguments + * @param key Key to function in dispatcher + * @param args Inputs to function + * @return Return result of evaluating function with input args + */ + Treturntype dispatch(const std::string& key, Targs... 
args) {
+    if (!this->check(key)) {
+      throw std::runtime_error("Invalid key: " + key + ", not found in the function dispatcher");
+    } else {
+      return registry.at(key)(std::forward<Targs>(args)...);
+    }
+  }
+
+  /**
+   * Check if a function is registered
+   * @param[in] key Key to be checked in registry
+   * @return status Return if key is in registry or not
+   */
+  bool check(const std::string& key) const {
+    bool status = false;
+    for (const auto& keyvalue : registry)
+      if (keyvalue.first == key) status = true;
+    return status;
+  }
+
+  /**
+   * List registered functions
+   * @return dispatcher_items Return list of items in the registry
+   */
+  std::vector<std::string> list() const {
+    std::vector<std::string> dispatcher_items;
+    for (const auto& keyvalue : registry)
+      dispatcher_items.push_back(keyvalue.first);
+    return dispatcher_items;
+  }
+
+ private:
+  /**
+   * Private constructor
+   */
+  Dispatcher() = default;
+
+  std::map<std::string, std::function<Treturntype(Targs...)>> registry; /**<
+                                                 Register of
+                                                 functions */
+};
+
+/**
+ * A helper class to register a function with the dispatcher
+ * @tparam Treturntype Function return type
+ * @tparam Targs Function arguments
+ */
+template <typename Treturntype, typename... Targs>
+class DispatchRegister {
+ public:
+  /**
+   * Register with a given key
+   * @param[in] key Key to item in registry
+   */
+  explicit DispatchRegister(
+      const std::string& key,
+      std::function<Treturntype(Targs...)> new_function) {
+    // register the class factory function
+    Dispatcher<Treturntype, Targs...>::instance()->register_function(
+        key, std::move(new_function));
+  }
+};
+
+#endif // _FUNCTION_DISPATCHER_H_
diff --git a/modules/createEVENT/common/smelt/inv_gauss_dist.cc b/modules/createEVENT/common/smelt/inv_gauss_dist.cc
new file mode 100644
index 000000000..771aa7bec
--- /dev/null
+++ b/modules/createEVENT/common/smelt/inv_gauss_dist.cc
@@ -0,0 +1,35 @@
+#include
+#include
+#include "inv_gauss_dist.h"
+
+stochastic::InverseGaussianDistribution::InverseGaussianDistribution(
+    double mean, double std_dev)
+    : Distribution(),
+      mean_{mean},
+      std_dev_{std_dev},
+      distribution_{mean, std_dev_}
+{}
+
+std::vector<double>
+    stochastic::InverseGaussianDistribution::cumulative_dist_func(
+        const std::vector<double>& locations) const {
+  std::vector<double> evaluations(locations.size());
+
+  for (unsigned int i = 0; i < locations.size(); ++i) {
+    evaluations[i] = cdf(distribution_, locations[i]);
+  }
+
+  return evaluations;
+}
+
+std::vector<double>
+    stochastic::InverseGaussianDistribution::inv_cumulative_dist_func(
+        const std::vector<double>& probabilities) const {
+  std::vector<double> evaluations(probabilities.size());
+
+  for (unsigned int i = 0; i < probabilities.size(); ++i) {
+    evaluations[i] = quantile(distribution_, probabilities[i]);
+  }
+
+  return evaluations;
+}
diff --git a/modules/createEVENT/common/smelt/inv_gauss_dist.h b/modules/createEVENT/common/smelt/inv_gauss_dist.h
new file mode 100644
index 000000000..6868ebb40
--- /dev/null
+++ b/modules/createEVENT/common/smelt/inv_gauss_dist.h
@@ -0,0 +1,67 @@
+#ifndef _INV_GAUSS_DIST_H_
+#define _INV_GAUSS_DIST_H_
+
+#include
+#include
+#include
+#include "distribution.h"
+
+namespace stochastic {
+/**
+ * Inverse Gaussian distribution
+ */
+class InverseGaussianDistribution : public Distribution {
+ public:
+  /**
+   * @constructor Delete default constructor
+   */
+  InverseGaussianDistribution() = delete;
+
+  /**
+   * @constructor Construct inverse Gaussian distribution with specified mean and
+   * standard deviation
+   * @param[in] mean Mean of distribution
+   * @param[in] scale Scale parameter of distribution
+   */
+  InverseGaussianDistribution(double mean, double scale);
+
+  /**
+   * @destructor Virtual destructor
+   */
+  virtual ~InverseGaussianDistribution(){};
+
+  /**
+   * Get the name of the distribution model
+   * @return Model name as a string
+   */
+  std::string name() const override { return "InverseGaussianDist"; };
+
+  /**
+   * Compute the cumulative distribution function (CDF) of the distribution at
+   * specified input locations
+   * @param[in] locations Vector containing locations at which to
+   *                      calculate CDF
+   * @return Vector of evaluated values of CDF at input locations
+   */
+  std::vector<double> cumulative_dist_func(
+      const std::vector<double>& locations) const override;
+
+  /**
+   * Compute the inverse cumulative distribution function (ICDF) of the
+   * distribution at specified input locations
+   * @param[in] probabilities Vector containing probabilities at which to
+   *                          calculate ICDF
+   * @return Vector of evaluated values of ICDF at input locations
+   */
+  std::vector<double> inv_cumulative_dist_func(
+      const std::vector<double>& probabilities) const override;
+
+ protected:
+  double mean_; /**< Distribution mean */
+  double std_dev_; /**< Distribution standard deviation */
+  boost::math::inverse_gaussian distribution_; /**< Inverse Gaussian
+                                                  distribution */
+};
+} // namespace stochastic
+
+#endif // _INV_GAUSS_DIST_H_
diff --git a/modules/createEVENT/common/smelt/json_object.cc b/modules/createEVENT/common/smelt/json_object.cc
new file mode 100644
index 000000000..1340166e2
--- /dev/null
+++ b/modules/createEVENT/common/smelt/json_object.cc
@@ -0,0 +1,74 @@
+#include
+#include
+#include
+#include
+#include
+// JSON for Modern C++ single-include header
+#include
+#include "json_object.h"
+
+utilities::JsonObject::JsonObject() {
+  // This is constructed here to ensure the JSON member is stored as an object
+  json_object_ = json::object();
+}
+
+utilities::JsonObject::JsonObject(json library_json)
+    : JsonObject::JsonObject()
+{
+  json_object_ = library_json;
+}
+
+bool utilities::JsonObject::delete_key(const std::string& key) {
+  bool status = true;
+
+  int erased = json_object_.erase(key);
+
+  if (erased != 1) {
+    status = false;
+    throw std::runtime_error(
+        "\nWARNING: In utilities::JsonObject::delete_key: Key not present, so "
+        "no values were erased\n");
+  }
+
+  return status;
+}
+
+bool utilities::JsonObject::write_to_file(
+    const std::string& output_location) const {
+  bool status = true;
+  std::ofstream output_file;
+  output_file.open(output_location);
+
+  if (!output_file.is_open()) {
+    status = false;
+    throw std::runtime_error(
+        "\nERROR: In utilities::JsonObject::write_to_file(): Could not open "
+        "output location\n");
+  }
+
+  // Write prettified JSON to file
+  output_file << std::setw(4) << json_object_ << std::endl;
+
+  output_file.close();
+
+  if (output_file.fail()) {
+    status = false;
+    throw std::runtime_error(
+        "\nERROR: In utilities::JsonObject::write_to_file(): Error when "
+        "closing output location\n");
+  }
+
+  return status;
+}
+
+void utilities::JsonObject::clear() {
+  json_object_.clear();
+}
+
+bool utilities::JsonObject::is_empty() const {
+  return json_object_.empty();
+}
+
+unsigned int utilities::JsonObject::get_size() const {
+  return json_object_.size();
+}
diff --git a/modules/createEVENT/common/smelt/json_object.h b/modules/createEVENT/common/smelt/json_object.h
new file mode 100644
index 000000000..12a98426f
--- /dev/null
+++ b/modules/createEVENT/common/smelt/json_object.h
@@ -0,0 +1,134 @@
+#ifndef _JSON_OBJECT_H_
+#define _JSON_OBJECT_H_
+
+#include
+#include
+// JSON for Modern C++ single-include header
+#include
+
+namespace utilities {
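+// Illustrative usage sketch (added commentary; the key and values below are
+// hypothetical):
+//   utilities::JsonObject obj;
+//   obj.add_value<double>("peak_accel", 0.35);   // returns true on success,
+//                                                // throws on duplicate key
+//   double pa = obj.get_value<double>("peak_accel");
+//   obj.delete_key("peak_accel");                // throws if key is absent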
+// Alias for JSON type
+  using json = nlohmann::json;
+
+/**
+ * Wrapper class for JSON implementation
+ */
+class JsonObject {
+ public:
+  /**
+   * Friend operator for output stream insertion
+   */
+  friend std::ostream& operator<<(std::ostream& out, const utilities::JsonObject& json_object);
+
+  /**
+   * @constructor Construct JSON wrapper
+   */
+  JsonObject();
+
+  /**
+   * @destructor Virtual destructor
+   */
+  virtual ~JsonObject() {};
+
+  /**
+   * Compares input JsonObject to this JsonObject for equality
+   * @param[in] json_value JsonObject to compare this JsonObject to
+   * @return Returns true if JsonObjects are equal, false otherwise
+   */
+  inline bool operator==(const JsonObject& json_value) const {
+    return json_object_ == json_value.json_object_;
+  };
+
+  /**
+   * Compares input JsonObject to this JsonObject for inequality
+   * @param[in] json_value JsonObject to compare this JsonObject to
+   * @return Returns true if JsonObjects are not equal, false otherwise
+   */
+  inline bool operator!=(const JsonObject& json_value) const {
+    return !(*this == json_value);
+  };
+
+  /**
+   * Add value to key in JSON object. If key doesn't exist,
+   * it will automatically be added.
+   * @tparam Tparam Template parameter for value
+   * @param[in] key Key to add values to
+   * @param[in] value Value to add to key
+   * @return Returns true if successful, false otherwise
+   */
+  template <typename Tparam>
+  bool add_value(const std::string& key, const Tparam& value);
+
+  /**
+   * Get the value associated with the input key
+   * @tparam Tparam Template parameter value
+   * @param[in] key Key at which to get value
+   * @return Returns value at input key. Throws exception if key is not present.
+   */
+  template <typename Tparam>
+  Tparam get_value(const std::string& key) const;
+
+  /**
+   * Delete key from JSON object
+   * @param[in] key Key to remove from JSON object
+   * @return Returns true if successful, false otherwise
+   */
+  bool delete_key(const std::string& key);
+
+  /**
+   * Write prettified JSON object to file
+   * @param[in] output_location Location to write JSON object to
+   * @return Returns true if successful, false otherwise
+   */
+  bool write_to_file(const std::string& output_location) const;
+
+  /**
+   * Clear JSON object contents
+   */
+  void clear();
+
+  /**
+   * Check if JSON object is empty
+   * @return Returns true if object is empty, false otherwise
+   */
+  bool is_empty() const;
+
+  /**
+   * Get the number of keys in the JSON object
+   * @return Number of keys in the JSON object
+   */
+  unsigned int get_size() const;
+
+  /**
+   * Get underlying JSON library object
+   * @return Copy of internal JSON implementation
+   */
+  json get_library_json() const {
+    return json_object_;
+  };
+
+ protected:
+  /**
+   * @constructor Construct JsonObject from underlying library object
+   * @param[in] library_json Library JSON object
+   */
+  JsonObject(json library_json);
+
+  json json_object_; /**< JSON object for this class */
+};
+
+/**
+ * Overload for output stream insertion operator
+ * @param[in] out Output stream to insert JSON object into
+ * @param[in] json_object JSON object to insert into output stream
+ * @return Reference to output stream that JSON object has been inserted into
+ */
+inline std::ostream& operator<<(std::ostream& out, const utilities::JsonObject& json_object) {
+  out << std::setw(4) << json_object.json_object_;
+  return out;
+};
+} // namespace utilities
+
+#include "json_object.tcc"
+
+#endif // _JSON_OBJECT_H_
diff --git a/modules/createEVENT/common/smelt/json_object.tcc b/modules/createEVENT/common/smelt/json_object.tcc
new file mode 100644
index 000000000..70f6b69b2
--- /dev/null
+++ b/modules/createEVENT/common/smelt/json_object.tcc @@ -0,0 +1,94 @@ +#include +#include +#include + +/**< Add value to key */ +template inline +bool utilities::JsonObject::add_value(const std::string& key, + const Tparam& value) { + bool status = true; + + try { + if (json_object_.find(key) != json_object_.end()) { + status = false; + throw std::runtime_error( + "\nWARNING: In utilities::JsonObject::add_value: Input already " + "exists, so no value was added!\n"); + } else { + json_object_.emplace(key, value); + } + } catch (const std::exception& e) { + std::cerr << e.what(); + status = false; + throw; + } + + return status; +}; + +/**< Template specialization for case when input value in JsonObject */ +template <> inline +bool utilities::JsonObject::add_value(const std::string& key, + const utilities::JsonObject& value) { + bool status = true; + + try { + add_value(key, value.get_library_json()); + } catch (const std::exception& e) { + std::cerr << e.what(); + status = false; + throw; + } + + return status; +} + +/**< Template specialization for case when input value is vector of JsonObjects */ +template<> inline +bool utilities::JsonObject::add_value(const std::string& key, const std::vector& value) { + bool status = true; + + try { + // Create vector nlohmann::json type + std::vector value_vector(value.size()); + for (unsigned int i = 0; i < value.size(); ++i) { + value_vector[i] = value[i].get_library_json(); + } + add_value>(key, value_vector); + } catch (const std::exception& e) { + std::cerr << e.what(); + status = false; + throw; + } + + return status; +} + +/**< Get value at input key */ +template +Tparam utilities::JsonObject::get_value(const std::string& key) const { + + auto key_location = json_object_.find(key); + if (key_location != json_object_.end()) { + return key_location.value(); + } else { + throw std::runtime_error( + "\nWARNING: In utilities::JsonObject::get_value: Input key not found, " + "so make sure that exception is properly handled!\n"); + } +}; + +/**< Template specialization for case when return value is JsonObject */ +template <> inline +utilities::JsonObject utilities::JsonObject::get_value( + const std::string& key) const { + + auto key_location = json_object_.find(key); + if (json_object_.find(key) != json_object_.end()) { + return JsonObject(key_location.value()); + } else { + throw std::runtime_error( + "\nWARNING: In utilities::JsonObject::get_value: Input key not found, " + "so make sure that exception is properly handled!\n"); + } +} diff --git a/modules/createEVENT/common/smelt/lognormal_dist.cc b/modules/createEVENT/common/smelt/lognormal_dist.cc new file mode 100644 index 000000000..8e1f525af --- /dev/null +++ b/modules/createEVENT/common/smelt/lognormal_dist.cc @@ -0,0 +1,32 @@ +#include +#include +#include "lognormal_dist.h" + +stochastic::LognormalDistribution::LognormalDistribution(double mean, double std_dev) + : Distribution(), + mean_{mean}, + std_dev_{std_dev}, + distribution_{mean, std_dev_} +{} + +std::vector stochastic::LognormalDistribution::cumulative_dist_func( + const std::vector& locations) const { + std::vector evaluations(locations.size()); + + for (unsigned int i = 0; i < locations.size(); ++i) { + evaluations[i] = cdf(distribution_, locations[i]); + } + + return evaluations; +} + +std::vector stochastic::LognormalDistribution::inv_cumulative_dist_func( + const std::vector& probabilities) const { + std::vector evaluations(probabilities.size()); + + for (unsigned int i = 0; i < probabilities.size(); ++i) { + evaluations[i] = 
quantile(distribution_, probabilities[i]); + } + + return evaluations; +} diff --git a/modules/createEVENT/common/smelt/lognormal_dist.h b/modules/createEVENT/common/smelt/lognormal_dist.h new file mode 100644 index 000000000..b0f68c03c --- /dev/null +++ b/modules/createEVENT/common/smelt/lognormal_dist.h @@ -0,0 +1,66 @@ +#ifndef _LOGNORMAL_DIST_H_ +#define _LOGNORMAL_DIST_H_ + +#include +#include +#include +#include "distribution.h" + +namespace stochastic { +/** + * Lognormal distribution + */ +class LognormalDistribution : public Distribution { + public: + /** + * @constructor Delete default constructor + */ + LognormalDistribution() = delete; + + /** + * @constructor Construct lognormal distribution with specified mean and + * standard deviation + * @param[in] mean Mean of distribution + * @param[in] std_dev Standard deviation of distribution + */ + LognormalDistribution(double mean, double std_dev); + + /** + * @destructor Virtual destructor + */ + virtual ~LognormalDistribution(){}; + + /** + * Get the name of the distribution model + * @return Model name as a string + */ + std::string name() const override { return "LognormalDist"; }; + + /** + * Compute the cumulative distribution function (CDF) of the distribution at + * specified input locations + * @param[in] locations Vector containing locations at which to + * calculate CDF + * @return Vector of evaluated values of CDF at input locations + */ + std::vector cumulative_dist_func( + const std::vector& locations) const override; + + /** + * Compute the inverse cumulative distribution function (ICDF) of the + * distribution at specified input locations + * @param[in] probabilities Vector containing probabilities at which to + * calculate ICDF + * @return Vector of evaluated values of ICDF at input locations + */ + std::vector inv_cumulative_dist_func( + const std::vector& probabilities) const override; + + protected: + double mean_; /**< Distribution mean */ + double std_dev_; /**< Distribution standard deviation */ + boost::math::lognormal distribution_; /**< Lognormal distribution */ +}; +} // namespace stochastic + +#endif // _LOGNORMAL_DIST_H_ diff --git a/modules/createEVENT/common/smelt/nelder_mead.cc b/modules/createEVENT/common/smelt/nelder_mead.cc new file mode 100644 index 000000000..8b96029e9 --- /dev/null +++ b/modules/createEVENT/common/smelt/nelder_mead.cc @@ -0,0 +1,206 @@ +#include +#include +#include +#include +#include +#include "nelder_mead.h" + +std::vector optimization::NelderMead::minimize( + const std::vector& initial_point, double delta, + std::function&)>& objective_function) { + // Create vector of deltas with length equal to the number of dimensions + std::vector deltas(initial_point.size(), delta); + // Call minmize with vector of deltas + return minimize(initial_point, deltas, objective_function); +} + +std::vector optimization::NelderMead::minimize( + const std::vector& initial_point, const std::vector& deltas, + std::function&)>& objective_function) { + // Set the number of dimensions + num_dimensions_ = initial_point.size(); + // Initialize matrix that expands initial simplex in different directions + // using input deltas + std::vector> simplex( + num_dimensions_ + 1, std::vector(num_dimensions_)); + for (unsigned int i = 0; i < num_dimensions_ + 1; ++i) { + for (unsigned int j = 0; j < num_dimensions_; ++j) { + simplex[i][j] = initial_point[j]; + } + + if (i != 0) { + simplex[i][i - 1] = simplex[i][i - 1] + deltas[i - 1]; + } + } + + // Call minimize with matrix definining initial simplex + return 
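// Worked illustration (assumed inputs): for initial_point = {1.0, 2.0} and
// deltas = {0.5, 0.5}, the loop above builds the three-vertex starting
// simplex
//   p0 = {1.0, 2.0}
//   p1 = {1.5, 2.0}   (p0 offset by deltas[0] along dimension 0)
//   p2 = {1.0, 2.5}   (p0 offset by deltas[1] along dimension 1)
// i.e. num_dimensions_ + 1 vertices, with each non-base vertex displaced
// along exactly one coordinate axis.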
minimize(simplex, objective_function); +} + +std::vector optimization::NelderMead::minimize( + const std::vector>& initial_simplex, + std::function&)>& objective_function) { + + // Initialize variables + unsigned int index_high, index_low, index_next_high; + num_dimensions_ = initial_simplex[0].size(); + num_points_ = initial_simplex.size(); + + func_vals_.resize(num_points_); + simplex_ = initial_simplex; + std::vector evaluation_point(num_dimensions_); + std::vector simplex_mins(num_dimensions_); + std::vector centroids(num_dimensions_); + + // Evaluate objective function at all points in simplex + for (unsigned int i = 0; i < num_points_; ++i) { + for (unsigned int j = 0; j < num_dimensions_; ++j) { + evaluation_point[j] = simplex_[i][j]; + } + func_vals_[i] = objective_function(evaluation_point); + } + + num_evals_ = 0; + + centroids = calc_centroid(simplex_, num_dimensions_, num_points_); + + // centroids = calc_centroid(); + + // Iterate until specified tolerance is achieved or maximum number of + // iterations is exceeded + while (true) { + index_low = 0; + // Order points from best to worst + index_high = func_vals_[0] > func_vals_[1] ? (index_next_high = 1, 0) + : (index_next_high = 0, 1); + + for (unsigned int i = 0; i < num_points_; ++i) { + // Check if function value is best than current low + if (func_vals_[i] <= func_vals_[index_low]) { + index_low = i; + } + // Check if function value is worse than current worst + if (func_vals_[i] > func_vals_[index_high]) { + index_next_high = index_high; + index_high = i; + // Check if function value is worse than next-worst, but not worst + } else if (func_vals_[i] > func_vals_[index_next_high] && + i != index_high) { + index_next_high = i; + } + } + + // Calculate function tolerance + double tolerance = + 2.0 * std::abs(func_vals_[index_high] - func_vals_[index_low]) / + (std::abs(func_vals_[index_high]) + std::abs(func_vals_[index_low]) + + EPSILON_); + + // Check if tolerance is sufficiently small + if (tolerance < function_tol_) { + std::swap(func_vals_[0], func_vals_[index_low]); + simplex_[0].swap(simplex_[index_low]); + simplex_mins = simplex_[0]; + func_min_ = func_vals_[0]; + + return simplex_mins; + } + + if (num_evals_ >= MAX_ITERS_) { + std::swap(func_vals_[0], func_vals_[index_low]); + simplex_[0].swap(simplex_[index_low]); + simplex_mins = simplex_[0]; + func_min_ = func_vals_[0]; + // std::cerr << "\nWARNING: In optimization::NelderMead::minimize: Max " + // "iterations exceeded, returning current " + // "minimum location with function value of " + // << func_min_ << std::endl; + + return simplex_mins; + } + + num_evals_ += 2; + + // Start iteration by first extrapolating by a factor -1 through the face of + // the simplex across from the high point--reflect simplex from high point + double reflection = reflect(simplex_, func_vals_, centroids, index_high, + -1.0, objective_function); + + // Reflection gives better result than best point, so extrapolate again with + // factor 2.0 + if (reflection <= func_vals_[index_low]) { + reflection = reflect(simplex_, func_vals_, centroids, index_high, 2.0, + objective_function); + // Reflection is worse than next-highest, so do 1-D contraction to find + // intermediate lower point + } else if (reflection >= func_vals_[index_next_high]) { + double func_next_high = func_vals_[index_next_high]; + reflection = reflect(simplex_, func_vals_, centroids, index_high, 0.5, + objective_function); + + // Worst point is not going away, so contract around best point + if (reflection >= 
func_next_high) { + for (unsigned int i = 0; i < num_points_; ++i) { + if (i != index_low) { + for (unsigned int j = 0; j < num_dimensions_; ++j) { + centroids[j] = 0.5 * (simplex_[i][j] + simplex_[index_low][j]); + simplex_[i][j] = centroids[j]; + } + func_vals_[i] = objective_function(centroids); + } + } + num_evals_ += num_dimensions_; + centroids = calc_centroid(simplex_, num_dimensions_, num_points_); + } + } else { + --num_evals_; + } + } +} + +double optimization::NelderMead::reflect( + std::vector>& simplex, + std::vector& objective_vals, std::vector& centroids, + unsigned int index_worst, double factor, + std::function&)>& objective_function) { + + std::vector evaluations(num_dimensions_); + + double factor1 = (1.0 - factor) / static_cast(num_dimensions_); + double factor2 = factor1 - factor; + + for (unsigned int j = 0; j < num_dimensions_; ++j) { + evaluations[j] = centroids[j] * factor1 - simplex[index_worst][j] * factor2; + } + + double objective_value = objective_function(evaluations); + + if (objective_value < objective_vals[index_worst]) { + objective_vals[index_worst] = objective_value; + + for (unsigned int j = 0; j < num_dimensions_; ++j) { + centroids[j] += evaluations[j] - simplex[index_worst][j]; + simplex[index_worst][j] = evaluations[j]; + } + } + + return objective_value; +} + +std::vector optimization::NelderMead::calc_centroid( + const std::vector>& simplex, + unsigned int num_dimensions, unsigned int num_points) const { + + std::vector centroids(num_dimensions); + + for (unsigned int j = 0; j < num_dimensions; ++j) { + double sum = 0.0; + for (unsigned int i = 0; i < num_points; ++i) { + sum += simplex[i][j]; + } + + centroids[j] = sum; + } + + return centroids; +} diff --git a/modules/createEVENT/common/smelt/nelder_mead.h b/modules/createEVENT/common/smelt/nelder_mead.h new file mode 100644 index 000000000..17302b4a4 --- /dev/null +++ b/modules/createEVENT/common/smelt/nelder_mead.h @@ -0,0 +1,128 @@ +#ifndef _NELDER_MEAD_H_ +#define _NELDER_MEAD_H_ + +#include +#include +#include + +/** + * Optimization utilities + */ +namespace optimization { + +/** + * Class that implements Nelder-Mead algorithm for multidimensional + * unconstrained optimization. Based on implementation presented in + * Press et al. 
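// Usage sketch for the NelderMead class declared below: minimizing the
// Rosenbrock function. The driver main() and starting values are
// assumptions for illustration; only the constructor, minimize(), and
// get_minimum() signatures come from this header.
#include <cmath>
#include <functional>
#include <iostream>
#include <vector>
#include "nelder_mead.h"

int main() {
  // Converge when consecutive function values agree to roughly 1e-10
  optimization::NelderMead optimizer(1.0e-10);

  std::function<double(const std::vector<double>&)> rosenbrock =
      [](const std::vector<double>& x) {
        return 100.0 * std::pow(x[1] - x[0] * x[0], 2) +
               std::pow(1.0 - x[0], 2);
      };

  // Start at (-1, 1) with a step of 0.1 in every dimension
  std::vector<double> minimum =
      optimizer.minimize(std::vector<double>{-1.0, 1.0}, 0.1, rosenbrock);

  // The iterate should approach the known minimum at (1, 1), f = 0
  std::cout << "x* = (" << minimum[0] << ", " << minimum[1]
            << "), f(x*) = " << optimizer.get_minimum() << "\n";
  return 0;
}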
(2007) - "Numerical Recipes" + */ +class NelderMead { + public: + /** + * @constructor Default constructor + */ + NelderMead() = default; + + /** + * @constructor Construct with input function tolerance + * @param[in] function_tolerance Tolerance in consecutive function evaluations + * for convergence + */ + NelderMead(double function_tolerance) + : function_tol_{function_tolerance}, + func_min_{std::numeric_limits::infinity()} {}; + + /** + * @destructor Virtual destructor + */ + virtual ~NelderMead(){}; + + /** + * Delete copy constructor + */ + NelderMead(const NelderMead&) = delete; + + /** + * Delete assignment operator + */ + NelderMead& operator=(const NelderMead&) = delete; + + /** + * Minimize the input objective function given initial point and step size + * @param[in] initial_point Initial values to use for each dimension + * @param[in] delta Single step size to use for each dimension + * @param[in] objective_function Function to minimize + * @return Location of minimum + */ + std::vector minimize( + const std::vector& initial_point, double delta, + std::function&)>& objective_function); + + /** + * Minimize the input objective function given initial point and step sizes + * @param[in] initial_point Initial values to use for each dimension + * @param[in] deltas Vector of step sizes to use for dimensions + * @param[in] objective_function Function to minimize + * @return Location of minimum + */ + std::vector minimize( + const std::vector& initial_point, const std::vector& deltas, + std::function&)>& objective_function); + + /** + * Get the minimum value of the objective function + * @return Minimum value of objective function + */ + double get_minimum() const { return func_min_; }; + + /** + * Minimize the input objective function given initial simplex + * @tparam Tfunc_returntype Return type of objective function + * @tparam Tfunc_args Objective function arguments + * @param[in] initial_simplex Initial simplex to start optimization + * @param[in] objective_function Function to minimize + * @return Location of minimum + */ + std::vector minimize( + const std::vector>& initial_simplex, + std::function&)>& objective_function); + + private: + /** + * Calculates the centroid of all points and returns them as a vector + * @param[in] simplex Matrix describing simplex + * @return Vector containing centroids + */ + std::vector calc_centroid( + const std::vector>& simplex, + unsigned int num_dimensions, unsigned int num_points) const; + + /** + * Exptrapolate by input factor through the face of the simplex across from + * the high point. Replaces high point if the new point is better. 
+ * @param[in] simplex Matrix describing simplex + * @param[in] objective_vals Vector of objective values based on simplex + * @param[in] centroids Vector containing centroids + * @param[in] index_worst Index of worst value + * @param[in] factor Factor by which to extrapolate + * @param[in] objective_function Objective function to minimize + * @return Objective value + */ + double reflect( + std::vector>& simplex, + std::vector& objective_vals, std::vector& centroids, + unsigned int index_worst, double factor, + std::function&)>& objective_function); + + double function_tol_; /**< Function tolerance for convergence */ + unsigned int num_evals_; /**< Number of function evaluations */ + unsigned int num_points_; /**< Number of points */ + unsigned int num_dimensions_; /**< Number of dimensions */ + double func_min_; /**< Objective function minimum */ + std::vector func_vals_; /**< Function values at vertices */ + std::vector> simplex_; /**< Current simplex */ + const double EPSILON_ = 1.0e-10; /**< Tolerance */ + const unsigned int MAX_ITERS_ = 10000; /**< Maximum number of iterations */ +}; +} // namespace optimization + +#endif // _NELDER_MEAD_H_ diff --git a/modules/createEVENT/common/smelt/normal_dist.cc b/modules/createEVENT/common/smelt/normal_dist.cc new file mode 100644 index 000000000..de5441b49 --- /dev/null +++ b/modules/createEVENT/common/smelt/normal_dist.cc @@ -0,0 +1,32 @@ +#include +#include +#include "normal_dist.h" + +stochastic::NormalDistribution::NormalDistribution(double mean, double std_dev) + : Distribution(), + mean_{mean}, + std_dev_{std_dev}, + distribution_{mean, std_dev_} +{} + +std::vector stochastic::NormalDistribution::cumulative_dist_func( + const std::vector& locations) const { + std::vector evaluations(locations.size()); + + for (unsigned int i = 0; i < locations.size(); ++i) { + evaluations[i] = cdf(distribution_, locations[i]); + } + + return evaluations; +} + +std::vector stochastic::NormalDistribution::inv_cumulative_dist_func( + const std::vector& probabilities) const { + std::vector evaluations(probabilities.size()); + + for (unsigned int i = 0; i < probabilities.size(); ++i) { + evaluations[i] = quantile(distribution_, probabilities[i]); + } + + return evaluations; +} diff --git a/modules/createEVENT/common/smelt/normal_dist.h b/modules/createEVENT/common/smelt/normal_dist.h new file mode 100644 index 000000000..6fda9ba88 --- /dev/null +++ b/modules/createEVENT/common/smelt/normal_dist.h @@ -0,0 +1,66 @@ +#ifndef _NORMAL_DIST_H_ +#define _NORMAL_DIST_H_ + +#include +#include +#include +#include "distribution.h" + +namespace stochastic { +/** + * Normal distribution + */ +class NormalDistribution : public Distribution { + public: + /** + * @constructor Construct standard normal distribution + */ + NormalDistribution() = delete; + + /** + * @constructor Construct normal distribution with specified mean and + * standard deviation + * @param[in] mean Mean of distribution + * @param[in] std_dev Standard deviation of distribution + */ + NormalDistribution(double mean, double std_dev); + + /** + * @destructor Virtual destructor + */ + virtual ~NormalDistribution(){}; + + /** + * Get the name of the distribution model + * @return Model name as a string + */ + std::string name() const override { return "NormalDist"; }; + + /** + * Compute the cumulative distribution function (CDF) of the distribution at + * specified input locations + * @param[in] locations Vector containing locations at which to + * calculate CDF + * @return Vector of evaluated values of CDF 
at input locations + */ + std::vector cumulative_dist_func( + const std::vector& locations) const override; + + /** + * Compute the inverse cumulative distribution function (ICDF) of the + * distribution at specified input locations + * @param[in] probabilities Vector containing probabilities at which to + * calculate ICDF + * @return Vector of evaluated values of ICDF at input locations + */ + std::vector inv_cumulative_dist_func( + const std::vector& probabilities) const override; + + protected: + double mean_; /**< Distribution mean */ + double std_dev_; /**< Distribution standard deviation */ + boost::math::normal distribution_; /**< Normal distribution */ +}; +} // namespace stochastic + +#endif // _NORMAL_DIST_H_ diff --git a/modules/createEVENT/common/smelt/normal_multivar.cc b/modules/createEVENT/common/smelt/normal_multivar.cc new file mode 100644 index 000000000..750adb63d --- /dev/null +++ b/modules/createEVENT/common/smelt/normal_multivar.cc @@ -0,0 +1,73 @@ +#include +#include +// Boost random generator +#include +#include +// Eigen dense matrices +#include + +#include "factory.h" +#include "normal_multivar.h" + +namespace numeric_utils { + +NormalMultiVar::NormalMultiVar() + : RandomGenerator() +{ + generator_ = boost::random::mt19937(seed_); + distribution_ = boost::random::normal_distribution(); +} + +NormalMultiVar::NormalMultiVar(int seed) + : RandomGenerator() +{ + seed_ = seed; + generator_ = boost::random::mt19937(seed_); + distribution_ = boost::random::normal_distribution(); +} + +bool NormalMultiVar::generate( + Eigen::Matrix& random_numbers, + const Eigen::VectorXd& means, const Eigen::MatrixXd& cov, + unsigned int cases) { + + bool success = true; + Eigen::Matrix lower_cholesky; + + try { + auto llt = cov.llt(); + lower_cholesky = llt.matrixL(); + + if (llt.info() == Eigen::NumericalIssue) { + throw std::runtime_error( + "\nERROR: In NormalMultivar::generate method: Input covariance matrix is not " + "positive semi-definite\n"); + } + } catch (const std::exception& e) { + std::cerr << "\nERROR: In normal multivariate random number generation: " + << e.what() << std::endl; + success = false; + } + + random_numbers.resize(cov.rows(), cases); + + // Generate random numbers based on distribution and generator type for + // requested number of cases + for (unsigned int i = 0; i < random_numbers.cols(); ++i) { + for (unsigned int j = 0; j < random_numbers.rows(); ++j) { + random_numbers(j, i) = distribution_(generator_); + } + } + + // Transform from unit normal distribution based on covariance and mean values + for (unsigned int i = 0; i < random_numbers.cols(); ++i) { + random_numbers.col(i) = lower_cholesky * random_numbers.col(i) + means; + } + + return success; +} + +std::string NormalMultiVar::name() const { + return "NormalMultiVar"; +} +} // namespace numeric_utils diff --git a/modules/createEVENT/common/smelt/normal_multivar.h b/modules/createEVENT/common/smelt/normal_multivar.h new file mode 100644 index 000000000..b716ecda6 --- /dev/null +++ b/modules/createEVENT/common/smelt/normal_multivar.h @@ -0,0 +1,66 @@ +#ifndef _NORMAL_MULTIVAR_H_ +#define _NORMAL_MULTIVAR_H_ + +// Boost random generator +#include +#include +// Eigen dense matrices +#include + +#include "numeric_utils.h" + +namespace numeric_utils { +/** + * Class for generating random realizations of a multivariate + * normal distribution + */ +class NormalMultiVar : public RandomGenerator { + public: + /** + * @constructor Default constructor + */ + NormalMultiVar(); + + /** + * @constructor 
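// Usage sketch for NormalMultiVar (hypothetical driver; the matrix and
// vector values are assumptions for illustration). It exercises the
// Cholesky-based transform implemented above: draw standard normal z, then
// return y = L z + mu, where L comes from cov = L L^T.
#include <iostream>
#include <Eigen/Dense>
#include "normal_multivar.h"
#include "numeric_utils.h"

int main() {
  numeric_utils::NormalMultiVar generator(42);  // fixed seed for repeatability

  Eigen::VectorXd means(2);
  means << 1.0, -2.0;

  Eigen::MatrixXd correlation(2, 2);
  correlation << 1.0, 0.3,
                 0.3, 1.0;

  Eigen::VectorXd std_devs(2);
  std_devs << 0.5, 2.0;

  // Covariance assembled from the correlation matrix and standard deviations
  Eigen::MatrixXd covariance =
      numeric_utils::corr_to_cov(correlation, std_devs);

  // One row per random variable, one column per realization
  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> samples;
  if (generator.generate(samples, means, covariance, 5)) {
    std::cout << samples << "\n";
  }
  return 0;
}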
Construct an instance of the multivariate normal random number + * generator + * @param[in] seed Seed value to use in random number generator + */ + NormalMultiVar(int seed); + + /** + * @destructor Virtual destructor + */ + virtual ~NormalMultiVar(){}; + + /** + * Get multivariate random realization + * @param[in, out] random_numbers Matrix to store generated random numbers to + * @param[in] means Vector of mean values for random variables + * @param[in] cov Covariance matrix of for random variables + * @param[in] cases Number of cases to generate + * @return Returns true if no issues were encountered in Cholesky + * decomposition of covariance matrix, returns false otherwise + */ + bool generate( + Eigen::Matrix& random_numbers, + const Eigen::VectorXd& means, const Eigen::MatrixXd& cov, + unsigned int cases = 1) override; + + /** + * Get the class name + * @return Class name + */ + std::string name() const override; + + private: + boost::random::mt19937 generator_; /**< Mersenne Twister random number + generator */ + boost::random::normal_distribution distribution_; /**< Normal + distribution to use + with random number + generator */ +}; +} // namespace numeric_utils + +#endif // _NORMAL_MULTIVAR_H_ diff --git a/modules/createEVENT/common/smelt/numeric_utils.cc b/modules/createEVENT/common/smelt/numeric_utils.cc new file mode 100644 index 000000000..645c39c47 --- /dev/null +++ b/modules/createEVENT/common/smelt/numeric_utils.cc @@ -0,0 +1,521 @@ +#include +#include +#include +#include +#include +#include + +//#include +//#include +//#include +#include "numeric_utils.h" + +namespace numeric_utils { +Eigen::MatrixXd corr_to_cov(const Eigen::MatrixXd& corr, + const Eigen::VectorXd& std_dev) { + Eigen::MatrixXd cov_matrix = Eigen::MatrixXd::Zero(corr.rows(), corr.cols()); + + for (unsigned int i = 0; i < cov_matrix.rows(); ++i) { + for (unsigned int j = 0; j < cov_matrix.cols(); ++j) { + cov_matrix(i, j) = corr(i, j) * std_dev(i) * std_dev(j); + } + } + + return cov_matrix; +} + +bool convolve_1d(const std::vector& input_x, + const std::vector& input_y, + std::vector& response) { + + + // bool status = true; + // response.resize(input_x.size() + input_y.size() - 1); + + // // Create convolution status and task pointer + // int conv_status; + // VSLConvTaskPtr conv_task; + // // Construct convolution task, with solution mode set to direct + // conv_status = + // vsldConvNewTask1D(&conv_task, VSL_CONV_MODE_DIRECT, input_x.size(), + // input_y.size(), response.size()); + + // // Check if convolution construction was successful + // if (conv_status != VSL_STATUS_OK) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::convolve_1d: Error in convolution " + // "construction\n"); + // status = false; + // } + + // // Set convolution to start at first element in input_y + // vslConvSetStart(conv_task, 0); + + // // Execute convolution + // conv_status = vsldConvExec1D(conv_task, input_x.data(), 1, input_y.data(), 1, + // response.data(), 1); + + // // Check if convolution exectution was successful + // if (conv_status != VSL_STATUS_OK) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::convolve_1d: Error in convolution " + // "execution\n"); + // status = false; + // } + + // // Delete convolution task + // vslConvDeleteTask(&conv_task); + + int n_x = input_x.size(); + int n_y = input_y.size(); + int n_response = n_x + n_y - 1; + + // Resize response to hold the convolution result + response.resize(n_response); + + // Find the next power of 2 to perform FFT (for 
efficiency) + int n_fft = 1; + while (n_fft < n_response) { + n_fft <<= 1; // Multiply by 2 + } + + // Allocate FFT configurations for forward and inverse FFT + kiss_fft_cfg cfg_forward = kiss_fft_alloc(n_fft, 0, nullptr, nullptr); // 0 means forward FFT + kiss_fft_cfg cfg_inverse = kiss_fft_alloc(n_fft, 1, nullptr, nullptr); // 1 means inverse FFT + + if (!cfg_forward || !cfg_inverse) { + std::cerr << "ERROR: Failed to allocate KissFFT configuration." << std::endl; + return false; + } + + // Prepare complex input arrays for FFT + std::vector input_x_fft(n_fft, {0, 0}); + std::vector input_y_fft(n_fft, {0, 0}); + + // Copy real inputs to complex arrays (imaginary part is 0) + for (int i = 0; i < n_x; ++i) { + input_x_fft[i].r = input_x[i]; + input_x_fft[i].i = 0.0; + } + for (int i = 0; i < n_y; ++i) { + input_y_fft[i].r = input_y[i]; + input_y_fft[i].i = 0.0; + } + + // Perform forward FFT on both inputs + std::vector x_fft_result(n_fft); + std::vector y_fft_result(n_fft); + + kiss_fft(cfg_forward, input_x_fft.data(), x_fft_result.data()); + kiss_fft(cfg_forward, input_y_fft.data(), y_fft_result.data()); + + // Multiply the FFT results element-wise (complex multiplication) + std::vector convolution_fft(n_fft); + for (int i = 0; i < n_fft; ++i) { + convolution_fft[i].r = x_fft_result[i].r * y_fft_result[i].r - x_fft_result[i].i * y_fft_result[i].i; + convolution_fft[i].i = x_fft_result[i].r * y_fft_result[i].i + x_fft_result[i].i * y_fft_result[i].r; + } + + // Perform inverse FFT to get the convolution result in time domain + std::vector result_ifft(n_fft); + kiss_fft(cfg_inverse, convolution_fft.data(), result_ifft.data()); + + // Extract real part of the result and scale it by 1/n_fft + for (int i = 0; i < n_response; ++i) { + response[i] = result_ifft[i].r / n_fft; + } + + // Free the KissFFT configurations + free(cfg_forward); + free(cfg_inverse); + + return true; +} + +bool inverse_fft(std::vector> input_vector, + std::vector& output_vector) { + + //output_vector.resize(input_vector.size()); + // // Create task descriptor and MKL status + // DFTI_DESCRIPTOR_HANDLE fft_descriptor; + // MKL_LONG fft_status; + + // // Allocate the descriptor data structure and initializes it with default + // // configuration values + // fft_status = DftiCreateDescriptor(&fft_descriptor, DFTI_DOUBLE, DFTI_REAL, 1, + // input_vector.size()); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::inverse_fft: Error in descriptor creation\n"); + // return false; + // } + + // // Set configuration value to not do inplace transformation + // fft_status = DftiSetValue(fft_descriptor, DFTI_PLACEMENT, DFTI_NOT_INPLACE); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::inverse_fft: Error in setting configuration\n"); + // return false; + // } + + // // Set the backward scale factor to be 1 divided by the size of the input vector + // // to make the backward tranform the inverse of the forward transform + // fft_status = DftiSetValue(fft_descriptor, DFTI_BACKWARD_SCALE, + // static_cast(1.0 / input_vector.size())); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::inverse_fft: Error in setting backward " + // "scale factor\n"); + // return false; + // } + + // // Perform all initialization for the actual FFT computation + // fft_status = DftiCommitDescriptor(fft_descriptor); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // 
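// Cross-check sketch for the KissFFT-based convolve_1d above: a direct
// O(n*m) reference implementation of response[k] = sum_j x[j] * y[k - j].
// The helper convolve_direct and the sample values are assumptions for
// illustration.
#include <cassert>
#include <cmath>
#include <vector>
#include "numeric_utils.h"

static std::vector<double> convolve_direct(const std::vector<double>& x,
                                           const std::vector<double>& y) {
  std::vector<double> out(x.size() + y.size() - 1, 0.0);
  for (size_t i = 0; i < x.size(); ++i) {
    for (size_t j = 0; j < y.size(); ++j) {
      out[i + j] += x[i] * y[j];
    }
  }
  return out;
}

int main() {
  std::vector<double> x{1.0, 2.0, 3.0};
  std::vector<double> y{0.5, -1.0, 0.25, 4.0};

  std::vector<double> via_fft;
  numeric_utils::convolve_1d(x, y, via_fft);  // length 3 + 4 - 1 = 6

  std::vector<double> reference = convolve_direct(x, y);
  for (size_t k = 0; k < reference.size(); ++k) {
    assert(std::abs(via_fft[k] - reference[k]) < 1.0e-9);
  }
  return 0;
}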
"\nERROR: in numeric_utils::inverse_fft: Error in committing descriptor\n"); + // return false; + // } + + // // Compute the backward FFT + // fft_status = DftiComputeBackward(fft_descriptor, input_vector.data(), + // output_vector.data()); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::inverse_fft: Error in computing backward FFT\n"); + // return false; + // } + + // // Free the memory allocated for descriptor + // fft_status = DftiFreeDescriptor(&fft_descriptor); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::inverse_fft: Error in freeing FFT descriptor\n"); + // return false; + // } + + int n = input_vector.size(); + + // Resize output vector to match the input size + output_vector.resize(n); + + // Create KissFFT configuration for inverse FFT + kiss_fft_cfg cfg = kiss_fft_alloc(n, 1, nullptr, nullptr); // '1' indicates inverse FFT + if (!cfg) { + std::cerr << "ERROR: Failed to allocate KissFFT configuration." << std::endl; + return false; + } + + // Convert input vector from std::complex to kiss_fft_cpx + std::vector input_kissfft(n); + for (int i = 0; i < n; ++i) { + input_kissfft[i].r = input_vector[i].real(); + input_kissfft[i].i = input_vector[i].imag(); + } + + // Allocate output for KissFFT result (complex output even for inverse FFT) + std::vector output_kissfft(n); + + // Perform the inverse FFT + kiss_fft(cfg, input_kissfft.data(), output_kissfft.data()); + + // Free the KissFFT configuration + free(cfg); + + // Convert complex KissFFT result to real output (we discard the imaginary parts) + for (int i = 0; i < n; ++i) { + output_vector[i] = output_kissfft[i].r / n; // Scaling the result manually + } + + + + return true; +} + +bool inverse_fft(const Eigen::VectorXcd& input_vector, + Eigen::VectorXd& output_vector) { + // Convert input Eigen vector to std vector + std::vector> input_vals(input_vector.size()); + std::vector outputs(input_vals.size()); + Eigen::VectorXcd::Map(&input_vals[0], input_vector.size()) = input_vector; + + try { + inverse_fft(input_vals, outputs); + } catch (const std::exception& e) { + std::cerr << "\nERROR: In numeric_utils::inverse_fft (With Eigen Vectors):" + << e.what() << std::endl; + } + + // Convert output from std vector to Eigen vector + output_vector = Eigen::Map(outputs.data(), outputs.size()); + + return true; +} + +bool inverse_fft(const Eigen::VectorXcd& input_vector, + std::vector& output_vector) { + // Convert input Eigen vector to std vector + std::vector> input_vals(input_vector.size()); + Eigen::VectorXcd::Map(&input_vals[0], input_vector.size()) = input_vector; + output_vector.resize(input_vector.size()); + + try { + inverse_fft(input_vals, output_vector); + } catch (const std::exception& e) { + std::cerr << "\nERROR: In numeric_utils::inverse_fft (With Eigen Vectors):" + << e.what() << std::endl; + } + + return true; +} + +bool fft(std::vector input_vector, + std::vector>& output_vector) { + // Convert input vector to complex values + + // std::vector> input_complex(input_vector.size()); + // std::copy(input_vector.begin(), input_vector.end(), input_complex.begin()); + + // output_vector.resize(input_vector.size()); + + // // Create task descriptor and MKL status + // DFTI_DESCRIPTOR_HANDLE fft_descriptor; + // MKL_LONG fft_status; + + // // Allocate the descriptor data structure and initializes it with default + // // configuration values + // fft_status = DftiCreateDescriptor(&fft_descriptor, DFTI_DOUBLE, DFTI_COMPLEX, 
+ // 1, input_complex.size()); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::fft: Error in descriptor creation\n"); + // return false; + // } + + // // Set configuration value to not do inplace transformation + // fft_status = DftiSetValue(fft_descriptor, DFTI_PLACEMENT, DFTI_NOT_INPLACE); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::fft: Error in setting configuration\n"); + // return false; + // } + + // // Perform all initialization for the actual FFT computation + // fft_status = DftiCommitDescriptor(fft_descriptor); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::fft: Error in committing descriptor\n"); + // return false; + // } + + // // Compute the backward FFT + // fft_status = DftiComputeForward(fft_descriptor, input_complex.data(), + // output_vector.data()); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::fft: Error in computing FFT\n"); + // return false; + // } + + // // Free the memory allocated for descriptor + // fft_status = DftiFreeDescriptor(&fft_descriptor); + // if (fft_status != DFTI_NO_ERROR) { + // throw std::runtime_error( + // "\nERROR: in numeric_utils::fft: Error in freeing FFT descriptor\n"); + // return false; + // } + + size_t size = input_vector.size(); + + // Convert input vector to kiss_fft_cpx + std::vector input_complex(size); + for (size_t i = 0; i < size; ++i) { + input_complex[i].r = static_cast(input_vector[i]); + input_complex[i].i = 0; + } + + // Prepare output vector + output_vector.resize(size); + + // Initialize KissFFT configuration + kiss_fft_cfg cfg = kiss_fft_alloc(size, 0, nullptr, nullptr); + if (!cfg) { + throw std::runtime_error("ERROR: in fft: Failed to allocate KissFFT configuration"); + return false; + } + + // Perform the FFT + std::vector output_complex(size); + kiss_fft(cfg, input_complex.data(), output_complex.data()); + + // Convert the output to std::complex + for (size_t i = 0; i < size; ++i) { + output_vector[i] = std::complex(output_complex[i].r, output_complex[i].i); + } + + // Free KissFFT configuration + free(cfg); + + return true; +} + +bool fft(const Eigen::VectorXd& input_vector, Eigen::VectorXcd& output_vector) { + + // Convert input Eigen vector to std vector + std::vector input_vals(input_vector.size()); + std::vector> outputs(input_vals.size()); + Eigen::VectorXd::Map(&input_vals[0], input_vector.size()) = input_vector; + + try { + fft(input_vals, outputs); + } catch (const std::exception& e) { + std::cerr << "\nERROR: In numeric_utils::fft (With Eigen Vectors):" + << e.what() << std::endl; + } + + // Convert output from std vector to Eigen vector + output_vector = Eigen::Map(outputs.data(), outputs.size()); + + return true; +} + +bool fft(const Eigen::VectorXd& input_vector, + std::vector>& output_vector) { + // Convert input Eigen vector to std vector + std::vector input_vals(input_vector.size()); + Eigen::VectorXd::Map(&input_vals[0], input_vector.size()) = input_vector; + output_vector.resize(input_vector.size()); + + try { + fft(input_vals, output_vector); + } catch (const std::exception& e) { + std::cerr << "\nERROR: In numeric_utils::fft (With Eigen Vector and STL vector):" + << e.what() << std::endl; + } + + return true; +} + +double trapazoid_rule(const std::vector& input_vector, double spacing) { + double result = (input_vector[0] + input_vector[input_vector.size() - 1]) / 2.0; + + 
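// Worked example (assumed sample values): for samples {0, 1, 4, 9} of
// f(x) = x^2 at spacing 1.0, the endpoints are averaged above and the loop
// below adds the interior values: (0 + 9)/2 + 1 + 4 = 9.5, so the result is
// 9.5 * 1.0, approximating the exact integral of x^2 over [0, 3], which is 9.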
for (unsigned int i = 1; i < input_vector.size() - 1; ++i) { + result = result + input_vector[i]; + } + + return result * spacing; +} + +double trapazoid_rule(const Eigen::VectorXd& input_vector, double spacing) { + double result = (input_vector[0] + input_vector[input_vector.size() - 1]) / 2.0; + + for (unsigned int i = 1; i < input_vector.size() - 1; ++i) { + result = result + input_vector[i]; + } + + return result * spacing; +} + +Eigen::VectorXd polyfit_intercept(const Eigen::VectorXd& points, + const Eigen::VectorXd& data, + double intercept, + unsigned int degree) { + + Eigen::MatrixXd coefficients = + Eigen::MatrixXd::Zero(points.size(), degree - 1); + + for (unsigned int i = 0; i < degree - 1; ++i) { + coefficients.col(i) = points.array().pow(degree - i); + } + + // Solve system + Eigen::VectorXd solution = coefficients.fullPivHouseholderQr().solve( + (data.array() - intercept).matrix()); + + // Set y-intercept to zero + Eigen::VectorXd poly_fit(solution.size() + 2); + + for (unsigned int i = 0; i < solution.size(); ++i) { + poly_fit(i) = solution(i); + } + poly_fit(poly_fit.size() - 1) = intercept; + poly_fit(poly_fit.size() - 2) = 0.0; + + return poly_fit; +} + +Eigen::VectorXd polynomial_derivative(const Eigen::VectorXd& coefficients) { + Eigen::VectorXd derivative(coefficients.size() - 1); + + for (unsigned int i = 0; i < derivative.size(); ++i) { + derivative(i) = coefficients(i) * (coefficients.size() - 1 - i); + } + + return derivative; +} + +std::vector derivative(const std::vector& coefficients, + double constant_factor, bool add_zero) { + + if (add_zero) { + std::vector derivative(coefficients.size()); + derivative[0] = coefficients[0] * constant_factor; + + for (unsigned int i = 1; i < derivative.size(); ++i) { + derivative[i] = (coefficients[i] - coefficients[i - 1]) * constant_factor; + } + + return derivative; + } else { + std::vector derivative(coefficients.size() - 1); + + for (unsigned int i = 0; i < derivative.size(); ++i) { + derivative[i] = (coefficients[i + 1] - coefficients[i]) * constant_factor; + } + + return derivative; + } +} + +Eigen::VectorXd evaluate_polynomial(const Eigen::VectorXd& coefficients, + const Eigen::VectorXd& points) { + Eigen::VectorXd evaluations = Eigen::VectorXd::Zero(points.size()); + + for (unsigned int i = 0; i < evaluations.size(); ++i) { + for (unsigned int j = 0; j < coefficients.size(); ++j) { + evaluations(i) += + coefficients(j) * std::pow(points(i), coefficients.size() - 1 - j); + } + } + + return evaluations; +} + +Eigen::VectorXd evaluate_polynomial(const Eigen::VectorXd& coefficients, + const std::vector& points) { + Eigen::VectorXd evaluations = Eigen::VectorXd::Zero(points.size()); + + for (unsigned int i = 0; i < evaluations.size(); ++i) { + for (unsigned int j = 0; j < coefficients.size(); ++j) { + evaluations(i) += + coefficients(j) * std::pow(points[i], coefficients.size() - 1 - j); + } + } + + return evaluations; +} + +std::vector evaluate_polynomial(const std::vector& coefficients, + const std::vector& points) { + std::vector evaluations(points.size(), 0.0); + + for (unsigned int i = 0; i < evaluations.size(); ++i) { + for (unsigned int j = 0; j < coefficients.size(); ++j) { + evaluations[i] += + coefficients[j] * std::pow(points[i], coefficients.size() - 1 - j); + } + } + + return evaluations; +} +} // namespace numeric_utils diff --git a/modules/createEVENT/common/smelt/numeric_utils.h b/modules/createEVENT/common/smelt/numeric_utils.h new file mode 100644 index 000000000..1411c2a3f --- /dev/null +++ 
b/modules/createEVENT/common/smelt/numeric_utils.h
@@ -0,0 +1,227 @@
+#ifndef _NUMERIC_UTILS_H_
+#define _NUMERIC_UTILS_H_
+
+#include <complex>
+#include <ctime>
+#include <string>
+#include <vector>
+#include <Eigen/Dense>
+
+/**
+ * Numeric utility functions not tied to any particular class
+ */
+namespace numeric_utils {
+
+/**
+ * Convert input correlation matrix and standard deviation to covariance matrix
+ * @param[in] corr Input correlation matrix
+ * @param[in] std_dev Standard deviation vector
+ * @return Covariance matrix with same dimensions as input correlation matrix
+ */
+Eigen::MatrixXd corr_to_cov(const Eigen::MatrixXd& corr,
+                            const Eigen::VectorXd& std_dev);
+
+/**
+ * Compute the 1-dimensional convolution of two input vectors
+ * @param[in] input_x First input vector of data
+ * @param[in] input_y Second input vector of data
+ * @param[out] response Vector to store convolution results in
+ * @return Returns true if convolution was successful, false otherwise
+ */
+bool convolve_1d(const std::vector<double>& input_x,
+                 const std::vector<double>& input_y,
+                 std::vector<double>& response);
+
+/**
+ * Computes the real portion of the 1-dimensional inverse Fast Fourier
+ * Transform (FFT) of the input vector
+ * @param[in] input_vector Input vector to compute the inverse FFT of
+ * @param[in, out] output_vector Vector to write output to
+ * @return Returns true if computations were successful, false otherwise
+ */
+bool inverse_fft(std::vector<std::complex<double>> input_vector,
+                 std::vector<double>& output_vector);
+
+/**
+ * Computes the real portion of the 1-dimensional inverse Fast Fourier
+ * Transform (FFT) of the input vector
+ * @param[in] input_vector Input vector to compute the inverse FFT of
+ * @param[in, out] output_vector Vector to write output to
+ * @return Returns true if computations were successful, false otherwise
+ */
+bool inverse_fft(const Eigen::VectorXcd& input_vector,
+                 Eigen::VectorXd& output_vector);
+
+/**
+ * Computes the real portion of the 1-dimensional inverse Fast Fourier
+ * Transform (FFT) of the input vector
+ * @param[in] input_vector Input vector to compute the inverse FFT of
+ * @param[in, out] output_vector Vector to write output to
+ * @return Returns true if computations were successful, false otherwise
+ */
+bool inverse_fft(const Eigen::VectorXcd& input_vector,
+                 std::vector<double>& output_vector);
+
+/**
+ * Computes the 1-dimensional Fast Fourier Transform (FFT) of the input
+ * vector
+ * @param[in] input_vector Input vector to compute the FFT of
+ * @param[in, out] output_vector Vector to write output to
+ * @return Returns true if computations were successful, false otherwise
+ */
+bool fft(std::vector<double> input_vector,
+         std::vector<std::complex<double>>& output_vector);
+
+/**
+ * Computes the 1-dimensional Fast Fourier Transform (FFT) of the input
+ * vector
+ * @param[in] input_vector Input vector to compute the FFT of
+ * @param[in, out] output_vector Vector to write output to
+ * @return Returns true if computations were successful, false otherwise
+ */
+bool fft(const Eigen::VectorXd& input_vector, Eigen::VectorXcd& output_vector);
+
+/**
+ * Computes the 1-dimensional Fast Fourier Transform (FFT) of the input
+ * vector
+ * @param[in] input_vector Input vector to compute the FFT of
+ * @param[in, out] output_vector Vector to write output to
+ * @return Returns true if computations were successful, false otherwise
+ */
+bool fft(const Eigen::VectorXd& input_vector,
+         std::vector<std::complex<double>>& output_vector);
+
+/**
+ * Calculate the integral of the input vector with uniform spacing
+ * between data points
+ * @param[in] input_vector Vector containing function values + * @param[in] spacing Spacing between data points + * @return Approximate value of function integral + */ +double trapazoid_rule(const std::vector& input_vector, double spacing); + +/** + * Calculate the integral of the input vector with uniform spacing + * between data points + * @param[in] input_vector Vector containing function values + * @param[in] spacing Spacing between data points + * @return Approximate value of function integral + */ +double trapazoid_rule(const Eigen::VectorXd& input_vector, double spacing); + +/** + * Fit polynomial to data, forcing y-intercept to zero + * @param[in] points Vector of evaluation points + * @param[in] data Vector of data for evaluation points + * @param[in] intercept Value for y-intercept + * @param[in] degree Degree of of polynomial fit + */ +Eigen::VectorXd polyfit_intercept(const Eigen::VectorXd& points, + const Eigen::VectorXd& data, + double intercept, + unsigned int degree); + +/** + * Take the derivative of a polynomial described by its coefficients + * @param[in] coefficients Coefficients of polynomial terms ordered in + * descending power + * @return Vector of coefficients for input polynomial derivative + */ +Eigen::VectorXd polynomial_derivative(const Eigen::VectorXd& coefficients); + +/** + * Approximates the derivative as differences between adjacent input points + * @param[in] coefficients Coefficients of polynomial terms ordered in + * descending power + * @param[in] constant_factor Constant factor to multiply coefficients by. + * Defaults to 1.0. + * @param[in] add_zero Boolean indicating to add leading zero to coefficients. + * Defaults to false. + * @return Vector of difference for input vector + */ +std::vector derivative( + const std::vector& coefficients, double constant_factor = 1.0, + bool add_zero = false); + +/** + * Evaluate polynomial described by input coefficients at input points + * @param[in] coefficients Coefficients of polynomial terms ordered in + * descending power + * @param[in] points Vector of points at which to evaluate polynomial + * @return Vector of polynomial values evaluated at input points + */ +Eigen::VectorXd evaluate_polynomial(const Eigen::VectorXd& coefficients, + const Eigen::VectorXd& points); + +/** + * Evaluate polynomial described by input coefficients at input points + * @param[in] coefficients Coefficients of polynomial terms ordered in + * descending power + * @param[in] points Vector of points at which to evaluate polynomial + * @return Vector of polynomial values evaluated at input points + */ +Eigen::VectorXd evaluate_polynomial(const Eigen::VectorXd& coefficients, + const std::vector& points); + +/** + * Evaluate polynomial described by input coefficients at input points + * @param[in] coefficients Coefficients of polynomial terms ordered in + * descending power + * @param[in] points Vector of points at which to evaluate polynomial + * @return Vector of polynomial values evaluated at input points + */ +std::vector evaluate_polynomial(const std::vector& coefficients, + const std::vector& points); + +/** + * Abstract base class for random number generators + */ +class RandomGenerator { + public: + /** + * @constructor Default constructor + */ + RandomGenerator() = default; + + /** + * @destructor Virtual destructor + */ + virtual ~RandomGenerator() {}; + + /** + * Delete copy constructor + */ + RandomGenerator(const RandomGenerator&) = delete; + + /** + * Delete assignment operator + */ + RandomGenerator& operator=(const 
RandomGenerator&) = delete; + + /** + * Get multivariate random realization + * @param[in, out] random_numbers Matrix to store generated random numbers to + * @param[in] means Vector of mean values for random variables + * @param[in] cov Covariance matrix of for random variables + * @param[in] cases Number of cases to generate + * @return Returns true if no issues were encountered in Cholesky + * decomposition of covariance matrix, returns false otherwise + */ + virtual bool generate( + Eigen::Matrix& random_numbers, + const Eigen::VectorXd& means, const Eigen::MatrixXd& cov, + unsigned int cases = 1) = 0; + + /** + * Get the class name + * @return Class name + */ + virtual std::string name() const = 0; + + protected: + int seed_ = static_cast( + std::time(nullptr)); /**< Seed value to use in random number generator */ +}; +} // namespace numeric_utils + +#endif // _NUMERIC_UTILS_H_ diff --git a/modules/createEVENT/common/smelt/stochastic_model.h b/modules/createEVENT/common/smelt/stochastic_model.h new file mode 100644 index 000000000..caee1813d --- /dev/null +++ b/modules/createEVENT/common/smelt/stochastic_model.h @@ -0,0 +1,71 @@ +#ifndef _STOCHASTIC_MODEL_H_ +#define _STOCHASTIC_MODEL_H_ + +#include +#include "json_object.h" + +namespace stochastic { + +/** + * Abstract base class for stochastic models + */ +class StochasticModel { + public: + /** + * @constructor Default constructor + */ + StochasticModel() = default; + + /** + * @destructor Virtual destructor + */ + virtual ~StochasticModel() {}; + + /** + * Delete copy constructor + */ + StochasticModel(const StochasticModel&) = delete; + + /** + * Delete assignment operator + */ + StochasticModel& operator=(const StochasticModel&) = delete; + + /** + * Get the name of the stochastic model + * @return Model name as a string + */ + std::string model_name() const { return model_name_; }; + + /** + * Generate loading based on stochastic model and store + * outputs as JSON object + * @param[in] event_name Name to assign to event + * @param[in] units Indicates that time histories should be returned in + * specific units. These units will depend on the subclass; the input + * just allows for ensuring outputs are in a certain unit. + * @return JsonObject containing loading time histories + */ + virtual utilities::JsonObject generate(const std::string& event_name, + bool units = false) = 0; + + /** + * Generate loading based on stochastic model and write + * results to file in JSON format + * @param[in] event_name Name to assign to event + * @param[in, out] output_location Location to write outputs to + * @param[in] units Indicates that time histories should be returned in + * specific units. These units will depend on the subclass; the input + * just allows for ensuring outputs are in a certain unit. 
+   * @return Returns true if successful, false otherwise
+   */
+  virtual bool generate(const std::string& event_name,
+                        const std::string& output_location,
+                        bool units = false) = 0;
+
+ protected:
+  std::string model_name_ = "StochasticModel"; /**< Name of stochastic model */
+};
+}  // namespace stochastic
+
+#endif  // _STOCHASTIC_MODEL_H_
diff --git a/modules/createEVENT/common/smelt/students_t_dist.cc b/modules/createEVENT/common/smelt/students_t_dist.cc
new file mode 100644
index 000000000..7b44f6a6a
--- /dev/null
+++ b/modules/createEVENT/common/smelt/students_t_dist.cc
@@ -0,0 +1,36 @@
+#include <vector>
+#include <boost/math/distributions/students_t.hpp>
+#include "students_t_dist.h"
+
+stochastic::StudentstDistribution::StudentstDistribution(double mean,
+                                                         double std_dev,
+                                                         double dof)
+    : Distribution(),
+      mean_{mean},
+      std_dev_{std_dev},
+      dof_{dof},
+      distribution_{dof_}
+{}
+
+std::vector<double> stochastic::StudentstDistribution::cumulative_dist_func(
+    const std::vector<double>& locations) const {
+  std::vector<double> evaluations(locations.size());
+
+  for (unsigned int i = 0; i < locations.size(); ++i) {
+    evaluations[i] = cdf(distribution_, (locations[i] - mean_) / std_dev_);
+  }
+
+  return evaluations;
+}
+
+std::vector<double> stochastic::StudentstDistribution::inv_cumulative_dist_func(
+    const std::vector<double>& probabilities) const {
+  std::vector<double> evaluations(probabilities.size());
+
+  for (unsigned int i = 0; i < probabilities.size(); ++i) {
+    evaluations[i] =
+        std_dev_ * quantile(distribution_, probabilities[i]) + mean_;
+  }
+
+  return evaluations;
+}
diff --git a/modules/createEVENT/common/smelt/students_t_dist.h b/modules/createEVENT/common/smelt/students_t_dist.h
new file mode 100644
index 000000000..ebdffd567
--- /dev/null
+++ b/modules/createEVENT/common/smelt/students_t_dist.h
@@ -0,0 +1,68 @@
+#ifndef _STUDENTS_T_DIST_H_
+#define _STUDENTS_T_DIST_H_
+
+#include <string>
+#include <vector>
+#include <boost/math/distributions/students_t.hpp>
+#include "distribution.h"
+
+namespace stochastic {
+/**
+ * Student's t distribution
+ */
+class StudentstDistribution : public Distribution {
+ public:
+  /**
+   * @constructor Delete default constructor
+   */
+  StudentstDistribution() = delete;
+
+  /**
+   * @constructor Construct Student's t distribution with specified mean,
+   * standard deviation (scale) and degrees of freedom
+   * @param[in] mean Mean of distribution
+   * @param[in] std_dev Standard deviation (scale) of distribution
+   * @param[in] dof Degrees of freedom of distribution
+   */
+  StudentstDistribution(double mean, double std_dev, double dof);
+
+  /**
+   * @destructor Virtual destructor
+   */
+  virtual ~StudentstDistribution(){};
+
+  /**
+   * Get the name of the distribution model
+   * @return Model name as a string
+   */
+  std::string name() const override { return "StudentstDist"; };
+
+  /**
+   * Compute the cumulative distribution function (CDF) of the distribution at
+   * specified input locations
+   * @param[in] locations Vector containing locations at which to
+   * calculate CDF
+   * @return Vector of evaluated values of CDF at input locations
+   */
+  std::vector<double> cumulative_dist_func(
+      const std::vector<double>& locations) const override;
+
+  /**
+   * Compute the inverse cumulative distribution function (ICDF) of the
+   * distribution at specified input locations
+   * @param[in] probabilities Vector containing probabilities at which to
+   * calculate ICDF
+   * @return Vector of evaluated values of ICDF at input locations
+   */
+  std::vector<double> inv_cumulative_dist_func(
+      const std::vector<double>& probabilities) const override;
+
+ protected:
+  double mean_;    /**< Distribution mean */
+  double std_dev_; /**< Distribution standard deviation */
+  double dof_;     /**< Degrees of freedom */
+  boost::math::students_t distribution_; /**< Student's t distribution */
+};
+}  // namespace stochastic
+
+#endif  // _STUDENTS_T_DIST_H_
diff --git a/modules/createEVENT/common/smelt/uniform_dist.cc b/modules/createEVENT/common/smelt/uniform_dist.cc
new file mode 100644
index 000000000..2f51a063a
--- /dev/null
+++ b/modules/createEVENT/common/smelt/uniform_dist.cc
@@ -0,0 +1,32 @@
+#include <vector>
+#include <boost/math/distributions/uniform.hpp>
+#include "uniform_dist.h"
+
+stochastic::UniformDistribution::UniformDistribution(double lower, double upper)
+    : Distribution(),
+      lower_bound_{lower},
+      upper_bound_{upper},
+      distribution_{lower, upper}
+{}
+
+std::vector<double> stochastic::UniformDistribution::cumulative_dist_func(
+    const std::vector<double>& locations) const {
+  std::vector<double> evaluations(locations.size());
+
+  for (unsigned int i = 0; i < locations.size(); ++i) {
+    evaluations[i] = cdf(distribution_, locations[i]);
+  }
+
+  return evaluations;
+}
+
+std::vector<double> stochastic::UniformDistribution::inv_cumulative_dist_func(
+    const std::vector<double>& probabilities) const {
+  std::vector<double> evaluations(probabilities.size());
+
+  for (unsigned int i = 0; i < probabilities.size(); ++i) {
+    evaluations[i] = quantile(distribution_, probabilities[i]);
+  }
+
+  return evaluations;
+}
diff --git a/modules/createEVENT/common/smelt/uniform_dist.h b/modules/createEVENT/common/smelt/uniform_dist.h
new file mode 100644
index 000000000..ac1cd1c08
--- /dev/null
+++ b/modules/createEVENT/common/smelt/uniform_dist.h
@@ -0,0 +1,66 @@
+#ifndef _UNIFORM_DIST_H_
+#define _UNIFORM_DIST_H_
+
+#include <string>
+#include <vector>
+#include <boost/math/distributions/uniform.hpp>
+#include "distribution.h"
+
+namespace stochastic {
+/**
+ * Uniform distribution
+ */
+class UniformDistribution : public Distribution {
+ public:
+  /**
+   * @constructor Delete default constructor
+   */
+  UniformDistribution() = delete;
+
+  /**
+   * @constructor Construct uniform distribution with specified lower and
+   * upper bounds
+   * @param[in] lower Lower bound
+   * @param[in] upper Upper bound
+   */
+  UniformDistribution(double lower, double upper);
+
+  /**
+   * @destructor Virtual destructor
+   */
+  virtual ~UniformDistribution(){};
+
+  /**
+   * Get the name of the distribution model
+   * @return Model name as a string
+   */
+  std::string name() const override { return "UniformDist"; };
+
+  /**
+   * Compute the cumulative distribution function (CDF) of the distribution at
+   * specified input locations
+   * @param[in] locations Vector containing locations at which to
+   * calculate CDF
+   * @return Vector of evaluated values of CDF at input locations
+   */
+  std::vector<double> cumulative_dist_func(
+      const std::vector<double>& locations) const override;
+
+  /**
+   * Compute the inverse cumulative distribution function (ICDF) of the
+   * distribution at specified input locations
+   * @param[in] probabilities Vector containing probabilities at which to
+   * calculate ICDF
+   * @return Vector of evaluated values of ICDF at input locations
+   */
+  std::vector<double> inv_cumulative_dist_func(
+      const std::vector<double>& probabilities) const override;
+
+ protected:
+  double lower_bound_; /**< Distribution lower bound */
+  double upper_bound_; /**< Distribution upper bound */
+  boost::math::uniform distribution_; /**< Uniform distribution */
+};
+}  // namespace stochastic
+
+#endif  // _UNIFORM_DIST_H_
diff --git a/modules/createEVENT/common/smelt/vlachos_et_al.cc b/modules/createEVENT/common/smelt/vlachos_et_al.cc
new file mode 100644
index 000000000..0065eef25
--- /dev/null
+++ b/modules/createEVENT/common/smelt/vlachos_et_al.cc
@@ -0,0 +1,919 @@
+#define
_USE_MATH_DEFINES +#include +#include +#include +#include +#include +#include +#include +#include +// Boost random generator +#include +#include +#include +// Eigen dense matrices +#include + +#include "factory.h" +#include "function_dispatcher.h" +#include "json_object.h" +#include "lognormal_dist.h" +#include "normal_dist.h" +#include "normal_multivar.h" +#include "numeric_utils.h" +#include "vlachos_et_al.h" + +stochastic::VlachosEtAl::VlachosEtAl(double moment_magnitude, + double rupture_distance, double vs30, + double orientation, + unsigned int num_spectra, + unsigned int num_sims) + : StochasticModel(), + moment_magnitude_{moment_magnitude / 6.0}, + rupture_dist_{(rupture_distance + 5.0) / 30.0}, + vs30_{vs30 / 450.0}, + orientation_{orientation}, + time_step_{0.01}, + freq_step_{0.2}, + cutoff_freq_{220.0}, + num_spectra_{num_spectra}, + num_sims_{num_sims}, + seed_value_{std::numeric_limits::infinity()}, + model_parameters_{18} { + model_name_ = "VlachosEtAl"; + // Factors for site condition based on Vs30 + double site_soft = 0.0, site_medium = 0.0, site_hard = 0.0; + if (vs30 <= 300.0) { + site_soft = 1.0; + } else if (vs30 <= 450.0) { + site_medium = 1.0; + } else { + site_hard = 1.0; + } + + // Estimated conditional mean values + Eigen::VectorXd conditional_means(7); + // clang-format off + conditional_means << + 1.0, moment_magnitude_, std::log(rupture_dist_), + moment_magnitude_ * std::log(rupture_dist_), site_soft * std::log(vs30_), + site_medium * std::log(vs30_), site_hard * std::log(vs30_); + // clang-format on + + // Restricted Maximum Likelihood method regression coefficients and variance + // components of the normal model parameters (Table 3 on page 13) + Eigen::MatrixXd beta(18, 7); + // clang-format off + beta << + -1.1417, 1.0917, 1.9125, -0.9696, 0.0971, 0.3476, -0.6740, + 1.8052,-1.8381, -3.5874, 3.7895, 0.3236, 0.5497, 0.2876, + 1.8969,-1.8819, -2.0818, 1.9000, -0.3520, -0.6959, -0.0025, + 1.6627,-1.6922, -1.2509, 1.1880, -0.5170, -1.0157, -0.1041, + 3.8703,-3.4745, -0.0816, 0.0166, 0.4904, 0.8697, 0.3179, + 1.1043,-1.1852, -1.0068, 0.9388, -0.5603, -0.8855, -0.3174, + 1.1935,-1.2922, -0.7028, 0.6975, -0.6629, -1.1075, -0.4542, + 1.7895,-1.5014, -0.0300, -0.1306, 0.4526, 0.7132, 0.1522, + -3.6404, 3.3189, -0.5316, 0.3874, -0.3757, -0.8334, 0.1006, + -2.2742, 2.1454, 0.6315, -0.6620, 0.1093, -0.1028, -0.0479, + 0.6930, -0.6202, 1.8037, -1.6064, 0.0727, -0.1498, -0.0722, + 1.3003, -1.2004, -1.2210, 1.0623, -0.0252, 0.1885, 0.0069, + 0.4604, -0.4087, -0.5057, 0.4486, 0.1073, -0.0219, -0.1352, + 2.2304, -2.0398, -0.1364, 0.1910, 0.2425, 0.1801, 0.3233, + 2.3806, -2.2011, -0.3256, 0.2226, -0.0221, 0.0970, 0.0762, + 0.2057, -0.1714, 0.3385, -0.2229, 0.0802, 0.2649, 0.0396, + -7.6011, 6.8507, -2.3609, 0.9201, -0.7508, -0.7903, -0.6204, + -6.3472, 5.8241, 3.2994, -2.8774, -0.1411, -0.5298, -0.0203; + // clang-format on + + // Variance of model parameters (Table 3 on page 13) + Eigen::VectorXd variance(18); + // clang-format off + variance << + 0.90, 0.80, 0.78, 0.74, 0.66, 0.73, 0.72, 0.70, 0.69, + 0.78, 0.90, 0.90, 0.90, 0.90, 0.80, 0.90, 0.35, 0.80; + // clang-format on + + // Estimated correlation matrix (Table A1 on page 24) + Eigen::MatrixXd correlation_matrix(18, 18); + // clang-format off + correlation_matrix << + 1.0000, 0.0382, -0.0912, -0.0701, -0.0214, -0.0849, -0.0545, -0.0185, 0.0270, -0.0122, 0.0059, -0.0344, -0.0342, 0.0409, -0.0137, -0.0168, -0.0990, -0.6701, + 0.0382, 1.0000, -0.1159, -0.1856, 0.0681, -0.2018, -0.2765, -0.0304, -0.1719, -0.1157, 
-0.0347, -0.0277, -0.0189, 0.0357, 0.0657, -0.0070, 0.3690, -0.0510, + -0.0912, -0.1159, 1.0000, 0.9467, 0.4123, 0.4815, 0.4240, 0.2120, 0.1070, -0.1898, 0.0506, -0.0661, -0.0380, 0.0260, 0.0506, -0.0317, -0.0278, 0.0245, + -0.0701, -0.1856, 0.9467, 1.0000, 0.4075, 0.4891, 0.4940, 0.2285, 0.2009, -0.1709, 0.0365, -0.0579, -0.0999, 0.0467, 0.0410, 0.0027, -0.0966, 0.0631, + -0.0214, 0.0681, 0.4123, 0.4075, 1.0000, 0.1772, 0.1337, 0.7315, -0.0066, -0.2787, 0.0703, -0.0541, -0.0453, 0.1597, 0.0792, 0.0220, 0.0606, -0.0844, + -0.0849, -0.2018, 0.4815, 0.4891, 0.1772, 1.0000, 0.9448, 0.3749, 0.1682, -0.0831, 0.0124, -0.1236, -0.0346, -0.0054, 0.0877, -0.0197, -0.0867, 0.0281, + -0.0545, -0.2765, 0.4240, 0.4940, 0.1337, 0.9448, 1.0000, 0.3530, 0.2305, -0.0546, -0.0223, -0.0782, -0.0872, 0.0074, 0.0999, 0.0066, -0.1358, 0.0626, + -0.0185, -0.0304, 0.2120, 0.2285, 0.7315, 0.3749, 0.3530, 1.0000, 0.1939, -0.0617, -0.0017, -0.0942, -0.0332, 0.0813, 0.0810, -0.0032, -0.0870, -0.0599, + 0.0270, -0.1719, 0.1070, 0.2009, -0.0066, 0.1682, 0.2305, 0.1939, 1.0000, -0.1851, -0.2073, -0.0756, -0.1637, -0.0865, 0.0699, -0.0485, -0.2153, 0.0320, + -0.0122, -0.1157, -0.1898, -0.1709, -0.2787, -0.0831, -0.0546, -0.0617, -0.1851, 1.0000, 0.2139, 0.0769, 0.1391, 0.0769, -0.1838, 0.0377, -0.1615, 0.1000, + 0.0059, -0.0347, 0.0506, 0.0365, 0.0703, 0.0124, -0.0223, -0.0017, -0.2073, 0.2139, 1.0000, -0.1102, -0.0530, 0.0791, 0.0012, 0.0090, -0.0236, 0.0037, + -0.0344, -0.0277, -0.0661, -0.0579, -0.0541, -0.1236, -0.0782, -0.0942, -0.0756, 0.0769, -0.1102, 1.0000, -0.2562, -0.0406, 0.3154, 0.0065, -0.0093, -0.0354, + -0.0342, -0.0189, -0.0380, -0.0999, -0.0453, -0.0346, -0.0872, -0.0332, -0.1637, 0.1391, -0.0530, -0.2562, 1.0000, -0.1836, -0.1624, -0.5646, 0.0216, 0.0243, + 0.0409, 0.0357, 0.0260, 0.0467, 0.1597, -0.0054, 0.0074, 0.0813, -0.0865, 0.0769, 0.0791, -0.0406, -0.1836, 1.0000, 0.1624, 0.1989, 0.0549, -0.0411, + -0.0137, 0.0657, 0.0506, 0.0410, 0.0792, 0.0877, 0.0999, 0.0810, 0.0699, -0.1838, 0.0012, 0.3154, -0.1624, 0.1624, 1.0000, 0.1552, 0.0844, -0.0637, + -0.0168, -0.0070, -0.0317, 0.0027, 0.0220, -0.0197, 0.0066, -0.0032, -0.0485, 0.0377, 0.0090, 0.0065, -0.5646, 0.1989, 0.1552, 1.0000, 0.0058, 0.0503, + -0.0990, 0.3690, -0.0278, -0.0966, 0.0606, -0.0867, -0.1358, -0.0870, -0.2153, -0.1615, -0.0236, -0.0093, 0.0216, 0.0549, 0.0844, 0.0058, 1.0000, -0.0930, + -0.6701, -0.0510, 0.0245, 0.0631, -0.0844, 0.0281, 0.0626, -0.0599, 0.0320, 0.1000, 0.0037, -0.0354, 0.0243, -0.0411, -0.0637, 0.0503, -0.0930, 1.0000; + // clang-format on + + // Mean of transformed normal model parameters (described by Eq. 
25 on page 12)
+  means_ = beta * conditional_means;
+
+  // Convert the standard deviation and correlation to covariance
+  covariance_ = numeric_utils::corr_to_cov(
+      correlation_matrix, (variance.array().sqrt()).matrix());
+
+  // Generate realizations of model parameters
+  sample_generator_ =
+      Factory<numeric_utils::RandomGenerator>::instance()->create(
+          "MultivariateNormal");
+  sample_generator_->generate(parameter_realizations_, means_, covariance_,
+                              num_spectra_);
+  parameter_realizations_.transposeInPlace();
+
+  // Create distributions for model parameters
+  model_parameters_[0] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(-1.735), std::move(0.523));
+  model_parameters_[1] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(1.009), std::move(0.422));
+  model_parameters_[2] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.249), std::move(1.759));
+  model_parameters_[3] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.768), std::move(1.958));
+  model_parameters_[4] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(2.568), std::move(0.557));
+  model_parameters_[5] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.034), std::move(1.471));
+  model_parameters_[6] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.441), std::move(1.733));
+  model_parameters_[7] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(3.356), std::move(0.473));
+  model_parameters_[8] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "BetaDist", std::move(2.516), std::move(9.174));
+  model_parameters_[9] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "BetaDist", std::move(3.582), std::move(15.209));
+  model_parameters_[10] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(0.746), std::move(0.404));
+  model_parameters_[11] =
+      Factory<stochastic::Distribution, double, double, double>::instance()
+          ->create("StudentstDist", std::move(0.205), std::move(0.232),
+                   std::move(7.250));
+  model_parameters_[12] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "InverseGaussianDist", std::move(0.499), std::move(0.213));
+  model_parameters_[13] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(0.702), std::move(0.435));
+  model_parameters_[14] =
+      Factory<stochastic::Distribution, double, double, double>::instance()
+          ->create("StudentstDist", std::move(0.792), std::move(0.157),
+                   std::move(4.223));
+  model_parameters_[15] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "InverseGaussianDist", std::move(0.350), std::move(0.170));
+  model_parameters_[16] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(9.470), std::move(1.317));
+  model_parameters_[17] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(3.658), std::move(0.375));
+
+  // Standard normal distribution with mean at 0.0 and standard deviation of 1.0
+  auto std_normal_dist =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.0), std::move(1.0));
+
+  physical_parameters_.resize(parameter_realizations_.rows(),
+                              parameter_realizations_.cols());
+
+  // Transform sample normal model parameters to physical space
+  for (unsigned int i = 0; i < parameter_realizations_.rows(); ++i) {
+    for (unsigned int j = 0; j < model_parameters_.size(); ++j) {
+      physical_parameters_(i, j) =
+          (model_parameters_[j]->inv_cumulative_dist_func(
+              std_normal_dist->cumulative_dist_func(
+                  std::vector<double>{parameter_realizations_(i, j)})))[0];
+    }
+  }
+}
+
+stochastic::VlachosEtAl::VlachosEtAl(double moment_magnitude,
+                                     double rupture_distance, double vs30,
+                                     double orientation,
+                                     unsigned int num_spectra,
+                                     unsigned int num_sims,
+                                     int seed_value)
+    : StochasticModel(),
+      moment_magnitude_{moment_magnitude / 6.0},
rupture_dist_{(rupture_distance + 5.0) / 30.0}, + vs30_{vs30 / 450.0}, + orientation_{orientation}, + time_step_{0.01}, + freq_step_{0.2}, + cutoff_freq_{220.0}, + num_spectra_{num_spectra}, + num_sims_{num_sims}, + seed_value_{seed_value}, + model_parameters_{18} { + model_name_ = "VlachosEtAl"; + // Factors for site condition based on Vs30 + double site_soft = 0.0, site_medium = 0.0, site_hard = 0.0; + if (vs30 <= 300.0) { + site_soft = 1.0; + } else if (vs30 <= 450.0) { + site_medium = 1.0; + } else { + site_hard = 1.0; + } + + // Estimated conditional mean values + Eigen::VectorXd conditional_means(7); + // clang-format off + conditional_means << + 1.0, moment_magnitude_, std::log(rupture_dist_), + moment_magnitude_ * std::log(rupture_dist_), site_soft * std::log(vs30_), + site_medium * std::log(vs30_), site_hard * std::log(vs30_); + // clang-format on + + // Restricted Maximum Likelihood method regression coefficients and variance + // components of the normal model parameters (Table 3 on page 13) + Eigen::MatrixXd beta(18, 7); + // clang-format off + beta << + -1.1417, 1.0917, 1.9125, -0.9696, 0.0971, 0.3476, -0.6740, + 1.8052,-1.8381, -3.5874, 3.7895, 0.3236, 0.5497, 0.2876, + 1.8969,-1.8819, -2.0818, 1.9000, -0.3520, -0.6959, -0.0025, + 1.6627,-1.6922, -1.2509, 1.1880, -0.5170, -1.0157, -0.1041, + 3.8703,-3.4745, -0.0816, 0.0166, 0.4904, 0.8697, 0.3179, + 1.1043,-1.1852, -1.0068, 0.9388, -0.5603, -0.8855, -0.3174, + 1.1935,-1.2922, -0.7028, 0.6975, -0.6629, -1.1075, -0.4542, + 1.7895,-1.5014, -0.0300, -0.1306, 0.4526, 0.7132, 0.1522, + -3.6404, 3.3189, -0.5316, 0.3874, -0.3757, -0.8334, 0.1006, + -2.2742, 2.1454, 0.6315, -0.6620, 0.1093, -0.1028, -0.0479, + 0.6930, -0.6202, 1.8037, -1.6064, 0.0727, -0.1498, -0.0722, + 1.3003, -1.2004, -1.2210, 1.0623, -0.0252, 0.1885, 0.0069, + 0.4604, -0.4087, -0.5057, 0.4486, 0.1073, -0.0219, -0.1352, + 2.2304, -2.0398, -0.1364, 0.1910, 0.2425, 0.1801, 0.3233, + 2.3806, -2.2011, -0.3256, 0.2226, -0.0221, 0.0970, 0.0762, + 0.2057, -0.1714, 0.3385, -0.2229, 0.0802, 0.2649, 0.0396, + -7.6011, 6.8507, -2.3609, 0.9201, -0.7508, -0.7903, -0.6204, + -6.3472, 5.8241, 3.2994, -2.8774, -0.1411, -0.5298, -0.0203; + // clang-format on + + // Variance of model parameters (Table 3 on page 13) + Eigen::VectorXd variance(18); + // clang-format off + variance << + 0.90, 0.80, 0.78, 0.74, 0.66, 0.73, 0.72, 0.70, 0.69, + 0.78, 0.90, 0.90, 0.90, 0.90, 0.80, 0.90, 0.35, 0.80; + // clang-format on + + // Estimated correlation matrix (Table A1 on page 24) + Eigen::MatrixXd correlation_matrix(18, 18); + // clang-format off + correlation_matrix << + 1.0000, 0.0382, -0.0912, -0.0701, -0.0214, -0.0849, -0.0545, -0.0185, 0.0270, -0.0122, 0.0059, -0.0344, -0.0342, 0.0409, -0.0137, -0.0168, -0.0990, -0.6701, + 0.0382, 1.0000, -0.1159, -0.1856, 0.0681, -0.2018, -0.2765, -0.0304, -0.1719, -0.1157, -0.0347, -0.0277, -0.0189, 0.0357, 0.0657, -0.0070, 0.3690, -0.0510, + -0.0912, -0.1159, 1.0000, 0.9467, 0.4123, 0.4815, 0.4240, 0.2120, 0.1070, -0.1898, 0.0506, -0.0661, -0.0380, 0.0260, 0.0506, -0.0317, -0.0278, 0.0245, + -0.0701, -0.1856, 0.9467, 1.0000, 0.4075, 0.4891, 0.4940, 0.2285, 0.2009, -0.1709, 0.0365, -0.0579, -0.0999, 0.0467, 0.0410, 0.0027, -0.0966, 0.0631, + -0.0214, 0.0681, 0.4123, 0.4075, 1.0000, 0.1772, 0.1337, 0.7315, -0.0066, -0.2787, 0.0703, -0.0541, -0.0453, 0.1597, 0.0792, 0.0220, 0.0606, -0.0844, + -0.0849, -0.2018, 0.4815, 0.4891, 0.1772, 1.0000, 0.9448, 0.3749, 0.1682, -0.0831, 0.0124, -0.1236, -0.0346, -0.0054, 0.0877, -0.0197, -0.0867, 0.0281, + -0.0545, 
-0.2765, 0.4240, 0.4940, 0.1337, 0.9448, 1.0000, 0.3530, 0.2305, -0.0546, -0.0223, -0.0782, -0.0872, 0.0074, 0.0999, 0.0066, -0.1358, 0.0626, + -0.0185, -0.0304, 0.2120, 0.2285, 0.7315, 0.3749, 0.3530, 1.0000, 0.1939, -0.0617, -0.0017, -0.0942, -0.0332, 0.0813, 0.0810, -0.0032, -0.0870, -0.0599, + 0.0270, -0.1719, 0.1070, 0.2009, -0.0066, 0.1682, 0.2305, 0.1939, 1.0000, -0.1851, -0.2073, -0.0756, -0.1637, -0.0865, 0.0699, -0.0485, -0.2153, 0.0320, + -0.0122, -0.1157, -0.1898, -0.1709, -0.2787, -0.0831, -0.0546, -0.0617, -0.1851, 1.0000, 0.2139, 0.0769, 0.1391, 0.0769, -0.1838, 0.0377, -0.1615, 0.1000, + 0.0059, -0.0347, 0.0506, 0.0365, 0.0703, 0.0124, -0.0223, -0.0017, -0.2073, 0.2139, 1.0000, -0.1102, -0.0530, 0.0791, 0.0012, 0.0090, -0.0236, 0.0037, + -0.0344, -0.0277, -0.0661, -0.0579, -0.0541, -0.1236, -0.0782, -0.0942, -0.0756, 0.0769, -0.1102, 1.0000, -0.2562, -0.0406, 0.3154, 0.0065, -0.0093, -0.0354, + -0.0342, -0.0189, -0.0380, -0.0999, -0.0453, -0.0346, -0.0872, -0.0332, -0.1637, 0.1391, -0.0530, -0.2562, 1.0000, -0.1836, -0.1624, -0.5646, 0.0216, 0.0243, + 0.0409, 0.0357, 0.0260, 0.0467, 0.1597, -0.0054, 0.0074, 0.0813, -0.0865, 0.0769, 0.0791, -0.0406, -0.1836, 1.0000, 0.1624, 0.1989, 0.0549, -0.0411, + -0.0137, 0.0657, 0.0506, 0.0410, 0.0792, 0.0877, 0.0999, 0.0810, 0.0699, -0.1838, 0.0012, 0.3154, -0.1624, 0.1624, 1.0000, 0.1552, 0.0844, -0.0637, + -0.0168, -0.0070, -0.0317, 0.0027, 0.0220, -0.0197, 0.0066, -0.0032, -0.0485, 0.0377, 0.0090, 0.0065, -0.5646, 0.1989, 0.1552, 1.0000, 0.0058, 0.0503, + -0.0990, 0.3690, -0.0278, -0.0966, 0.0606, -0.0867, -0.1358, -0.0870, -0.2153, -0.1615, -0.0236, -0.0093, 0.0216, 0.0549, 0.0844, 0.0058, 1.0000, -0.0930, + -0.6701, -0.0510, 0.0245, 0.0631, -0.0844, 0.0281, 0.0626, -0.0599, 0.0320, 0.1000, 0.0037, -0.0354, 0.0243, -0.0411, -0.0637, 0.0503, -0.0930, 1.0000; + // clang-format on + + // Mean of transformed normal model parameters (described by Eq. 
25 on page 12)
+  means_ = beta * conditional_means;
+
+  // Convert the standard deviation and correlation to covariance
+  covariance_ = numeric_utils::corr_to_cov(
+      correlation_matrix, (variance.array().sqrt()).matrix());
+
+  // Generate realizations of model parameters
+  sample_generator_ =
+      Factory<numeric_utils::RandomGenerator, int>::instance()->create(
+          "MultivariateNormal", std::move(seed_value_));
+  sample_generator_->generate(parameter_realizations_, means_, covariance_,
+                              num_spectra_);
+  parameter_realizations_.transposeInPlace();
+
+  // Create distributions for model parameters
+  model_parameters_[0] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(-1.735), std::move(0.523));
+  model_parameters_[1] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(1.009), std::move(0.422));
+  model_parameters_[2] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.249), std::move(1.759));
+  model_parameters_[3] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.768), std::move(1.958));
+  model_parameters_[4] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(2.568), std::move(0.557));
+  model_parameters_[5] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.034), std::move(1.471));
+  model_parameters_[6] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.441), std::move(1.733));
+  model_parameters_[7] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(3.356), std::move(0.473));
+  model_parameters_[8] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "BetaDist", std::move(2.516), std::move(9.174));
+  model_parameters_[9] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "BetaDist", std::move(3.582), std::move(15.209));
+  model_parameters_[10] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(0.746), std::move(0.404));
+  model_parameters_[11] =
+      Factory<stochastic::Distribution, double, double, double>::instance()
+          ->create("StudentstDist", std::move(0.205), std::move(0.232),
+                   std::move(7.250));
+  model_parameters_[12] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "InverseGaussianDist", std::move(0.499), std::move(0.213));
+  model_parameters_[13] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(0.702), std::move(0.435));
+  model_parameters_[14] =
+      Factory<stochastic::Distribution, double, double, double>::instance()
+          ->create("StudentstDist", std::move(0.792), std::move(0.157),
+                   std::move(4.223));
+  model_parameters_[15] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "InverseGaussianDist", std::move(0.350), std::move(0.170));
+  model_parameters_[16] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(9.470), std::move(1.317));
+  model_parameters_[17] =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "LognormalDist", std::move(3.658), std::move(0.375));
+
+  // Standard normal distribution with mean at 0.0 and standard deviation of 1.0
+  auto std_normal_dist =
+      Factory<stochastic::Distribution, double, double>::instance()->create(
+          "NormalDist", std::move(0.0), std::move(1.0));
+
+  physical_parameters_.resize(parameter_realizations_.rows(),
+                              parameter_realizations_.cols());
+
+  // Transform sample normal model parameters to physical space
+  for (unsigned int i = 0; i < parameter_realizations_.rows(); ++i) {
+    for (unsigned int j = 0; j < model_parameters_.size(); ++j) {
+      physical_parameters_(i, j) =
+          (model_parameters_[j]->inv_cumulative_dist_func(
+              std_normal_dist->cumulative_dist_func(
+                  std::vector<double>{parameter_realizations_(i, j)})))[0];
+    }
+  }
+}
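For reference, both constructors above map each correlated standard-normal realization to physical space by passing it through the standard-normal CDF and then the quantile function of the target marginal distribution. A minimal standalone sketch of that transform for a single draw, assuming only Boost.Math (the draw z is a hypothetical value, and the lognormal parameters are the ones used for model parameter 0 above):

    #include <iostream>
    #include <boost/math/distributions/lognormal.hpp>
    #include <boost/math/distributions/normal.hpp>

    int main() {
      // Standard normal CDF maps the correlated normal draw to a probability
      boost::math::normal std_normal(0.0, 1.0);
      // Target marginal for the first model parameter: lognormal(-1.735, 0.523)
      boost::math::lognormal target(-1.735, 0.523);

      double z = 0.42;                        // hypothetical normal realization
      double u = cdf(std_normal, z);          // probability in (0, 1)
      double physical = quantile(target, u);  // physical-space parameter value

      std::cout << "physical parameter = " << physical << std::endl;
      return 0;
    }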
+
+utilities::JsonObject stochastic::VlachosEtAl::generate(
+    const std::string& event_name, bool units) {
+
+  // Pool of acceleration time histories based on number of spectra and
+  // simulations requested
+  std::vector<std::vector<std::vector<double>>> acceleration_pool(
+      num_spectra_,
+      std::vector<std::vector<double>>(num_sims_, std::vector<double>()));
+
+  // Generate family of time histories for each spectrum. Family size is
+  // specified by the requested number of simulations per spectrum.
+  try {
+    for (unsigned int i = 0; i < num_spectra_; ++i) {
+      time_history_family(acceleration_pool[i], physical_parameters_.row(i));
+    }
+  } catch (const std::exception& e) {
+    std::cerr << e.what();
+    throw;
+  }
+
+  // Create JsonObject for events
+  auto events = utilities::JsonObject();
+  std::vector<utilities::JsonObject> events_array(num_spectra_ * num_sims_);
+
+  // Add pattern information for JSON
+  auto pattern_x = utilities::JsonObject();
+  auto pattern_y = utilities::JsonObject();
+  pattern_x.add_value("type", "UniformAcceleration");
+  pattern_x.add_value("timeSeries", "accel_x");
+  pattern_x.add_value("dof", 1);
+  pattern_y.add_value("type", "UniformAcceleration");
+  pattern_y.add_value("timeSeries", "accel_y");
+  pattern_y.add_value("dof", 2);
+
+  // Create JSON for specific event
+  auto event_data = utilities::JsonObject();
+  // Loop over spectra
+  for (unsigned int i = 0; i < num_spectra_; ++i) {
+    // Loop over different simulations for current spectrum
+    for (unsigned int j = 0; j < num_sims_; ++j) {
+      event_data.add_value("name", event_name + "_Spectra" + std::to_string(i) +
+                                       "_Sim" + std::to_string(j));
+      event_data.add_value("type", "Seismic");
+      event_data.add_value("dT", time_step_);
+      event_data.add_value("numSteps", acceleration_pool[i][j].size());
+      event_data.add_value(
+          "pattern", std::vector<utilities::JsonObject>{pattern_x, pattern_y});
+
+      // Rotate accelerations, if necessary
+      std::vector<double> x_accels(acceleration_pool[i][j].size());
+      std::vector<double> y_accels(acceleration_pool[i][j].size());
+      rotate_acceleration(acceleration_pool[i][j], x_accels, y_accels, units);
+
+      // Add time histories for x and y directions to event
+      auto time_history_x = utilities::JsonObject();
+      auto time_history_y = utilities::JsonObject();
+      time_history_x.add_value("name", "accel_x");
+      time_history_x.add_value("type", "Value");
+      time_history_x.add_value("dT", time_step_);
+      time_history_x.add_value("data", x_accels);
+      time_history_y.add_value("name", "accel_y");
+      time_history_y.add_value("type", "Value");
+      time_history_y.add_value("dT", time_step_);
+      time_history_y.add_value("data", y_accels);
+      event_data.add_value("timeSeries", std::vector<utilities::JsonObject>{
+                                             time_history_x, time_history_y});
+      events_array[i * num_sims_ + j] = event_data;
+      event_data.clear();
+    }
+  }
+
+  events.add_value("Events", events_array);
+
+  return events;
+}
+
+bool stochastic::VlachosEtAl::generate(const std::string& event_name,
+                                       const std::string& output_location,
+                                       bool units) {
+  bool status = true;
+
+  // Generate pool of acceleration time histories
+  try {
+    auto json_output = generate(event_name, units);
+    json_output.write_to_file(output_location);
+  } catch (const std::exception& e) {
+    std::cerr << e.what();
+    status = false;
+    throw;
+  }
+
+  return status;
+}
+
+bool stochastic::VlachosEtAl::time_history_family(
+    std::vector<std::vector<double>>& time_histories,
+    const Eigen::VectorXd& parameters) const {
+  bool status = true;
+  auto identified_parameters = identify_parameters(parameters);
+
+  unsigned int num_times =
+      static_cast<unsigned int>(std::ceil(identified_parameters[17] / time_step_)) + 1;
+  unsigned int num_freqs =
+      static_cast<unsigned int>(std::ceil(cutoff_freq_ / freq_step_)) + 1;
+
+  std::vector<double> times(num_times);
+  std::vector<double> frequencies(num_freqs);
+  double total_time = (num_times - 1) * time_step_;
+
+  for (unsigned int i = 0; i < times.size(); ++i) {
+    times[i] = i * time_step_ / total_time;
+  }
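+
+  // The times above are non-dimensionalized by the total duration, so they
+  // run from 0 to 1; the first entry is replaced by a small positive value
+  // next, since the amplitude modulating function raises t to a negative
+  // power and is singular at t = 0.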
times[0] = 1E-6; + + for (unsigned int i = 0; i < frequencies.size(); ++i) { + frequencies[i] = i * freq_step_; + } + + // Calculate non-dimensional energy accumulation + auto energy = energy_accumulation( + std::vector{identified_parameters[0], identified_parameters[1]}, + times); + + // Calculate mode 1 and 2 dominant frequencies + auto mode_1_freqs = modal_frequencies( + std::vector{identified_parameters[2], identified_parameters[3], + identified_parameters[4]}, + energy); + auto mode_2_freqs = modal_frequencies( + std::vector{identified_parameters[5], identified_parameters[6], + identified_parameters[7]}, + energy); + + // Calculate 2nd modal participation factor + auto mode_2_participation = modal_participation_factor( + std::vector{identified_parameters[10], identified_parameters[11], + identified_parameters[12], identified_parameters[13], + identified_parameters[14], identified_parameters[15]}, + energy); + + // Calculate amplitude modulating function + auto amplitude_modulation = amplitude_modulating_function( + identified_parameters[17], identified_parameters[16], + std::vector{identified_parameters[0], identified_parameters[1]}, + times); + + // Parameters for high-pass Butterworth filter + int filter_order = 4; + double norm_cutoff_freq = 0.20; + + // Calculate energy content of the Butterworth filter transfer function + std::vector highpass_butter_energy(frequencies.size()); + double freq_ratio_sq; + for (unsigned int i = 0; i < frequencies.size(); ++i) { + freq_ratio_sq = std::pow(frequencies[i] / (2.0 * M_PI * norm_cutoff_freq), + 2 * filter_order); + highpass_butter_energy[i] = freq_ratio_sq / (1.0 + freq_ratio_sq); + } + + // Calculate the evolutionary power spectrum with unit variance at + // each time step + Eigen::MatrixXd power_spectrum(times.size(), frequencies.size()); + + for (unsigned int i = 0; i < times.size(); ++i) { + power_spectrum.row(i) = + kt_2(std::vector{mode_1_freqs[i], identified_parameters[8], 1.0, + mode_2_freqs[i], identified_parameters[9], + mode_2_participation[i]}, + frequencies, highpass_butter_energy); + + double freq_domain_integral = + 2.0 * numeric_utils::trapazoid_rule(power_spectrum.row(i), freq_step_); + + power_spectrum.row(i) = + power_spectrum.row(i) * amplitude_modulation[i] / freq_domain_integral; + + } + + // Get coefficients for highpass Butterworth filter + int num_samples = + static_cast(std::round(1.5 * static_cast(filter_order) / + (2.0 * norm_cutoff_freq)) / + time_step_ + + 1); + + auto hp_butter = + Dispatcher>, int, double>::instance() + ->dispatch("HighPassButter", filter_order, + norm_cutoff_freq / (1.0 / time_step_ / 2.0)); + + // Calculate filter impulse response for calculated number of samples + auto impulse_response = + Dispatcher, std::vector, std::vector, + int, int>::instance() + ->dispatch("ImpulseResponse", hp_butter[0], hp_butter[1], + filter_order, num_samples); + + try { + // Generate family of time histories + for (unsigned int i = 0; i < num_sims_; ++i) { + simulate_time_history(time_histories[i], power_spectrum); + post_process(time_histories[i], impulse_response); + } + } catch (const std::exception& e) { + std::cerr << e.what(); + status = false; + throw; + } + + return status; +} + +void stochastic::VlachosEtAl::simulate_time_history( + std::vector& time_history, + const Eigen::MatrixXd& power_spectrum) const { + unsigned int num_times = power_spectrum.rows(), + num_freqs = power_spectrum.cols(); + + time_history.resize(num_times, 0.0); + + std::vector times(num_times); + std::vector 
<double> frequencies(num_freqs);
+
+  for (unsigned int i = 0; i < times.size(); ++i) {
+    times[i] = i * time_step_;
+  }
+
+  for (unsigned int i = 0; i < frequencies.size(); ++i) {
+    frequencies[i] = i * freq_step_;
+  }
+
+  static unsigned int history_seed = static_cast<unsigned int>(std::time(nullptr));
+  history_seed = history_seed + 10;
+
+  auto generator =
+      seed_value_ != std::numeric_limits<int>::infinity()
+          ? boost::random::mt19937(static_cast<int>(seed_value_ + 10))
+          : boost::random::mt19937(history_seed);
+
+  boost::random::uniform_real_distribution<> distribution(0.0, 2.0 * M_PI);
+  boost::random::variate_generator<boost::random::mt19937&,
+                                   boost::random::uniform_real_distribution<>>
+      angle_gen(generator, distribution);
+
+  std::vector<double> phase_angle(num_freqs, 0.0);
+
+  for (auto & angle : phase_angle) {
+    angle = angle_gen();
+  }
+
+  // Loop over all frequencies and times to calculate time history
+  for (unsigned int i = 0; i < num_times; ++i) {
+    for (unsigned int j = 0; j < num_freqs; ++j) {
+      time_history[i] =
+          time_history[i] +
+          std::sqrt(power_spectrum(i, j)) *
+              std::cos(frequencies[j] * times[i] + phase_angle[j]);
+    }
+    time_history[i] = 2.0 * std::sqrt(freq_step_) * time_history[i];
+  }
+}
+
+bool stochastic::VlachosEtAl::post_process(
+    std::vector<double>& time_history,
+    const std::vector<double>& filter_imp_resp) const {
+
+  bool status = true;
+  double time_hann_2 = 1.0;
+
+  Eigen::VectorXd window = Eigen::VectorXd::Ones(time_history.size());
+  unsigned int window1_size =
+      static_cast<unsigned int>(time_hann_2 / time_step_ + 1);
+  unsigned int window2_size = static_cast<unsigned int>((window1_size - 1) / 2);
+
+  // Check if input time history length is sufficient
+  if (time_history.size() < window1_size) {
+    throw std::runtime_error(
+        "\nERROR: in stochastic::VlachosEtAl::post_process: Input time history "
+        "too short for Hanning Window size\n");
+  }
+
+  // Get Hanning window of length window1_size
+  auto hann_window =
+      Dispatcher<Eigen::VectorXd, unsigned int>::instance()->dispatch(
+          "HannWindow", window1_size);
+
+  window.head(window2_size) = hann_window.head(window2_size);
+  window.tail(window2_size) = hann_window.head(window2_size).reverse();
+
+  // Calculate mean of time history
+  double mean = std::accumulate(time_history.begin(), time_history.end(), 0.0) /
+                static_cast<double>(time_history.size());
+
+  // Apply window
+  for (unsigned int i = 0; i < time_history.size(); ++i) {
+    time_history[i] = window[i] * (time_history[i] - mean);
+  }
+
+  // Apply 4th order Butterworth filter
+  std::vector<double> filtered_history(filter_imp_resp.size() +
+                                       time_history.size() - 1);
+  try {
+    numeric_utils::convolve_1d(filter_imp_resp, time_history, filtered_history);
+  } catch (const std::exception& e) {
+    std::cerr << e.what();
+    status = false;
+    throw;
+  }
+
+  // Copy filtered results to time_history
+  time_history = filtered_history;
+
+  return status;
+}
+
+Eigen::VectorXd stochastic::VlachosEtAl::identify_parameters(
+    const Eigen::VectorXd& initial_params) const {
+  // Initialize non-dimensional cumulative energy
+  std::vector<double> energy(static_cast<unsigned int>(1.0 / 0.05) + 1, 0.0);
+
+  for (unsigned int i = 1; i < energy.size(); ++i) {
+    energy[i] = energy[i - 1] + 0.05;
+  }
+
+  // Initialize mode 1 parameters and frequencies
+  std::vector<double> mode_1_params = {initial_params(2),
+                                       initial_params(3),
+                                       initial_params(4)};
+  auto mode_1_freqs = modal_frequencies(mode_1_params, energy);
+
+  // Initialize mode 2 parameters and frequencies
+  std::vector<double> mode_2_params = {initial_params(5),
+                                       initial_params(6),
+                                       initial_params(7)};
+  auto mode_2_freqs = modal_frequencies(mode_2_params, energy);
+
+  double mode_1_mean = initial_params(11), mode_2_mean = initial_params(14);
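+
+  // NOTE: The realizations drawn in the constructor can violate the physical
+  // ordering of the two modes. The while loop further below re-samples the
+  // parameter vector until every mode-1 dominant frequency lies below its
+  // mode-2 counterpart and the mode-1 mean is below the mode-2 mean.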
+ + // Standard normal distribution with mean at 0.0 and standard deviation of 1.0 + auto std_normal_dist = + Factory::instance()->create( + "NormalDist", std::move(0.0), std::move(1.0)); + + Eigen::Matrix realizations( + initial_params.size(), 1); + Eigen::VectorXd transformed_realizations = initial_params; + + // Check if any mode 1 dominant frequencies across all non-dimensional energy + // values are greater than the corresponding values for mode 2 + bool freq_comparison = false; + for (unsigned int i = 0; i < mode_1_freqs.size(); ++i) { + if (mode_1_freqs[i] > mode_2_freqs[i]) { + freq_comparison = true; + break; + } + } + + // Iterate until suitable parameter values have been identified + while (freq_comparison || (mode_1_mean > mode_2_mean)) { + + // Generate realizations of parameters + sample_generator_->generate(realizations, means_, covariance_, 1); + + // Transform parameter realizations to physical space + for (unsigned int i = 0; i < initial_params.size(); ++i) { + transformed_realizations(i) = + (model_parameters_[i]->inv_cumulative_dist_func( + std_normal_dist->cumulative_dist_func( + std::vector{realizations(i, 0)})))[0]; + } + + // Calculate dominant modal frequencies + mode_1_freqs = + modal_frequencies(std::vector{transformed_realizations(2), + transformed_realizations(3), + transformed_realizations(4)}, + energy); + + mode_2_params = {transformed_realizations(5), transformed_realizations(6), + transformed_realizations(7)}; + mode_2_freqs = + modal_frequencies(std::vector{transformed_realizations(5), + transformed_realizations(6), + transformed_realizations(7)}, + energy); + + mode_1_mean = transformed_realizations(11); + mode_2_mean = transformed_realizations(14); + + // Check if any mode 1 dominant frequencies across all non-dimensional energy values + // are greater than the corresponding values for mode 2 + freq_comparison = false; + for (unsigned int i = 0; i < mode_1_freqs.size(); ++i) { + if (mode_1_freqs[i] > mode_2_freqs[i]) { + freq_comparison = true; + break; + } + } + } + + return transformed_realizations; +} + +std::vector stochastic::VlachosEtAl::modal_frequencies( + const std::vector& parameters, + const std::vector& energy) const { + std::vector frequencies(energy.size()); + + for (unsigned int i = 0; i < energy.size(); ++i) { + frequencies[i] = parameters[2] * std::pow(0.5 + energy[i], parameters[0]) * + std::pow(1.5 - energy[i], parameters[1]); + } + + return frequencies; +} + +std::vector stochastic::VlachosEtAl::energy_accumulation( + const std::vector& parameters, + const std::vector& times) const { + std::vector accumulated_energy(times.size()); + + for (unsigned int i = 0; i < times.size(); ++i) { + accumulated_energy[i] = + std::exp(-std::pow(times[i] / parameters[0], -parameters[1])) / + std::exp(-std::pow(1.0 / parameters[0], -parameters[1])); + } + + return accumulated_energy; +} + +std::vector stochastic::VlachosEtAl::modal_participation_factor( + const std::vector& parameters, + const std::vector& energy) const { + std::vector participation_factor(energy.size()); + + for (unsigned int i = 0; i < energy.size(); ++i) { + participation_factor[i] = std::pow( + 10.0, + parameters[0] * std::exp(-std::pow( + (energy[i] - parameters[1]) / parameters[2], 2)) + + parameters[3] * + std::exp( + -std::pow((energy[i] - parameters[4]) / parameters[5], 2)) - + 2.0); + } + + return participation_factor; +} + +std::vector stochastic::VlachosEtAl::amplitude_modulating_function( + double duration, double total_energy, const std::vector& parameters, + const 
std::vector& times) const { + std::vector func_vals(times.size()); + + double mult_term = total_energy * parameters[1] / (parameters[0] * duration); + double exponent_1 = std::pow(1.0 / parameters[0], -parameters[1]); + + for (unsigned int i = 0; i < times.size(); ++i) { + func_vals[i] = mult_term * + std::exp(exponent_1 - std::pow(times[i] / parameters[0], -parameters[1])) * + std::pow(times[i] / parameters[0], -1.0 - parameters[1]); + } + + return func_vals; +} + +Eigen::VectorXd stochastic::VlachosEtAl::kt_2( + const std::vector& parameters, + const std::vector& frequencies, + const std::vector& highpass_butter) const { + Eigen::VectorXd power_spectrum(frequencies.size()); + double mode1 = 0, mode2 = 0; + + for (unsigned int i = 0; i < frequencies.size(); ++i) { + mode1 = parameters[2] * + (1.0 + 4.0 * parameters[1] * parameters[1] * + std::pow(frequencies[i] / parameters[0], 2)) / + (std::pow(1 - std::pow(frequencies[i] / parameters[0], 2), 2) + + 4.0 * parameters[1] * parameters[1] * + std::pow(frequencies[i] / parameters[0], 2)); + mode2 = parameters[5] * + (1.0 + 4.0 * parameters[4] * parameters[4] * + std::pow(frequencies[i] / parameters[3], 2)) / + (std::pow(1 - std::pow(frequencies[i] / parameters[3], 2), 2) + + 4.0 * parameters[4] * parameters[4] * + std::pow(frequencies[i] / parameters[3], 2)); + + power_spectrum[i] = highpass_butter[i] * (mode1 + mode2); + } + + return power_spectrum; +} + +void stochastic::VlachosEtAl::rotate_acceleration( + const std::vector& acceleration, std::vector& x_accels, + std::vector& y_accels, bool units) const { + + x_accels.resize(acceleration.size()); + y_accels.resize(acceleration.size()); + + double conversion_factor = units ? 100.0 * 9.81 : 100.0; + + // No orientation specified to acceleration oriented along x-axis + if (std::abs(orientation_) < 1E-6) { + for (unsigned int i = 0; i < acceleration.size(); ++i) { + // Division by conversion_factor to convert either to m/s^2 or g + x_accels[i] = acceleration[i] / conversion_factor; + } + y_accels.assign(acceleration.size(), 0.0); + // Rotate accelerations to match orientation + } else { + for (unsigned int i = 0; i < acceleration.size(); ++i) { + // Division by conversion_factor to convert either to m/s^2 or g + x_accels[i] = + acceleration[i] * std::cos(orientation_ * M_PI / 180.0) / conversion_factor; + y_accels[i] = + acceleration[i] * std::sin(orientation_ * M_PI / 180.0) / conversion_factor; + } + } +} diff --git a/modules/createEVENT/common/smelt/vlachos_et_al.h b/modules/createEVENT/common/smelt/vlachos_et_al.h new file mode 100644 index 000000000..cd953e59f --- /dev/null +++ b/modules/createEVENT/common/smelt/vlachos_et_al.h @@ -0,0 +1,277 @@ +#ifndef _VLACHOS_ET_AL_H_ +#define _VLACHOS_ET_AL_H_ + +#include +#include +#include +#include +#include "distribution.h" +#include "json_object.h" +#include "numeric_utils.h" +#include "stochastic_model.h" + +namespace stochastic { +/** + * Stochastic model for generating scenario specific ground + * motion time histories. This is based on the paper: + * Vlachos C., Papakonstantinou K.G., & Deodatis G. (2018). + * Predictive model for site specific simulation of ground motions based on + * earthquake scenarios. Earthquake Engineering & Structural Dynamics, 47(1), + * 195-218. 
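+ * The model represents the ground acceleration with a bimodal Kanai-Tajimi
+ * evolutionary power spectrum whose modal parameters evolve with the
+ * non-dimensional cumulative energy of the record.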
+ */
+class VlachosEtAl : public StochasticModel {
+ public:
+  /**
+   * @constructor Delete default constructor
+   */
+  VlachosEtAl() = delete;
+
+  /**
+   * @constructor Construct scenario specific ground motion model based on input
+   *              parameters
+   * @param[in] moment_magnitude Moment magnitude of earthquake scenario
+   * @param[in] rupture_distance Closest-to-site rupture distance in kilometers
+   * @param[in] vs30 Soil shear wave velocity averaged over top 30 meters in
+   *                 meters per second
+   * @param[in] orientation Orientation of acceleration relative to global
+   *                        coordinates. Represents counter-clockwise angle (in
+   *                        degrees) away from x-axis rotating around z-axis in
+   *                        right-handed coordinate system.
+   * @param[in] num_spectra Number of evolutionary power spectra that should be
+   *                        generated.
+   * @param[in] num_sims Number of simulated ground motion time histories that
+   *                     should be generated per evolutionary power spectrum
+   */
+  VlachosEtAl(double moment_magnitude, double rupture_distance, double vs30,
+              double orientation, unsigned int num_spectra,
+              unsigned int num_sims);
+
+  /**
+   * @constructor Construct scenario specific ground motion model based on input
+   *              parameters
+   * @param[in] moment_magnitude Moment magnitude of earthquake scenario
+   * @param[in] rupture_distance Closest-to-site rupture distance in kilometers
+   * @param[in] vs30 Soil shear wave velocity averaged over top 30 meters in
+   *                 meters per second
+   * @param[in] orientation Orientation of acceleration relative to global
+   *                        coordinates. Represents counter-clockwise angle (in
+   *                        degrees) away from x-axis rotating around z-axis in
+   *                        right-handed coordinate system.
+   * @param[in] num_spectra Number of evolutionary power spectra that should be
+   *                        generated.
+   * @param[in] num_sims Number of simulated ground motion time histories that
+   *                     should be generated per evolutionary power spectrum
+   * @param[in] seed_value Value to seed random variables with to ensure
+   *                       repeatability
+   */
+  VlachosEtAl(double moment_magnitude, double rupture_distance, double vs30,
+              double orientation, unsigned int num_spectra,
+              unsigned int num_sims, int seed_value);
+
+  /**
+   * @destructor Virtual destructor
+   */
+  virtual ~VlachosEtAl() {};
+
+  /**
+   * Delete copy constructor
+   */
+  VlachosEtAl(const VlachosEtAl&) = delete;
+
+  /**
+   * Delete assignment operator
+   */
+  VlachosEtAl& operator=(const VlachosEtAl&) = delete;
+
+  /**
+   * Generate ground motion time histories based on input parameters
+   * and store outputs as JSON object. Throws exception if errors
+   * are encountered during time history generation.
+   * @param[in] event_name Name to assign to event
+   * @param[in] units Indicates that time histories should be returned in
+   *                  units of g. Defaults to false where time histories
+   *                  are returned in units of m/s^2
+   * @return JsonObject containing time histories
+   */
+  utilities::JsonObject generate(const std::string& event_name,
+                                 bool units = false) override;
+
+  /**
+   * Generate ground motion time histories based on input parameters
+   * and write results to file in JSON format. Throws exception if
+   * errors are encountered during time history generation.
+   * @param[in] event_name Name to assign to event
+   * @param[in, out] output_location Location to write outputs to
+   * @param[in] units Indicates that time histories should be returned in
+   *                  units of g.
Defaults to false where time histories
+   *                  are returned in units of m/s^2
+   * @return Returns true if successful, false otherwise
+   */
+  bool generate(const std::string& event_name,
+                const std::string& output_location,
+                bool units = false) override;
+
+  /**
+   * Compute a family of time histories for a particular power spectrum
+   * @param[in, out] time_histories Location where time histories should be
+   *                                stored
+   * @param[in] parameters Set of model parameters to use for calculating power
+   *                       spectrum and time histories
+   * @return Returns true if successful, false otherwise
+   */
+  bool time_history_family(std::vector<std::vector<double>>& time_histories,
+                           const Eigen::VectorXd& parameters) const;
+
+  /**
+   * Simulate fully non-stationary ground motion sample realization based on
+   * time and frequency discretization and the discretized evolutionary
+   * power spectrum. This is described by Eq-19 on page 8.
+   * @param[in, out] time_history Location where time history should be stored
+   * @param[in] power_spectrum Matrix containing values of power spectrum over
+   *                           range of frequencies at specified times.
+   */
+  void simulate_time_history(std::vector<double>& time_history,
+                             const Eigen::MatrixXd& power_spectrum) const;
+
+  /**
+   * Post-process the input time history as described in Vlachos et al. using
+   * multiple-window estimation technique after Conte & Peng (1997) and
+   * highpass Butterworth filter
+   * @param[in, out] time_history Time history to post-process. Post-processed
+   *                              results are also stored here.
+   * @param[in] filter_imp_resp Impulse response of Butterworth filter
+   * @return Returns true if successful, false otherwise
+   */
+  bool post_process(std::vector<double>& time_history,
+                    const std::vector<double>& filter_imp_resp) const;
+
+  /**
+   * Identifies modal frequency parameters for mode 1 and 2
+   * @param[in] initial_params Initial set of parameters
+   * @return Vector of identified parameters
+   */
+  Eigen::VectorXd identify_parameters(const Eigen::VectorXd& initial_params) const;
+
+  /**
+   * Calculate the dominant modal frequencies as a function of non-dimensional
+   * cumulative energy and the model parameters Q_k, alpha_k, and beta_k where
+   * k is the mode (either 1 or 2). This is defined by Eq-8 on page 6.
+   * @param[in] parameters Vector of values for alpha_k, beta_k and Q_k at k-th
+   *                       mode
+   * @param[in] energy Vector of non-dimensional energy values at which to
+   *                   calculate frequency
+   * @return Vector of frequency values at input energy values
+   */
+  std::vector<double> modal_frequencies(
+      const std::vector<double>& parameters,
+      const std::vector<double>& energy) const;
+
+  /**
+   * Calculate the energy accumulation over the times specified in the inputs as
+   * defined by Eq-5 on page 6.
+   * @param[in] parameters Vector of values containing energy parameters gamma
+   *                       and delta
+   * @param[in] times Vector containing non-dimensional times at which to
+   *                  calculate energy accumulation
+   * @return Vector containing accumulated energy values at specified input
+   *         times
+   */
+  std::vector<double> energy_accumulation(
+      const std::vector<double>& parameters,
+      const std::vector<double>& times) const;
+
+  /**
+   * Calculate the logarithmic normalized modal participation factor for the
+   * second mode as defined by Eq-11 on page 7.
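+   * In the implementation this takes the form
+   * 10^(F(I) * exp(-((E - mu(I)) / sigma(I))^2) +
+   *     F(II) * exp(-((E - mu(II)) / sigma(II))^2) - 2),
+   * where E is the non-dimensional cumulative energy.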
+   * @param[in] parameters Vector of values for F(I), mu(I), sigma(I), F(II),
+   *                       mu(II) and sigma(II)
+   * @param[in] energy Vector of non-dimensional energy values at which to
+   *                   calculate frequency
+   * @return Vector containing values of logarithmic normalized modal
+   *         participation factor for second mode at input energy values
+   */
+  std::vector<double> modal_participation_factor(
+      const std::vector<double>& parameters,
+      const std::vector<double>& energy) const;
+
+  /**
+   * Calculate amplitude modulating function as defined by Eq-7 on page 6
+   * @param[in] duration Total duration of target seismic record
+   * @param[in] total_energy Total energy content of seismic ground acceleration
+   * @param[in] parameters Vector of values containing energy parameters gamma
+   *                       and delta
+   * @param[in] times Vector containing non-dimensional times at which to
+   *                  calculate energy accumulation
+   * @return Vector containing values of amplitude modulating function at input
+   *         times
+   */
+  std::vector<double> amplitude_modulating_function(
+      double duration, double total_energy,
+      const std::vector<double>& parameters,
+      const std::vector<double>& times) const;
+
+  /**
+   * Calculate evolutionary power spectrum at specific time using parametric,
+   * bimodal, fully non-stationary Kanai-Tajimi (K-T) model of seismic ground
+   * acceleration signal consisting of 2 distinct spectral modes. This is
+   * described by Eq-1 on page 4.
+   * @param[in] parameters Vector of parameters fg_1, zeta_1, S0_1, fg_2, zeta_2
+   *                       and S0_2 which define the time varying modal dominant
+   *                       frequency, model apparent damping ratio, and modal
+   *                       participation factor associated with the two modes.
+   *                       NOTE: Values of fg_1, fg_2 and S0_2 must be for the
+   *                       particular time at which the power spectrum is
+   *                       desired
+   * @param[in] frequencies Vector of frequencies at which to evaluate
+   *                        evolutionary power spectrum
+   * @param[in] highpass_butter Vector of Butterworth filter transfer function
+   *                            energy content at input frequencies
+   * @return Vector containing values for model evolutionary power spectrum at
+   *         time defined by input parameters fg_1, fg_2 and S0_2.
+   */
+  Eigen::VectorXd kt_2(const std::vector<double>& parameters,
+                       const std::vector<double>& frequencies,
+                       const std::vector<double>& highpass_butter) const;
+
+  /**
+   * Rotate acceleration based on orientation angle
+   * @param[in] acceleration Acceleration to rotate
+   * @param[in, out] x_accels Vector to store x-component of acceleration to
+   * @param[in, out] y_accels Vector to store y-component of acceleration to
+   * @param[in] g_units Indicates that time histories should be returned in
+   *                    units of g
+   */
+  void rotate_acceleration(const std::vector<double>& acceleration,
+                           std::vector<double>& x_accels,
+                           std::vector<double>& y_accels, bool g_units) const;
+
+ private:
+  double moment_magnitude_; /**< Moment magnitude for scenario */
+  double rupture_dist_; /**< Closest-to-site rupture distance in kilometers */
+  double vs30_; /**< Soil shear wave velocity averaged over top 30 meters in
+                     meters per second */
+  double orientation_; /**< Counter-clockwise angle away from global x-axis */
+  double time_step_; /**< Temporal discretization. Set to 0.01 seconds */
+  double freq_step_; /**< Frequency discretization. Set to 0.2 Hz */
+  double cutoff_freq_; /**< Cutoff frequency */
+  unsigned int num_spectra_; /**< Number of evolutionary power spectra that
+                                  should be generated */
+  unsigned int num_sims_; /**< Number of simulated ground motion time histories
+                               that should be generated per evolutionary power
+                               spectrum */
+  int seed_value_; /**< Integer to seed random distributions with */
+  Eigen::VectorXd means_; /**< Mean values of model parameters */
+  Eigen::MatrixXd covariance_; /**< Covariance matrix for model parameters */
+  std::vector<std::shared_ptr<stochastic::Distribution>>
+      model_parameters_; /**< Distributions for 18-parameter model */
+  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>
+      parameter_realizations_; /**< Random realizations of normal model parameters */
+  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>
+      physical_parameters_; /**< Normal parameters transformed to
+                                 physical space */
+  std::shared_ptr<numeric_utils::RandomGenerator>
+      sample_generator_; /**< Multivariate normal random number generator */
+};
+}  // namespace stochastic
+
+#endif  // _VLACHOS_ET_AL_H_
diff --git a/modules/createEVENT/common/smelt/wind_profile.cc b/modules/createEVENT/common/smelt/wind_profile.cc
new file mode 100644
index 000000000..f0f05cbf5
--- /dev/null
+++ b/modules/createEVENT/common/smelt/wind_profile.cc
@@ -0,0 +1,58 @@
+#ifndef _WIND_PROFILE_H_
+#define _WIND_PROFILE_H_
+
+#define _USE_MATH_DEFINES
+#include <cmath>
+#include <functional>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+namespace wind {
+
+std::function<double(const std::string&, const std::vector<double>&, double,
+                     double, std::vector<double>&)>
+    exposure_category_velocity() {
+  return [](const std::string& exposure_category,
+            const std::vector<double>& heights, double karman_const,
+            double gust_speed, std::vector<double>& velocity_prof) -> double {
+    double roughness_ht = 0.0, power_factor = 0.0, power_exponent = 0.0;
+
+    // Set parameters based on exposure category
+    if (exposure_category == "A") {
+      roughness_ht = 2.0;
+      power_factor = 0.3;
+      power_exponent = 1.0 / 3.0;
+    } else if (exposure_category == "B") {
+      roughness_ht = 0.3;
+      power_factor = 0.45;
+      power_exponent = 1.0 / 4.0;
+    } else if (exposure_category == "C") {
+      roughness_ht = 0.02;
+      power_factor = 0.65;
+      power_exponent = 1.0 / 6.5;
+    } else if (exposure_category == "D") {
+      roughness_ht = 0.005;
+      power_factor = 0.80;
+      power_exponent = 1.0 / 9.0;
+    } else {
+      throw std::invalid_argument(
+          "\nERROR: In wind::exposure_category_velocity function: Input "
+          "exposure "
+          "category is not valid, please check input value\n");
+    }
+
+    velocity_prof.resize(heights.size());
+
+    for (unsigned int i = 0; i < heights.size(); ++i) {
+      velocity_prof[i] = gust_speed * power_factor *
+                         std::pow(heights[i] / 10.0, power_exponent);
+    }
+
+    return gust_speed * power_factor * karman_const /
+           std::log(10.0 / roughness_ht);
+  };
+}
+}  // namespace wind
+
+#endif  // _WIND_PROFILE_H_
diff --git a/modules/createEVENT/common/smelt/wind_profile.h b/modules/createEVENT/common/smelt/wind_profile.h
new file mode 100644
index 000000000..f7845910a
--- /dev/null
+++ b/modules/createEVENT/common/smelt/wind_profile.h
@@ -0,0 +1,30 @@
+#ifndef _WIND_PROFILE_H_
+#define _WIND_PROFILE_H_
+
+#define _USE_MATH_DEFINES
+#include <functional>
+#include <string>
+#include <vector>
+
+/**
+ * Wind profile generation functionality
+ */
+namespace wind {
+
+/**
+ * Function that calculates the vertical wind velocity and friction velocity
+ * profiles based on ASCE exposure categories using a power-law description of
+ * wind velocity
+ * @param[in] exposure_cat ASCE exposure category
+ * @param[in] heights Vector of heights at which to calculate velocity
+ * @param[in] karman_const Value to use for Von Karman constant
+ * @param[in] gust_speed Gust wind speed
+ * @param[in, out] velocity_prof Vector to store vertical velocity profile in
+ * @return Value of the friction velocity
+ */
+std::function<double(const std::string&, const std::vector<double>&, double,
+                     double, std::vector<double>&)>
+    exposure_category_velocity();
+}  // namespace wind
+
+#endif  // _WIND_PROFILE_H_
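For reference, a minimal standalone sketch of the power-law profile evaluated by exposure_category_velocity() above, using the exposure category C coefficients from that function (the gust speed and heights are hypothetical values):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
      // Exposure category C coefficients from exposure_category_velocity()
      const double power_factor = 0.65;
      const double power_exponent = 1.0 / 6.5;
      const double gust_speed = 40.0;  // hypothetical gust speed in m/s

      std::vector<double> heights{5.0, 10.0, 20.0, 40.0};  // hypothetical, in m
      for (double z : heights) {
        // V(z) = V_gust * b * (z / 10)^alpha, the power law used above
        double velocity =
            gust_speed * power_factor * std::pow(z / 10.0, power_exponent);
        std::printf("z = %5.1f m : V = %6.2f m/s\n", z, velocity);
      }
      return 0;
    }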
diff --git a/modules/createEVENT/common/smelt/window.h b/modules/createEVENT/common/smelt/window.h
new file mode 100644
index 000000000..4637bbf36
--- /dev/null
+++ b/modules/createEVENT/common/smelt/window.h
@@ -0,0 +1,35 @@
+#ifndef _WINDOW_H_
+#define _WINDOW_H_
+
+#define _USE_MATH_DEFINES
+#include <cmath>
+#include <complex>
+#include <functional>
+#include <vector>
+#include <Eigen/Dense>
+
+/**
+ * Signal processing functionality
+ */
+namespace signal_processing {
+
+/**
+ * Function that calculates the Hann window for the input window length
+ * @param[in] window_length Desired length of window
+ * @return Vector filled with Hann window function evaluations based on input
+ *         window length
+ */
+std::function<Eigen::VectorXd(unsigned int)> hann_window =
+    [](unsigned int window_length) -> Eigen::VectorXd {
+  Eigen::VectorXd hann(window_length);
+  double number_of_points = static_cast<double>(window_length - 1);
+
+  for (unsigned int i = 0; i < hann.size(); ++i) {
+    hann[i] = 0.5 * (1.0 - std::cos(2.0 * M_PI * i / number_of_points));
+  }
+
+  return hann;
+};
+}  // namespace signal_processing
+
+#endif  // _WINDOW_H_
diff --git a/modules/createEVENT/common/smelt/wittig_sinha.cc b/modules/createEVENT/common/smelt/wittig_sinha.cc
new file mode 100644
index 000000000..8c41f8398
--- /dev/null
+++ b/modules/createEVENT/common/smelt/wittig_sinha.cc
@@ -0,0 +1,335 @@
+#include <cmath>
+#include <complex>
+#include <ctime>
+#include <limits>
+// Boost random generator
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/normal_distribution.hpp>
+#include <boost/random/variate_generator.hpp>
+// Eigen dense matrices
+#include <Eigen/Dense>
+
+#include "function_dispatcher.h"
+#include "json_object.h"
+#include "numeric_utils.h"
+#include "wittig_sinha.h"
+
+stochastic::WittigSinha::WittigSinha(std::string exposure_category,
+                                     double gust_speed, double height,
+                                     unsigned int num_floors, double total_time)
+    : StochasticModel(),
+      exposure_category_{exposure_category},
+      gust_speed_{gust_speed * 0.44704},  // Convert from mph to m/s
+      bldg_height_{height},
+      num_floors_{num_floors},
+      seed_value_{std::numeric_limits<int>::infinity()},
+      local_x_{std::vector<double>(1, 1.0)},
+      local_y_{std::vector<double>(1, 1.0)},
+      freq_cutoff_{5.0},
+      time_step_{1.0 / (2.0 * freq_cutoff_)} {
+  model_name_ = "WittigSinha";
+  num_times_ =
+      static_cast<unsigned int>(std::ceil(total_time / time_step_)) % 2 == 0
+          ?
static_cast(std::ceil(total_time / time_step_)) + : static_cast(std::ceil(total_time / time_step_) + 1); + + // Calculate range of frequencies based on cutoff frequency + num_freqs_ = num_times_ / 2; + frequencies_.resize(num_freqs_); + + for (unsigned int i = 0; i < frequencies_.size(); ++i) { + frequencies_[i] = (i + 1) * freq_cutoff_ / num_freqs_; + } + + // Calculate heights of each floor + heights_ = std::vector(num_floors_); + heights_[0] = bldg_height_ / num_floors_; + + for (unsigned int i = 1; i < heights_.size(); ++i) { + heights_[i] = heights_[i - 1] + bldg_height_ / num_floors_; + } + + // Calculate velocity profile + friction_velocity_ = + Dispatcher&, double, + double, std::vector&>::instance() + ->dispatch("ExposureCategoryVel", exposure_category, heights_, 0.4, + gust_speed, wind_velocities_); +} + +stochastic::WittigSinha::WittigSinha(std::string exposure_category, + double gust_speed, double height, + unsigned int num_floors, double total_time, + int seed_value) + : WittigSinha(exposure_category, gust_speed, height, num_floors, + total_time) +{ + seed_value_ = seed_value; +} + +stochastic::WittigSinha::WittigSinha(std::string exposure_category, + double gust_speed, + const std::vector& heights, + const std::vector& x_locations, + const std::vector& y_locations, + double total_time) + : StochasticModel(), + exposure_category_{exposure_category}, + gust_speed_{gust_speed * 0.44704}, // Convert from mph to m/s + seed_value_{std::numeric_limits::infinity()}, + heights_{heights}, + local_x_{x_locations}, + local_y_{y_locations}, + freq_cutoff_{5.0}, + time_step_{1.0 / (2.0 * freq_cutoff_)} +{ + model_name_ = "WittigSinha"; + num_times_ = + static_cast(std::ceil(total_time / time_step_)) % 2 == 0 + ? static_cast(std::ceil(total_time / time_step_)) + : static_cast(std::ceil(total_time / time_step_) + 1); + + // Calculate range of frequencies based on cutoff frequency + num_freqs_ = num_times_ / 2; + frequencies_.resize(num_freqs_); + + for (unsigned int i = 0; i < frequencies_.size(); ++i) { + frequencies_[i] = i * freq_cutoff_ / num_freqs_; + } + + // Calculate velocity profile + friction_velocity_ = + Dispatcher&, double, + double, std::vector&>::instance() + ->dispatch("ExposureCategoryVel", exposure_category, heights_, 0.4, + gust_speed, wind_velocities_); +} + +stochastic::WittigSinha::WittigSinha(std::string exposure_category, + double gust_speed, + const std::vector& heights, + const std::vector& x_locations, + const std::vector& y_locations, + double total_time, int seed_value) + : WittigSinha(exposure_category, gust_speed, heights, x_locations, y_locations, total_time) +{ + seed_value_ = seed_value; +} + +utilities::JsonObject stochastic::WittigSinha::generate(const std::string& event_name, bool units) { + // Initialize wind velocity vectors + std::vector>>> wind_vels( + local_x_.size(), + std::vector>>( + local_y_.size(), + std::vector>( + heights_.size(), std::vector(num_times_, 0.0)))); + + Eigen::MatrixXcd complex_random_vals(num_freqs_, heights_.size()); + + // Loop over heights to find time histories + try { + for (unsigned int i = 0; i < local_x_.size(); ++i) { + for (unsigned int j = 0; j < local_y_.size(); ++j) { + // Generate complex random numbers to use for calculation of discrete + // time series + complex_random_vals = complex_random_numbers(); + for (unsigned int k = 0; k < heights_.size(); ++k) { + wind_vels[i][j][k] = gen_location_hist(complex_random_vals, k, units); + } + } + } + } catch (const std::exception& e) { + std::cerr << "\nERROR: In 
stochastic::WittigSinha::generate: " + << e.what() << std::endl; + } + + // Create JsonObject for event + auto event = utilities::JsonObject(); + event.add_value("dT", time_step_); + event.add_value("numSteps", num_times_); + + // Consider case when only looking at floor loads, so only have time histories as + // one location along the z-axis + if (local_x_.size() == 1 && local_y_.size() == 1) { + // Arrays of patterns and time histories for each floor + std::vector pattern_array(heights_.size()); + std::vector event_array(1); + std::vector time_history_array(heights_.size()); + auto time_history = utilities::JsonObject(); + event_array[0].add_value("type", "Wind"); + event_array[0].add_value("subtype", model_name_); + + for (unsigned int i = 0; i < heights_.size(); ++i) { + // Create pattern + pattern_array[i].add_value("name", std::to_string(i + 1)); + pattern_array[i].add_value("timeSeries", std::to_string(i + 1)); + pattern_array[i].add_value("type", "WindFloorLoad"); + pattern_array[i].add_value("floor", std::to_string(i + 1)); + pattern_array[i].add_value("dof", 1); + pattern_array[i].add_value("profileVelocity", wind_velocities_[i]); + + // Create time histories + time_history.add_value("name", std::to_string(i + 1)); + time_history.add_value("dT", time_step_); + time_history.add_value("type", "Value"); + time_history.add_value("data", wind_vels[0][0][i]); + time_history_array[i] = time_history; + time_history.clear(); + } + + event_array[0].add_value("timeSeries", time_history_array); + event_array[0].add_value("pattern", pattern_array); + event.add_value("Events", event_array); + } else { + throw std::runtime_error( + "ERROR: In stochastic::WittigSinha::generate: Currently, only supports " + "time histories along z-axis at single location\n"); + } + + return event; +} + +bool stochastic::WittigSinha::generate(const std::string& event_name, + const std::string& output_location, + bool units) { + + bool status = true; + // Generate time histories at specified locations + try { + auto json_output = generate(event_name, units); + json_output.write_to_file(output_location); + } catch (const std::exception& e) { + std::cerr << e.what(); + status = false; + throw; + } + + return status; +} + +Eigen::MatrixXd stochastic::WittigSinha::cross_spectral_density(double frequency) const { + // Coefficient for coherence function + double coherence_coeff = 10.0; + Eigen::MatrixXd cross_spectral_density = + Eigen::MatrixXd::Zero(heights_.size(), heights_.size()); + + for (unsigned int i = 0; i < cross_spectral_density.rows(); ++i) { + cross_spectral_density(i, i) = + 200.0 * friction_velocity_ * friction_velocity_ * heights_[i] / + (wind_velocities_[i] * + std::pow(1.0 + 50.0 * frequency * heights_[i] / wind_velocities_[i], + 5.0 / 3.0)); + } + + for (unsigned int i = 0; i < cross_spectral_density.rows(); ++i) { + for (unsigned int j = i + 1; j < cross_spectral_density.cols(); ++j) { + cross_spectral_density(i, j) = + std::sqrt(cross_spectral_density(i, i) * + cross_spectral_density(j, j)) * + std::exp(-coherence_coeff * frequency * + std::abs(heights_[i] - heights_[j]) / + (0.5 * (wind_velocities_[i] + wind_velocities_[j]))) * + 0.999; + } + } + + // Get diagonal of cross spectral density matrix--avoids compiler errors where type + // of diagonal matrix is not correctly deduced + Eigen::MatrixXd diag_mat = cross_spectral_density.diagonal().asDiagonal(); + + return cross_spectral_density.transpose() + cross_spectral_density - diag_mat; +} + +Eigen::MatrixXcd 
stochastic::WittigSinha::complex_random_numbers() const {
+  // Construct random number generator for standard normal distribution
+  static unsigned int history_seed = static_cast<unsigned int>(std::time(nullptr));
+  history_seed = history_seed + 10;
+
+  auto generator =
+      seed_value_ != std::numeric_limits<int>::infinity()
+          ? boost::random::mt19937(static_cast<int>(seed_value_ + 10))
+          : boost::random::mt19937(history_seed);
+
+  boost::random::normal_distribution<> distribution;
+  boost::random::variate_generator<boost::random::mt19937&,
+                                   boost::random::normal_distribution<>>
+      distribution_gen(generator, distribution);
+
+  // Generate white noise consisting of complex numbers
+  Eigen::MatrixXcd white_noise(heights_.size(), num_freqs_);
+
+  for (unsigned int i = 0; i < white_noise.rows(); ++i) {
+    for (unsigned int j = 0; j < white_noise.cols(); ++j) {
+      white_noise(i, j) = std::complex<double>(
+          distribution_gen() * std::sqrt(0.5),
+          distribution_gen() * std::sqrt(std::complex<double>(-0.5)).imag());
+    }
+  }
+
+  // Iterate over all frequencies and generate complex random numbers
+  // for discrete time series simulation
+  Eigen::MatrixXd cross_spec_density_matrix(heights_.size(), heights_.size());
+  Eigen::MatrixXcd complex_random(num_freqs_, heights_.size());
+
+  for (unsigned int i = 0; i < frequencies_.size(); ++i) {
+    // Calculate cross-spectral density matrix for current frequency
+    cross_spec_density_matrix = cross_spectral_density(frequencies_[i]);
+
+    // Find lower Cholesky factorization of cross-spectral density
+    Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> lower_cholesky;
+
+    try {
+      auto llt = cross_spec_density_matrix.llt();
+      lower_cholesky = llt.matrixL();
+
+      if (llt.info() == Eigen::NumericalIssue) {
+        throw std::runtime_error(
+            "\nERROR: In stochastic::WittigSinha::generate method: Cross-Spectral Density "
+            "matrix is not positive semi-definite\n");
+      }
+    } catch (const std::exception& e) {
+      std::cerr << "\nERROR: In time history generation: " << e.what()
+                << std::endl;
+    }
+
+    // This is Equation 5(a) from Wittig & Sinha (1975)
+    complex_random.row(i) = num_freqs_ *
+                            std::sqrt(2.0 * freq_cutoff_ / num_freqs_) *
+                            lower_cholesky * white_noise.col(i);
+  }
+
+  return complex_random;
+}
+
+std::vector<double> stochastic::WittigSinha::gen_location_hist(
+    const Eigen::MatrixXcd& random_numbers, unsigned int column_index,
+    bool units) const {
+
+  // The following block implements what is expressed in Equations 7 & 8
+  Eigen::VectorXcd complex_full_range = Eigen::VectorXcd::Zero(2 * num_freqs_);
+
+  complex_full_range.segment(1, num_freqs_) =
+      random_numbers.block(0, column_index, num_freqs_, 1);
+
+  complex_full_range.segment(num_freqs_ + 1, num_freqs_ - 1) =
+      random_numbers.block(0, column_index, num_freqs_ - 1, 1)
+          .reverse()
+          .conjugate();
+
+  complex_full_range(num_freqs_) = std::abs(random_numbers(num_freqs_ - 1, column_index));
+
+  // Calculate wind speed using the real portion of the inverse Fast Fourier
+  // Transform of the full range of random numbers
+  std::vector<double> node_time_history(complex_full_range.size());
+  numeric_utils::inverse_fft(complex_full_range, node_time_history);
+
+  // Check if time histories need to be converted to ft/s
+  if (units) {
+    for (auto & val : node_time_history) {
+      val = val * 3.28084;
+    }
+  }
+
+  return node_time_history;
+}
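As a reference for the conjugate symmetry built in gen_location_hist() above (Equations 7 and 8 of Wittig & Sinha), here is a minimal sketch that mirrors a one-sided set of complex coefficients into a full-length vector whose inverse FFT is real-valued, assuming only Eigen (the coefficient values are hypothetical):

    #include <complex>
    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      const int num_freqs = 4;
      // Hypothetical one-sided complex Fourier coefficients
      Eigen::VectorXcd half(num_freqs);
      half << std::complex<double>(1.0, 0.5), std::complex<double>(0.3, -0.2),
          std::complex<double>(-0.4, 0.1), std::complex<double>(0.2, 0.6);

      // Mirror into a conjugate-symmetric vector of length 2 * num_freqs so
      // that the inverse FFT of the sequence is real-valued
      Eigen::VectorXcd full = Eigen::VectorXcd::Zero(2 * num_freqs);
      full.segment(1, num_freqs) = half;
      full.segment(num_freqs + 1, num_freqs - 1) =
          half.head(num_freqs - 1).reverse().conjugate();
      full(num_freqs) = std::abs(half(num_freqs - 1));  // Nyquist bin kept real

      std::cout << full << std::endl;
      return 0;
    }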
+#include "stochastic_model.h" + +namespace stochastic { + +/** + * Stochastic model for generating wind loads using discrete frequency + * functions with Cholesky decomposition and FFTs, as described in + * Wittig & Sinha (1975), "Simulation of multicorrelated random + * processes using the FFT algorithm" + */ +class WittigSinha : public StochasticModel { + public: + /** + * @constructor Default constructor + */ + WittigSinha() = default; + + /** + * @constructor Construct wind load generator based on model input parameters + * using exposure category-based velocity profile. Divides building height + * equally by number of floors, providing time histories at floors. + * @param[in] exposure_category Exposure category based on ASCE-7 + * @param[in] gust_speed Gust speed of wind in mph + * @param[in] height Building height + * @param[in] num_floors Number of floors in building + * @param[in] total_time Total time desired for time history + */ + WittigSinha(std::string exposure_category, double gust_speed, + double height, unsigned int num_floors, double total_time); + + /** + * @constructor Construct wind load generator based on model input parameters + * using exposure category-based velcoty profile with specified seed value. + * Divides building height equally by number of floors, providing time + * histories at floors. + * @param[in] exposure_category Exposure category based on ASCE-7 + * @param[in] gust_speed Gust speed of wind in mph + * @param[in] height Building height + * @param[in] num_floors Number of floors in building + * @param[in] total_time Total time desired for time history + * @param[in] seed_value Value to seed random variables with to ensure + * repeatability + */ + WittigSinha(std::string exposure_category, double gust_speed, + double height, unsigned int num_floors, double total_time, + int seed_value); + + /** + * @constructor Construct wind load generator based on model input parameters + * using exposure category-based velocity profile at specific horizontal and + * vertical locations. + * @param[in] exposure_category Exposure category based on ASCE-7 + * @param[in] gust_speed Gust speed of wind in mph + * @param[in] heights Vector of heights at which to calculate time histories + * @param[in] x_locations Vector of x locations at which to calculate time histories + * @param[in] y_locations Vector of y locations at which to calculate time histories + * @param[in] total_time Total time desired for time history + */ + WittigSinha(std::string exposure_category, double gust_speed, + const std::vector& heights, + const std::vector& x_locations, + const std::vector& y_locations, double total_time); + + /** + * @constructor Construct wind load generator based on model input parameters + * using exposure category-based velocity profile at specific horizontal and + * vertical locations with specified seed value. 
+ * @param[in] exposure_category Exposure category based on ASCE-7 + * @param[in] gust_speed Gust speed of wind in mph + * @param[in] heights Vector of heights at which to calculate time histories + * @param[in] x_locations Vector of x locations at which to calculate time histories + * @param[in] y_locations Vector of y locations at which to calculate time histories + * @param[in] total_time Total time desired for time history + * @param[in] seed_value Value to seed random variables with to ensure + * repeatability + */ + WittigSinha(std::string exposure_category, double gust_speed, + const std::vector<double>& heights, + const std::vector<double>& x_locations, + const std::vector<double>& y_locations, double total_time, + int seed_value); + + /** + * @destructor Virtual destructor + */ + virtual ~WittigSinha() {}; + + /** + * Delete copy constructor + */ + WittigSinha(const WittigSinha&) = delete; + + /** + * Delete assignment operator + */ + WittigSinha& operator=(const WittigSinha&) = delete; + + /** + * Generate wind velocity time histories based on Wittig & Sinha (1975) model + * with provided inputs and store outputs as JSON object + * @param[in] event_name Name to assign to event + * @param[in] units Indicates that time histories should be returned in + * units of ft/s. Defaults to false where time histories + * are returned in units of m/s + * @return JsonObject containing loading time histories + */ + utilities::JsonObject generate(const std::string& event_name, + bool units = false) override; + + /** + * Generate wind velocity time histories based on Wittig & Sinha (1975) model + * with provided inputs and write results to file in JSON format + * @param[in] event_name Name to assign to event + * @param[in, out] output_location Location to write outputs to + * @param[in] units Indicates that time histories should be returned in + * units of ft/s. Defaults to false where time histories + * are returned in units of m/s + * @return Returns true if successful, false otherwise + */ + bool generate(const std::string& event_name, + const std::string& output_location, bool units = false) override; + + /** + * Calculate the cross-spectral density matrix + * @param[in] frequency Frequency at which to calculate cross-spectral density + * @return Matrix containing cross-spectral density functions + */ + Eigen::MatrixXd cross_spectral_density(double frequency) const; + + /** + * Generate matrix of complex random numbers from standard normal distribution scaled + * by lower Cholesky decomposition of the cross-spectral density matrix + * @return A matrix containing complex random numbers + */ + Eigen::MatrixXcd complex_random_numbers() const; + + /** + * Generate velocity time histories at vertical location specified + * @param[in] random_numbers Matrix of complex random numbers to use for + * velocity time history generation + * @param[in] column_index Index for column to use in input random numbers + * matrix + * @param[in] units Indicates that time histories should be returned in + * units of ft/s.
Otherwise time histories are returned + * in units of m/s + * @return Vector containing velocity time history for vertical location + * requested + */ + std::vector<double> gen_location_hist(const Eigen::MatrixXcd& random_numbers, + unsigned int column_index, + bool units) const; + + private: + std::string exposure_category_; /**< Exposure category for building based on ASCE-7 */ + double gust_speed_; /**< Gust speed for wind */ + double bldg_height_; /**< Height of building */ + unsigned int num_floors_; /**< Number of floors */ + int seed_value_; /**< Integer to seed random distributions with */ + std::vector<double> heights_; /**< Locations along building height at which + velocities are generated */ + std::vector<double> local_x_; /**< Locations along local x-axis at which to + generate velocities */ + std::vector<double> local_y_; /**< Locations along local y-axis at which to + generate velocities */ + double freq_cutoff_; /**< Cut-off frequency */ + double time_step_; /**< Time step in time histories */ + unsigned int num_times_; /**< Total number of time steps */ + unsigned int num_freqs_; /**< Total number of frequency steps */ + std::vector<double> frequencies_; /**< Range of frequencies */ + std::vector<double> wind_velocities_; /**< Vertical wind velocity profile */ + double friction_velocity_; /**< Friction velocity */ +}; +} // namespace stochastic + +#endif // _WITTIG_SINHA_H_ diff --git a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt index ecd3ae58d..e202d83d2 100644 --- a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt +++ b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt @@ -1,4 +1,9 @@ -simcenter_add_executable(NAME StochasticGM - DEPENDS CONAN_PKG::smelt CONAN_PKG::kissfft) +#simcenter_add_executable(NAME StochasticGM +# DEPENDS CONAN_PKG::kissfft smelt) + +add_executable(StochasticGM command_parser.cpp eq_generator.cpp main.cpp) + +include_directories(../common/smelt) +target_link_libraries (StochasticGM CONAN_PKG::kissfft smelt) set_property(TARGET StochasticGM PROPERTY CXX_STANDARD 17) diff --git a/modules/createEVENT/stochasticWind/CMakeLists.txt b/modules/createEVENT/stochasticWind/CMakeLists.txt index a47587260..84ea60221 100644 --- a/modules/createEVENT/stochasticWind/CMakeLists.txt +++ b/modules/createEVENT/stochasticWind/CMakeLists.txt @@ -1,4 +1,10 @@ -simcenter_add_executable(NAME StochasticWind - DEPENDS CONAN_PKG::smelt CONAN_PKG::kissfft common) +#simcenter_add_executable(NAME StochasticWind +# DEPENDS CONAN_PKG::kissfft smelt common) + +add_executable(StochasticWind command_parser.cpp floor_forces.cpp wind_generator.cpp main.cpp) + +include_directories(../common/smelt) +target_link_libraries (StochasticWind CONAN_PKG::kissfft smelt common) set_property(TARGET StochasticWind PROPERTY CXX_STANDARD 17) + From 5cd75506e7770da93d54c8d9489a731482035f31 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 11 Sep 2024 14:11:02 -0700 Subject: [PATCH 44/59] fmk - setting libcurl back to 8.1.1 and adding conan disable check compiler to be on for --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 772baeaeb..6cca02130 100644 --- a/conanfile.py +++ b/conanfile.py @@ -24,7 +24,7 @@ class simCenterBackendApps(ConanFile): # noqa: D101 requires = [ # noqa: RUF012 'jansson/2.13.1', 'zlib/1.2.11', - 'libcurl/8.6.0', + 'libcurl/8.1.1', 'eigen/3.3.7', 'clara/1.1.5', 'jsonformoderncpp/3.7.0', From 0e6a557121bf6a547a3ea1cd03599543bf133c80 Mon Sep 17 00:00:00 2001 From:
unknown Date: Wed, 11 Sep 2024 14:11:35 -0700 Subject: [PATCH 45/59] fmk - that last DISABLE CHECK --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 090dcf32f..8cfddbfa3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -29,7 +29,7 @@ PROJECT(SimCenterBackendApplications) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/cmake) set(CMAKE_VERBOSE_MAKEFILE ON) set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE INTERNAL "" FORCE) - +SET (CONAN_DISABLE_CHECK_COMPILER ON) include(SimCenterFunctions) include(CMakeParseArguments) From c2a5f1671eb9e3bc22bfdb6c3314f2c3107f4edf Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Thu, 12 Sep 2024 17:22:21 -0700 Subject: [PATCH 46/59] jz - Capacity Spectrum Method in R2D --- modules/Workflow/WorkflowApplications.json | 4 + modules/performSIMULATION/CMakeLists.txt | 1 + .../capacitySpectrum/CMakeLists.txt | 13 + .../capacitySpectrum/CapacityModels.py | 306 ++++++++++++ .../capacitySpectrum/DampingModels.py | 257 ++++++++++ .../capacitySpectrum/DemandModels.py | 323 +++++++++++++ .../capacitySpectrum/HC_capacity_data.csv | 37 ++ .../capacitySpectrum/LC_capacity_data.csv | 37 ++ .../capacitySpectrum/MC_capacity_data.csv | 37 ++ .../capacitySpectrum/PC_capacity_data.csv | 37 ++ .../hazus_capacity_alpha2.csv | 37 ++ .../capacitySpectrum/hazus_kappa_data.csv | 36 ++ .../hazus_typical_roof_height.csv | 37 ++ .../capacitySpectrum/runCMS.py | 438 ++++++++++++++++++ 14 files changed, 1600 insertions(+) create mode 100644 modules/performSIMULATION/capacitySpectrum/CMakeLists.txt create mode 100644 modules/performSIMULATION/capacitySpectrum/CapacityModels.py create mode 100644 modules/performSIMULATION/capacitySpectrum/DampingModels.py create mode 100644 modules/performSIMULATION/capacitySpectrum/DemandModels.py create mode 100644 modules/performSIMULATION/capacitySpectrum/HC_capacity_data.csv create mode 100644 modules/performSIMULATION/capacitySpectrum/LC_capacity_data.csv create mode 100644 modules/performSIMULATION/capacitySpectrum/MC_capacity_data.csv create mode 100644 modules/performSIMULATION/capacitySpectrum/PC_capacity_data.csv create mode 100644 modules/performSIMULATION/capacitySpectrum/hazus_capacity_alpha2.csv create mode 100644 modules/performSIMULATION/capacitySpectrum/hazus_kappa_data.csv create mode 100644 modules/performSIMULATION/capacitySpectrum/hazus_typical_roof_height.csv create mode 100644 modules/performSIMULATION/capacitySpectrum/runCMS.py diff --git a/modules/Workflow/WorkflowApplications.json b/modules/Workflow/WorkflowApplications.json index 8748fc167..c9d828bb6 100644 --- a/modules/Workflow/WorkflowApplications.json +++ b/modules/Workflow/WorkflowApplications.json @@ -974,6 +974,10 @@ "Name": "IMasEDP", "ExecutablePath": "applications/performSIMULATION/IMasEDP/IMasEDP.py" }, + { + "Name": "CapacitySpectrumMethod", + "ExecutablePath": "applications/performSIMULATION/capacitySpectrum/runCMS.py" + }, { "Name": "SurrogateSimulation", "ExecutablePath": "applications/performSIMULATION/surrogateSimulation/SurrogateSimulation.py" diff --git a/modules/performSIMULATION/CMakeLists.txt b/modules/performSIMULATION/CMakeLists.txt index 64bdece13..e3e0a381d 100644 --- a/modules/performSIMULATION/CMakeLists.txt +++ b/modules/performSIMULATION/CMakeLists.txt @@ -6,4 +6,5 @@ add_subdirectory(IMasEDP) add_subdirectory(customPy) add_subdirectory(surrogateRegionalPy) add_subdirectory(surrogateSimulation) +add_subdirectory(capacitySpectrum) diff --git 
a/modules/performSIMULATION/capacitySpectrum/CMakeLists.txt b/modules/performSIMULATION/capacitySpectrum/CMakeLists.txt new file mode 100644 index 000000000..cc057dcaa --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/CMakeLists.txt @@ -0,0 +1,13 @@ +simcenter_add_python_script(SCRIPT CapacityModels.py) +simcenter_add_python_script(SCRIPT DampingModels.py) +simcenter_add_python_script(SCRIPT DemandModels.py) +simcenter_add_python_script(SCRIPT runCMS.py) +simcenter_add_python_script(SCRIPT hazus_capacity_alpha2.csv) +simcenter_add_python_script(SCRIPT hazus_kappa_data.csv) +simcenter_add_python_script(SCRIPT hazus_typical_roof_height.csv) +simcenter_add_python_script(SCRIPT HC_capacity_data.csv) +simcenter_add_python_script(SCRIPT MC_capacity_data.csv) +simcenter_add_python_script(SCRIPT LC_capacity_data.csv) +simcenter_add_python_script(SCRIPT PC_capacity_data.csv) + + diff --git a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py new file mode 100644 index 000000000..9a09759fc --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py @@ -0,0 +1,306 @@ +# # noqa: N999, D100 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of the SimCenter Backend Applications +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# this file. If not, see . +# +# Contributors: +# Jinyan Zhao +# +# References: +# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to +# model uncertainty of input ground motions in the Los Angeles area. Bulletin of +# the Seismological Society of America, 96(2), 365-376. +# 2. Steelman, J., & Hajjar, J. F. (2008). Systemic validation of consequence-based +# risk management for seismic regional losses. +# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. +# Engineering monographs on earthquake criteria. +# 4. 
FEMA (2022), HAZUS – Multi-hazard Loss Estimation Methodology 5.0, +# Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. + + + +import os +import sys +import time + +import numpy as np +import pandas as pd + +ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'} +# original: +# ap_DesignLevel = {1940: 'PC', 1940: 'LC', 1975: 'MC', 2100: 'HC'} +# Note that the duplicated key is ignored, and Python keeps the last +# entry. + +ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'} +# original: +# ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'} +# same thing applies + +def convert_story_rise(structureType, stories): + if structureType in ['W1', 'W2', 'S3', 'PC1', 'MH']: + # These archetypes have no rise information in their IDs + rise = None + + else: + # First, check if we have valid story information + try: + stories = int(stories) + + except (ValueError, TypeError): + raise ValueError( + 'Missing "NumberOfStories" information, ' + 'cannot infer `rise` attribute of archetype' + ) + + if structureType == 'RM1': + if stories <= 3: + rise = "L" + + else: + rise = "M" + + elif structureType == 'URM': + if stories <= 2: + rise = "L" + + else: + rise = "M" + + elif structureType in [ + 'S1', + 'S2', + 'S4', + 'S5', + 'C1', + 'C2', + 'C3', + 'PC2', + 'RM2', + ]: + if stories <= 3: + rise = "L" + + elif stories <= 7: + rise = "M" + + else: + rise = "H" + + return rise + +def auto_populate_hazus(GI): + # get the building parameters + bt = GI['StructureType'] # building type + + # get the design level + dl = GI.get('DesignLevel', None) + if dl is None: + # If there is no DesignLevel provided, we assume that the YearBuilt is + # available + year_built = GI['YearBuilt'] + + if 'W1' in bt: + DesignL = ap_DesignLevel_W1 + else: + DesignL = ap_DesignLevel + + for year in sorted(DesignL.keys()): + if year_built <= year: + dl = DesignL[year] + break + + # get the number of stories / height + stories = GI.get('NumberOfStories', None) + + # We assume that the structure type does not include height information + # and we append it here based on the number of story information + rise = convert_story_rise(bt, stories) + + if rise is not None: + LF = f'{bt}{rise}' + else: + LF = f'{bt}' + return LF, dl + + +class capacity_model_base: + """ + A class to represent the base of capacity models. + + Attributes: + ---------- + + Methods: + ------- + """ + def __init__(self): + pass + def name(self): + return 'capacity_model_base' + +class cao_peterson_2006(capacity_model_base): + """ + A class to represent the capacity model in Cao and Peterson 2006. + + Attributes: + ---------- + Dy : float + Yield displacement. In the unit of (inch) + Ay : float + Yield acceleration. In the unit of (g) + Du : float + Ultimate displacement. In the unit of (inch) + Au : float + Ultimate acceleration. In the unit of (g) + Ax : float + Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (g) + B : float + Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (g) + C : float + Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (inch) + + Methods: + ------- + """ + def __init__(self, Dy, Ay, Du, Au, dD = 0.001): + # region between elastic and perfectly plastic + sd_elpl = np.arange(Dy,Du,dD) + # Eq. B3 in Steelman & Hajjar 2008 + Ax = (Au**2*Dy - Ay**2*Du)/(2*Au*Dy - Ay*Dy - Ay*Du) + # Eq. B4 in Steelman & Hajjar 2008 + B = Au - Ax + # Eq. B5 in Steelman & Hajjar 2008 + C = (Dy*B**2*(Du-Dy)/(Ay*(Ay-Ax)))**0.5 + # Eq. 
B1 in Steelman & Hajjar 2008 + sa_elpl = Ax + B*(1 - ((sd_elpl-Du)/C)**2)**0.5 + # elastic and perfectly plastic regions + sd_el = np.arange(0,Dy,dD) + sd_pl = np.arange(Du,4*Du,dD) + + sa_el = sd_el*Ay/Dy + sa_pl = Au*np.ones(len(sd_pl)) + + self.sd = np.concatenate((sd_el,sd_elpl,sd_pl)) + self.sa = np.concatenate((sa_el,sa_elpl,sa_pl)) + self.Ax = Ax + self.B = B + self.C = C + self.Du = Du + self.Ay = Ay + self.Dy = Dy + + def name(self): + return 'cao_peterson_2006' + +class HAZUS_cao_peterson_2006(capacity_model_base): + """ + A class to represent the capacity model in Cao and Peterson 2006. + + Attributes: + ---------- + Dy : float + Yield displacement. In the unit of (inch) + Ay : float + Yield acceleration. In the unit of (g) + Du : float + Ultimate displacement. In the unit of (inch) + Au : float + Ultimate acceleration. In the unit of (g) + Ax : float + Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (g) + B : float + Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (g) + C : float + Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (inch) + + Methods: + ------- + """ + def __init__(self, general_info, dD = 0.001): + # HAZUS capacity data: Table 5-7 to Tabl 5-10 in HAZUS 5.1 + self.capacity_data = dict() + self.capacity_data['HC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + 'HC_capacity_data.csv'), + index_col=0).to_dict(orient='index') + self.capacity_data['MC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + 'MC_capacity_data.csv'), + index_col=0).to_dict(orient='index') + self.capacity_data['LC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + 'LC_capacity_data.csv'), + index_col=0).to_dict(orient='index') + self.capacity_data['PC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + 'PC_capacity_data.csv'), + index_col=0).to_dict(orient='index') + self.capacity_data['alpha2'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + 'hazus_capacity_alpha2.csv'), + index_col=0).to_dict(orient='index') + self.capacity_data['roof_height'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + 'hazus_typical_roof_height.csv'), + index_col=0).to_dict(orient='index') + # auto populate to get the parameters + self.HAZUS_type, self.design_level = auto_populate_hazus(general_info) + try: + self.Du = self.capacity_data[self.design_level][self.HAZUS_type]['Du'] + self.Au = self.capacity_data[self.design_level][self.HAZUS_type]['Au'] + self.Dy = self.capacity_data[self.design_level][self.HAZUS_type]['Dy'] + self.Ay = self.capacity_data[self.design_level][self.HAZUS_type]['Ay'] + except KeyError: + raise KeyError(f'No capacity data for {self.HAZUS_type} and {self.design_level}') + self.cao_peterson_2006 = cao_peterson_2006(self.Dy, self.Ay, self.Du, self.Au, dD) + self.Ax = self.cao_peterson_2006.Ax + self.B = self.cao_peterson_2006.B + self.C = self.cao_peterson_2006.C + + def get_capacity_curve(self, sd_max): + sd = self.cao_peterson_2006.sd + sa = self.cao_peterson_2006.sa + if sd_max > sd[-1]: + num_points = min(500, int((sd_max - self.cao_peterson_2006.sd[-1])/0.001)) + sd = np.concatenate((sd,np.linspace( + self.cao_peterson_2006.sd[-1], sd_max, num_points))) + sa = np.concatenate((sa, sa[-1]*np.ones(num_points))) + return sd, sa + + # def get_capacity_curve(self): + # return self.cao_peterson_2006.sd, self.cao_peterson_2006.sa + + def get_hazus_alpha2(self): + return self.capacity_data['alpha2'][self.HAZUS_type]['alpha2'] + + def get_hazus_roof_height(self): + return 
self.capacity_data['roof_height'][self.HAZUS_type]['roof_height_ft'] + + def name(self): + return 'HAZUS_cao_peterson_2006' + \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/DampingModels.py b/modules/performSIMULATION/capacitySpectrum/DampingModels.py new file mode 100644 index 000000000..66131af38 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/DampingModels.py @@ -0,0 +1,257 @@ +# # noqa: N999, D100 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of the SimCenter Backend Applications +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# this file. If not, see . +# +# Contributors: +# Jinyan Zhao +# Tamika Bassman +# Adam Zsarnóczay +# +# References: +# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to +# model uncertainty of input ground motions in the Los Angeles area. Bulletin of +# the Seismological Society of America, 96(2), 365-376. +# 2. Steelman, J., & Hajjar, J. F. (2008). Systemic validation of consequence-based +# risk management for seismic regional losses. +# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. +# Engineering monographs on earthquake criteria. +# 4. FEMA (2022), HAZUS – Multi-hazard Loss Estimation Methodology 5.0, +# Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. + + + +import os +import sys +import time + +import numpy as np +import pandas as pd + +class damping_model_base: + """ + A class to represent the base of damping models. + + Attributes: + ---------- + + Methods: + ------- + + """ + def __init__(self): + pass + def name(self): + return 'damping_model_base' + +class damping_model_hazus(damping_model_base): + """ + A class to represent the hazus damping models. + + Attributes: + ---------- + beta_elastic_map : dict + The damping ratio is suggested by FEMA HAZUS Below Eq. 
5-9, which in turn + is based on Newmark and Hall 1982. + The median value of the damping ratio in Table 3 of Newmark and Hall 1982 + is used; e.g., for steel buildings, the damping ratio is assumed as + (6+12.5)/2=9.25%, which is the average of welded steel and bolted steel. + Masonry buildings are assumed to have a damping ratio similar to reinforced + concrete buildings. Mobile homes are assumed to have a damping ratio similar + to steel buildings. + + + Methods: + ------- + get_beta_elastic : Calculate the elastic damping ratio beta. + """ + def __init__(self): + self.beta_elastic_map = { + 'W1': 15, + 'W2': 15, + 'S1L': 10, + 'S1M': 7, + 'S1H': 5, + 'S2L': 10, + 'S2M': 7, + 'S2H': 5, + 'S3': 7, + 'S4L': 10, + 'S4M': 7, + 'S4H': 5, + 'S5L': 10, + 'S5M': 7, + 'S5H': 5, + 'C1L': 10, + 'C1M': 8.5, + 'C1H': 7, + 'C2L': 10, + 'C2M': 8.5, + 'C2H': 7, + 'C3L': 10, + 'C3M': 8.5, + 'C3H': 7, + 'PC1': 8.5, + 'PC2L': 10, + 'PC2M': 8.5, + 'PC2H': 7, + 'RM1L': 10, + 'RM1M': 8.5, + 'RM2L': 10, + 'RM2M': 8.5, + 'RM2H': 7, + 'URML': 8.5, + 'URMM': 8.5, + 'MH': 9.25 + } + self.kappa_data = pd.read_csv(os.path.join(os.path.dirname(__file__), + 'hazus_kappa_data.csv'), + index_col=0, header=None) + self.kappa_col_map = {'HC':{'S':1, 'M':2, 'L':3}, + 'MC':{'S':4, 'M':5, 'L':6}, + 'LC':{'S':7, 'M':8, 'L':9}, + 'PC':{'S':10, 'M':11, 'L':12}, + } + + def get_beta_elastic(self, HAZUS_bldg_type): + """ + Calculate the elastic damping ratio beta. + + Parameters: + ----------- + HAZUS_bldg_type : str + The HAZUS building type. + + Returns: + -------- + beta : float + The elastic damping ratio beta. + """ + if HAZUS_bldg_type not in self.beta_elastic_map.keys(): + sys.exit(f'The building type {HAZUS_bldg_type} is not in the damping' + ' model.') + beta = self.beta_elastic_map[HAZUS_bldg_type] + return beta + def get_kappa(self, HAZUS_bldg_type, design_level, Mw): + """ + Calculate the kappa in Table 5-33 of FEMA HAZUS 2022. + + Parameters: + ----------- + HAZUS_bldg_type : str + The HAZUS building type. + design_level : str + The HAZUS design level ('HC', 'MC', 'LC' or 'PC'). + Mw : float + The earthquake moment magnitude, used to infer duration. + + Returns: + -------- + kappa : float + The kappa in Table 5-33 of FEMA HAZUS 2022. + """ + if HAZUS_bldg_type not in self.beta_elastic_map.keys(): + sys.exit(f'The building type {HAZUS_bldg_type} is not in the damping' + ' model.') + # Infer duration according to HAZUS 2022 below Table 5-33 + if Mw <= 5.5: + duration = 'S' + elif Mw < 7.5: + duration = 'M' + else: + duration = 'L' + col = self.kappa_col_map[design_level][duration] + kappa = self.kappa_data.loc[HAZUS_bldg_type, col] + return kappa + + def get_name(self): + return 'damping_model_hazus' + +class HAZUS_cao_peterson_2006(damping_model_base): + """ + A class to represent the damping model in Cao and Peterson 2006.
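+
+    The effective damping returned by get_beta combines the tabulated
+    elastic damping with hysteretic damping inferred from the hysteresis
+    loop at the performance point (Dp, Ap), following Eqs. B.44-B.46 of
+    Steelman & Hajjar (2010). A minimal usage sketch (hypothetical; the
+    general_info dict and the performance point Dp, Ap are assumed to
+    already exist):
+
+        import CapacityModels, DemandModels
+        demand = DemandModels.HAZUS(Mw=7.0)
+        capacity = CapacityModels.HAZUS_cao_peterson_2006(general_info)
+        damping = HAZUS_cao_peterson_2006(demand, capacity)
+        beta_eff = damping.get_beta(Dp, Ap)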
+ + Attributes: + ---------- + + Methods: + ------- + """ + def __init__(self, demand, capacity, base_model = damping_model_hazus()): + self.supported_capacity_model = ['HAZUS_cao_peterson_2006'] + self.supported_demand_model = ['HAZUS', 'HAZUS_lin_chang_2003'] + self.base_model = base_model + if capacity.name() not in self.supported_capacity_model: + sys.exit(f'The capacity model {capacity.name()} is not compatible' + ' with the damping model: cao_peterson_2006.') + if demand.name() not in self.supported_demand_model: + sys.exit(f'The demand model {demand.name()} is not compatible' + ' with the damping model: cao_peterson_2006.') + self.capacity = capacity + self.HAZUS_type = capacity.HAZUS_type + self.design_level = capacity.design_level + self.Mw = demand.Mw + + + def get_beta(self, Dp, Ap): + """ + Equations B.44-B.45 in Steelman & Hajjar (2010), originally published + in Cao and Peterson 2006 + """ + try: + beta_elastic = self.base_model.get_beta_elastic(self.HAZUS_type) + except: # noqa: E722 + sys.exit(f'The base model {self.base_model} does not have a useful' + ' get_beta_elastic method.') + try: + kappa = self.base_model.get_kappa(self.HAZUS_type, self.design_level, self.Mw) + except: # noqa: E722 + sys.exit(f'The base model {self.base_model} does not have a useful' + ' get_kappa method.') + Du = self.capacity.Du + Ax = self.capacity.Ax + B = self.capacity.B + C = self.capacity.C + Kt = (Du-Dp)/(Ap-Ax)*(B/C)**2 # Eq B.46 + Ke = self.capacity.Ay/self.capacity.Dy # Eq B.47 + area_h = max(0,4*(Ap-Dp*Ke)*(Dp*Kt-Ap)/(Ke-Kt)) # Eq. B.45 + # beta is in the unit of percentage + # beta_h = kappa*area_h/(2*3.1416*Dp*Ap) * 100# Eq. B.44 + beta_h = kappa*area_h/(2*3.1416*Dp*Ap)# Eq. B.44 + return beta_elastic + beta_h + + def get_beta_elastic(self): + return self.base_model.get_beta_elastic(self.HAZUS_type) + + + + + + + \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/DemandModels.py b/modules/performSIMULATION/capacitySpectrum/DemandModels.py new file mode 100644 index 000000000..fe19f75d0 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/DemandModels.py @@ -0,0 +1,323 @@ +# # noqa: N999, D100 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of the SimCenter Backend Applications +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# this file. If not, see . +# +# Contributors: +# Jinyan Zhao +# +# References: +# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to +# model uncertainty of input ground motions in the Los Angeles area. Bulletin of +# the Seismological Society of America, 96(2), 365-376. +# 2. Steelman, J., & Hajjar, J. F. (2008). Systemic validation of consequence-based +# risk management for seismic regional losses. +# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. +# Engineering monographs on earthquake criteria. +# 4. FEMA (2022), HAZUS – Multi-hazard Loss Estimation Methodology 5.0, +# Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. + + + +import os +import sys +import time + +import numpy as np +import pandas as pd + +class demand_model_base: + """ + A class to represent the base of demand models. + + Attributes: + ---------- + T : numpy.ndarray + Periods in the demand spectrum. + dem_sd_05 : numpy.ndarray + Spectrum displacement in the demand spectrum at 5% damping. In the unit of (inch) + dem_sa_05 : numpy.ndarray + Spectrum acceleration in the demand spectrum at 5% damping. In the unit of (g) + + Methods: + ------- + """ + def __init__(self, T, dem_sd_05, dem_sa_05): + self.T = T + self.dem_sd_05 = dem_sd_05 + self.dem_sa_05 = dem_sa_05 + +class HAZUS(demand_model_base): + """ + A class to represent the design spectrum from HAZUS V5 (2022), section 4.1.3.2 + + Attributes: + ---------- + Tvd : float + Tvd as HAZUS Eq. 4-4. + Default value is 10 s as suggested by HAZUS (Below Eq. 4-4). + Tav : float + Tav as Cao and Peterson 2006. Figure A1. + T : numpy.ndarray + Periods in the demand spectrum. + dem_sd_05 : numpy.ndarray + Spectrum displacement in the demand spectrum at 5% damping. In the unit of (inch) + dem_sa_05 : numpy.ndarray + Spectrum acceleration in the demand spectrum at 5% damping. In the unit of (g) + + Methods: + ------- + """ + def __init__(self, Mw = 7.0): + self.Tvd = np.power(10, (Mw - 5)/2) + self.T = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, + 0.25, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 7.5, 10] + self.Mw = Mw + + def set_IMs(self, sa_03, sa_10): + self.sa_03 = sa_03 + self.sa_10 = sa_10 + self.Tav = sa_10/sa_03 + self.g = 386 + # insert tvd and tav in self.T + self.T.append(self.Tvd) + self.T.append(self.Tav) + self.T = np.sort(self.T) + self.dem_sd_05 = np.zeros_like(self.T) + self.dem_sa_05 = np.zeros_like(self.T) + ## Eq A1a to Eq A2c in Cao and Peterson 2006 + for i, t in enumerate(self.T): + if t <= self.Tav: + self.dem_sa_05[i] = sa_03 + self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 + elif t <= self.Tvd: + self.dem_sa_05[i] = sa_10/t + self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 + else: + self.dem_sa_05[i] = sa_10 * self.Tvd / t**2 # Ea. 
A1a + self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] + + def get_sa(self, T): + # return np.interp(T, self.T, self.dem_sa_05) + if T <= self.Tav: + return self.sa_03 + elif T <= self.Tvd: + return self.sa_10/T + else: + return self.sa_10 * self.Tvd / T**2 + + def get_sd(self, T): + # return np.interp(T, self.T, self.dem_sd_05) + # return np.interp(T, self.T, self.dem_sa_05) + if T <= self.Tav: + return self.get_sd_from_sa(self.sa_03, T) + elif T <= self.Tvd: + return self.get_sd_from_sa(self.sa_10/T, T) + else: + return self.get_sd_from_sa(self.sa_10 * self.Tvd / T**2, T) + + def get_sd_from_sa(self, sa, T): + return self.g/(4 * np.pi**2) * T**2 * sa + + def set_Tavb(self, damping_model, tol = 0.05, max_iter = 100): + x_prev = 5 # Start with 5% damping + for i in range(max_iter): + beta = x_prev + ra = 2.12/(3.21-0.68*np.log(beta)) + Tavb = self.Tav * (2.12/(3.21-0.68*np.log(beta)))/(1.65/(2.31-0.41*np.log(beta))) + sa = self.get_sa(Tavb) / ra + sd = self.get_sd_from_sa(sa, Tavb) + beta_eff = damping_model.get_beta(sd, sa) + x_next = beta_eff + if np.abs(x_next - x_prev) < tol: + self.Tavb = self.Tav * (2.12/(3.21-0.68*np.log(beta_eff)))/(1.65/(2.31-0.41*np.log(beta_eff))) + break + x_prev = x_next + if (getattr(self, 'Tavb', None) is None or (3.21-0.68*np.log(beta_eff)) < 0 + or 2.12/(3.21-0.68*np.log(beta)) < 1): + # raise a warning + # print('WARNING: in HAZUS demand model, the Tavb is not converged.') + self.Tavb = self.Tav + + + def set_beta_tvd(self, damping_model, tol = 0.05, max_iter = 100): + x_prev = 5 # Start with 5% damping + max_iter = 100 + tol = 0.05 + for i in range(max_iter): + beta = x_prev + Tvd = self.Tvd + rd = 1.65/(2.31-0.41*np.log(beta)) + sa = self.get_sa(Tvd)/rd + sd = self.get_sd_from_sa(sa, Tvd) + beta_eff = damping_model.get_beta(sd, sa) + x_next = beta_eff + if np.abs(x_next - x_prev) < tol: + self.beta_tvd = x_next + break + x_prev = x_next + if (getattr(self, 'beta_tvd', None) is None or (2.31-0.41*np.log(self.beta_tvd)) < 0 + or 1.65/(2.31-0.41*np.log(self.beta_tvd)) < 1): + # raise a warning + # print('WARNING: in HAZUS demand model, the beta_tvd is not converged.') + self.beta_tvd = -1 # This will be overwritten in get_reduced_demand. 
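+
+    # --- Illustrative note (a sketch, not part of the class API) ---
+    # get_reduced_demand() below divides the 5%-damped spectral ordinates
+    # by the damping-dependent reduction factors used throughout this
+    # class (beta_eff in percent):
+    #
+    #   RA = 2.12 / (3.21 - 0.68 * np.log(beta_eff))  # T <= Tavb
+    #   RV = 1.65 / (2.31 - 0.41 * np.log(beta_eff))  # Tavb < T <= Tvd
+    #   RD = 1.39 / (1.82 - 0.27 * np.log(beta_eff))  # T > Tvd (Eq. A9)
+    #
+    # At beta_eff = 10 (percent), these evaluate to roughly RA ~ 1.29,
+    # RV ~ 1.21 and RD ~ 1.16, i.e. the spectral ordinates shrink by
+    # about 22%, 17% and 14% in the three period ranges.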
+ + + def get_reduced_demand(self, beta_eff): + if getattr(self, 'Tavb', None) is None: + raise ValueError('The Tavb is not set yet.') + if getattr(self, 'beta_tvd', None) is None: + raise ValueError('The beta_tvd is not set yet.') + RA = 2.12/(3.21-0.68*np.log(beta_eff)) + Rv = 1.65/(2.31-0.41*np.log(beta_eff)) + if self.beta_tvd < 0: + RD = 1.39/(1.82 - 0.27 * np.log(beta_eff)) # EQ A9 in Cao and Peterson 2006 + else: + RD = (1.65/(2.31-0.41*np.log(self.beta_tvd))) + dem_sa = np.zeros_like(np.array(self.T)) + dem_sd = np.zeros_like(np.array(self.T)) + for i, t in enumerate(self.T): + if t <= self.Tavb: + dem_sa[i] = self.get_sa(t) / RA + dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + elif t <= self.Tvd: + dem_sa[i] = self.get_sa(t) / Rv + dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + else: + dem_sa[i] = self.get_sa(t) / RD + dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + return dem_sd, dem_sa + + + def set_ruduction_factor(self, beta_eff): + if getattr(self, 'Tavb', None) is None: + raise ValueError('The Tavb is not set yet.') + self.RA = 2.12/(3.21-0.68*np.log(beta_eff)) + self.Rv = 1.65/(2.31-0.41*np.log(beta_eff)) + self.RD = (1.65/(2.31-0.41*np.log(beta_eff))) + + # def __init__(self, sa_03, sa_10, Mw = 7.0): + # self.Tvd = np.power(10, (Mw - 5)/2) + # self.Tav = sa_10/sa_03 + # g = 386 + # # self.T is defined as typical GMPEs + # self.T = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, + # 0.25, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 7.5, 10] + # # insert tvd and tav in self.T + # self.T.append(self.Tvd) + # self.T.append(self.Tav) + # self.T = np.sort(self.T) + # self.dem_sd_05 = np.zeros_like(self.T) + # self.dem_sa_05 = np.zeros_like(self.T) + # ## Eq A1a to Eq A2c in Cao and Peterson 2006 + # for i, t in enumerate(self.T): + # if t <= self.Tav: + # self.dem_sa_05[i] = sa_03 + # self.dem_sd_05[i] = g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 + # elif t <= self.Tvd: + # self.dem_sa_05[i] = sa_10/t + # self.dem_sd_05[i] = g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 + # else: + # self.dem_sa_05[i] = sa_10 * self.Tvd / t**2 # Ea. A1a + # self.dem_sd_05[i] = g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] + # self.Mw = Mw + + def name(self): + return 'HAZUS' + + @staticmethod + def check_IM(IM_header): + if 'SA_0.3' not in IM_header: + raise ValueError('The IM header should contain SA_0.3') + if 'SA_1.0' not in IM_header: + raise ValueError('The IM header of should contain SA_1.0') + +class HAZUS_lin_chang_2003(HAZUS): + """ + A class to represent the design spectrum from HAZUS V5 (2022), and the + damping deduction relationship from Lin and Chang 2003. 
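+
+    The damping modification factor implemented in get_dmf below is
+
+        R(T, beta_eff) = 1 - alpha * T**0.3 / (T + 1)**0.65,
+        alpha = 1.303 + 0.436 * ln(beta_eff),
+
+    and get_reduced_demand divides the 5%-damped ordinates by R at every
+    period.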
+ """ + def __init__(self, Mw = 7.0): + super().__init__(Mw) + def name(self): + return "HAZUS_lin_chang_2003" + def get_dmf(self, beta_eff, T): + alpha = 1.303+0.436*np.log(beta_eff) + R = 1-alpha*T**0.3/(T+1)**0.65 + return R + def get_reduced_demand(self, beta_eff): + if getattr(self, 'Tavb', None) is None: + raise ValueError('The Tavb is not set yet.') + if getattr(self, 'beta_tvd', None) is None: + raise ValueError('The beta_tvd is not set yet.') + dem_sa = np.zeros_like(np.array(self.T)) + dem_sd = np.zeros_like(np.array(self.T)) + for i, t in enumerate(self.T): + R = self.get_dmf(beta_eff, t) + if t <= self.Tavb: + dem_sa[i] = self.get_sa(t) / R + dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + elif t <= self.Tvd: + dem_sa[i] = self.get_sa(t) / R + dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + else: + dem_sa[i] = self.get_sa(t) / R + dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + return dem_sd, dem_sa + +class ASCE_7_10(demand_model_base): + """ + A class to represent the design spectrum from ASCE_7_10. + + Attributes: + ---------- + Tvd : float + Tvd as HAZUS Eq. 4-4. + Tav : float + Tav as Cao and Peterson 2006. Figure A1. + T : numpy.ndarray + Periods in the demand spectrum. + dem_sd_05 : numpy.ndarray + Spectrum displacement in the demand spectrum at 5% damping. In the unit of (inch) + dem_sa_05 : numpy.ndarray + Spectrum acceleration in the demand spectrum at 5% damping. In the unit of (g) + + Methods: + ------- + """ + + def __init__(self, T, dem_sd_05, dem_sa_05): + self.T = T + self.dem_sd_05 = dem_sd_05 + self.dem_sa_05 = dem_sa_05 + diff --git a/modules/performSIMULATION/capacitySpectrum/HC_capacity_data.csv b/modules/performSIMULATION/capacitySpectrum/HC_capacity_data.csv new file mode 100644 index 000000000..b6fe20984 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/HC_capacity_data.csv @@ -0,0 +1,37 @@ +type,Dy,Ay,Du,Au +W1,0.48,0.4,11.51,1.2 +W2,0.626,0.4,12.528,1 +S1L,0.611,0.25,14.667,0.749 +S1M,1.775,0.156,28.4,0.468 +S1H,4.657,0.098,55.884,0.293 +S2L,0.626,0.4,10.023,0.8 +S2M,2.426,0.333,25.876,0.667 +S2H,7.746,0.254,61.965,0.508 +S3,0.626,0.4,10.023,0.8 +S4L,0.384,0.32,6.906,0.72 +S4M,1.092,0.267,13.1,0.6 +S4H,3.486,0.203,31.37,0.457 +S5L,0.12,0.1,1.199,0.2 +S5M,0.341,0.083,2.274,0.167 +S5H,1.089,0.063,5.446,0.127 +C1L,0.391,0.25,9.387,0.749 +C1M,1.152,0.208,18.436,0.624 +C1H,2.011,0.098,24.13,0.293 +C2L,0.48,0.4,9.592,1 +C2M,1.038,0.333,13.841,0.833 +C2H,2.939,0.254,29.394,0.635 +C3L,0.12,0.1,1.349,0.225 +C3M,0.26,0.083,1.946,0.188 +C3H,0.735,0.063,4.134,0.143 +PC1,0.719,0.6,11.51,1.2 +PC2L,0.48,0.4,7.673,0.8 +PC2M,1.038,0.333,11.073,0.667 +PC2H,2.939,0.254,23.515,0.508 +RM1L,0.639,0.533,10.229,1.066 +RM1M,1.384,0.444,14.76,0.889 +RM2L,0.639,0.533,10.229,1.066 +RM2M,1.384,0.444,14.76,0.889 +RM2H,3.918,0.338,31.346,0.677 +URML,0.24,0.2,2.397,0.4 +URMM,0.272,0.111,1.812,0.222 +MH,0.18,0.15,2.158,0.3 \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/LC_capacity_data.csv b/modules/performSIMULATION/capacitySpectrum/LC_capacity_data.csv new file mode 100644 index 000000000..6b4b5a1a3 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/LC_capacity_data.csv @@ -0,0 +1,37 @@ +type,Dy,Ay,Du,Au +W1,0.24,0.2,4.316,0.6 +W2,0.157,0.1,2.349,0.25 +S1L,0.153,0.062,2.292,0.187 +S1M,0.444,0.039,4.437,0.117 +S1H,1.164,0.024,8.732,0.073 +S2L,0.157,0.1,1.566,0.2 +S2M,0.607,0.083,4.043,0.167 +S2H,1.936,0.063,9.682,0.127 +S3,0.157,0.1,1.566,0.2 +S4L,0.096,0.08,1.079,0.18 +S4M,0.273,0.067,2.047,0.15 
+S4H,0.871,0.051,4.902,0.114 +S5L,0.12,0.1,1.199,0.2 +S5M,0.341,0.083,2.274,0.167 +S5H,1.089,0.063,5.446,0.127 +C1L,0.098,0.062,1.467,0.187 +C1M,0.288,0.052,2.881,0.156 +C1H,0.503,0.024,3.77,0.073 +C2L,0.12,0.1,1.499,0.25 +C2M,0.26,0.083,2.163,0.208 +C2H,0.735,0.063,4.593,0.159 +C3L,0.12,0.1,1.349,0.225 +C3M,0.26,0.083,1.946,0.188 +C3H,0.735,0.063,4.134,0.143 +PC1,0.18,0.15,1.798,0.3 +PC2L,0.12,0.1,1.199,0.2 +PC2M,0.26,0.083,1.73,0.167 +PC2H,0.735,0.063,3.674,0.127 +RM1L,0.16,0.133,1.598,0.267 +RM1M,0.346,0.111,2.306,0.222 +RM2L,0.16,0.133,1.598,0.267 +RM2M,0.346,0.111,2.306,0.222 +RM2H,0.98,0.085,4.898,0.169 +URML,0.24,0.2,2.397,0.4 +URMM,0.272,0.111,1.812,0.222 +MH,0.18,0.15,2.158,0.3 \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/MC_capacity_data.csv b/modules/performSIMULATION/capacitySpectrum/MC_capacity_data.csv new file mode 100644 index 000000000..ec5e96e0b --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/MC_capacity_data.csv @@ -0,0 +1,37 @@ +type,Dy,Ay,Du,Au +W1,0.36,0.3,6.475,0.9 +W2,0.313,0.2,4.698,0.5 +S1L,0.306,0.125,5.5,0.375 +S1M,0.888,0.078,10.651,0.234 +S1H,2.329,0.049,20.957,0.147 +S2L,0.313,0.2,3.758,0.4 +S2M,1.213,0.167,9.704,0.333 +S2H,3.873,0.127,23.237,0.254 +S3,0.313,0.2,3.758,0.4 +S4L,0.192,0.16,2.59,0.36 +S4M,0.546,0.133,4.913,0.3 +S4H,1.743,0.102,11.76,0.228 +S5L,0.12,0.1,1.199,0.2 +S5M,0.341,0.083,2.274,0.167 +S5H,1.089,0.063,5.446,0.127 +C1L,0.196,0.125,3.52,0.375 +C1M,0.576,0.104,6.914,0.312 +C1H,1.005,0.049,9.049,0.147 +C2L,0.24,0.2,3.597,0.5 +C2M,0.519,0.167,5.191,0.417 +C2H,1.47,0.127,11.023,0.317 +C3L,0.12,0.1,1.349,0.225 +C3M,0.26,0.083,1.946,0.188 +C3H,0.735,0.063,4.134,0.143 +PC1,0.36,0.3,4.316,0.6 +PC2L,0.24,0.2,2.878,0.4 +PC2M,0.519,0.167,4.153,0.333 +PC2H,1.47,0.127,8.818,0.254 +RM1L,0.32,0.267,3.836,0.533 +RM1M,0.692,0.222,5.535,0.444 +RM2L,0.32,0.267,3.836,0.533 +RM2M,0.692,0.222,5.535,0.444 +RM2H,1.959,0.169,11.755,0.338 +URML,0.24,0.2,2.397,0.4 +URMM,0.272,0.111,1.812,0.222 +MH,0.18,0.15,2.158,0.3 \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/PC_capacity_data.csv b/modules/performSIMULATION/capacitySpectrum/PC_capacity_data.csv new file mode 100644 index 000000000..c50122b71 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/PC_capacity_data.csv @@ -0,0 +1,37 @@ +type,Dy,Ay,Du,Au +W1,0.24,0.2,4.316,0.6 +W2,0.157,0.1,2.349,0.25 +S1L,0.153,0.062,2.292,0.187 +S1M,0.444,0.039,4.437,0.117 +S1H,1.164,0.024,8.732,0.073 +S2L,0.157,0.1,1.566,0.2 +S2M,0.607,0.083,4.043,0.167 +S2H,1.936,0.063,9.682,0.127 +S3,0.157,0.1,1.566,0.2 +S4L,0.096,0.08,1.079,0.18 +S4M,0.273,0.067,2.047,0.15 +S4H,0.871,0.051,4.902,0.114 +S5L,0.12,0.1,1.199,0.2 +S5M,0.341,0.083,2.274,0.167 +S5H,1.089,0.063,5.446,0.127 +C1L,0.098,0.062,1.467,0.187 +C1M,0.288,0.052,2.881,0.156 +C1H,0.503,0.024,3.77,0.073 +C2L,0.12,0.1,1.499,0.25 +C2M,0.26,0.083,2.163,0.208 +C2H,0.735,0.063,4.593,0.159 +C3L,0.12,0.1,1.349,0.225 +C3M,0.26,0.083,1.946,0.188 +C3H,0.735,0.063,4.134,0.143 +PC1,0.18,0.15,1.798,0.3 +PC2L,0.12,0.1,1.199,0.2 +PC2M,0.26,0.083,1.73,0.167 +PC2H,0.735,0.063,3.674,0.127 +RM1L,0.16,0.133,1.598,0.267 +RM1M,0.346,0.111,2.306,0.222 +RM2L,0.16,0.133,1.598,0.267 +RM2M,0.346,0.111,2.306,0.222 +RM2H,0.98,0.085,4.898,0.169 +URML,0.24,0.2,2.397,0.4 +URMM,0.272,0.111,1.812,0.222 +MH,0.09,0.075,0.719,0.15 \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/hazus_capacity_alpha2.csv b/modules/performSIMULATION/capacitySpectrum/hazus_capacity_alpha2.csv new file mode 
100644 index 000000000..6b39e3001 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/hazus_capacity_alpha2.csv @@ -0,0 +1,37 @@ +type,alpha2 +W1,0.75 +W2,0.75 +S1L,0.75 +S1M,0.75 +S1H,0.6 +S2L,0.75 +S2M,0.75 +S2H,0.6 +S3,0.75 +S4L,0.75 +S4M,0.75 +S4H,0.6 +S5L,0.75 +S5M,0.75 +S5H,0.6 +C1L,0.75 +C1M,0.75 +C1H,0.6 +C2L,0.75 +C2M,0.75 +C2H,0.6 +C3L,0.75 +C3M,0.75 +C3H,0.6 +PC1,0.75 +PC2L,0.75 +PC2M,0.75 +PC2H,0.6 +RM1L,0.75 +RM1M,0.75 +RM2L,0.75 +RM2M,0.75 +RM2H,0.6 +URML,0.75 +URMM,0.75 +MH,1 \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/hazus_kappa_data.csv b/modules/performSIMULATION/capacitySpectrum/hazus_kappa_data.csv new file mode 100644 index 000000000..8c02b08c5 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/hazus_kappa_data.csv @@ -0,0 +1,36 @@ +W1,1,0.8,0.5,0.9,0.6,0.3,0.7,0.4,0.2,0.5,0.3,0.1 +W2,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +S1L,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +S1M,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0.2 +S1H,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0.2 +S2L,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +S2M,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +S2H,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +S3,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +S4L,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +S4M,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +S4H,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +S5L,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +S5M,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +S5H,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +C1L,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +C1M,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +C1H,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +C2L,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +C2M,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +C2H,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +C3L,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +C3M,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +C3H,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +PC1,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +PC2L,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +PC2M,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +PC2H,0.7,0.5,0.3,0.6,0.4,0.2,0.5,0.3,0.1,0.4,0.2,0 +RM1L,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +RM1M,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +RM2L,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +RM2M,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +RM2H,0.9,0.6,0.4,0.8,0.4,0.2,0.6,0.3,0.1,0.4,0.2,0 +URML,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +URMM,0.5,0.3,0.1,0.5,0.3,0.1,0.5,0.3,0.1,0.4,0.2,0 +MH,0.8,0.4,0.2,0.8,0.4,0.2,0.8,0.4,0.2,0.6,0.3,0.1 \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/hazus_typical_roof_height.csv b/modules/performSIMULATION/capacitySpectrum/hazus_typical_roof_height.csv new file mode 100644 index 000000000..deefbb674 --- /dev/null +++ b/modules/performSIMULATION/capacitySpectrum/hazus_typical_roof_height.csv @@ -0,0 +1,37 @@ +type,roof_height_ft +W1,14 +W2,24 +S1L,24 +S1M,60 +S1H,156 +S2L,24 +S2M,60 +S2H,156 +S3,15 +S4L,24 +S4M,60 +S4H,156 +S5L,24 +S5M,60 +S5H,156 +C1L,20 +C1M,50 +C1H,120 +C2L,20 +C2M,50 +C2H,120 +C3L,20 +C3M,50 +C3H,120 +PC1,15 +PC2L,20 +PC2M,50 +PC2H,120 +RM1L,20 +RM1M,50 +RM2L,20 +RM2M,50 +RM2H,120 +URML,15 +URMM,35 +MH,10 \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/runCMS.py b/modules/performSIMULATION/capacitySpectrum/runCMS.py new file mode 100644 index 000000000..6551f34b0 --- 
/dev/null +++ b/modules/performSIMULATION/capacitySpectrum/runCMS.py @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Leland Stanford Junior University +# Copyright (c) 2023 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . +# +# Contributors: +# Jinyan Zhao +# Tamika Bassman +# Adam Zsarnóczay + +import argparse +import json +import logging +import os +import sys +from pathlib import Path, PurePath + +import CapacityModels +import DampingModels +import DemandModels +import numpy as np + +# import the common constants and methods +this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120 +main_dir = this_dir.parents[1] +sys.path.insert(0, str(main_dir / 'common')) +import simcenter_common # noqa: E402 + + +def find_performance_point(cap_x,cap_y,dem_x,dem_y,dd=0.001): + """Interpolate to have matching discretization for cap/demand curves. + + Created by: Tamika Bassman. 
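+
+    A hypothetical input sketch (piecewise-linear capacity and demand
+    curves in Sd-Sa space; the numbers are invented for illustration):
+
+        cap_x = [0.0, 1.0, 2.0, 3.0]; cap_y = [0.0, 0.5, 0.6, 0.6]
+        dem_x = [0.0, 1.0, 2.0, 3.0]; dem_y = [1.0, 0.8, 0.5, 0.3]
+        sd, sa = find_performance_point(cap_x, cap_y, dem_x, dem_y)
+        # first crossing of the two curves: (sd, sa) ~ (1.75, 0.575)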
+ """ + # Interpolate to have matching discretization for cap/demand curves + x_interp = np.arange(0,min(cap_x[-1],dem_x[-1])+dd,dd) + dem_y_interp = np.interp(x_interp,dem_x,dem_y) + cap_y_interp = np.interp(x_interp,cap_x,cap_y) + + # # Enforce capacity curve to have same final length as spectrum + # cap_y = cap_y[:min(len(cap_x),len(spec_x))] + + # Find sign changes in the difference between the two curves - these are + # effectively intersections between the two curves + curves_diff = dem_y_interp - cap_y_interp + # adapted from https://stackoverflow.com/questions/4111412/how-do-i-get-a-list-of-indices-of-non-zero-elements-in-a-list + id_sign_changes = [n for n,(i,j) in enumerate(zip(curves_diff[:-1],curves_diff[1:])) if i*j <= 0] + + # id_sign_changes = [] + # for i,sign in enumerate(curves_diff[:-1]): + # if curves_diff[i]*curves_diff[i+1]<=0: + # # print(i) + # id_sign_changes += [i] + + # If sign changes detected, return the first (smallest abscissa) as the PP + if len(id_sign_changes) > 0: + ix = id_sign_changes[0] + perf_x = x_interp[ix] + perf_y = np.average([cap_y_interp[ix],dem_y_interp[ix]]) + elif dem_y_interp[0] > cap_y_interp[0]: + perf_x = x_interp[-1] + perf_y = cap_y_interp[-1] + elif dem_y_interp[0] < cap_y_interp[0]: + perf_x = 0.001 # x_interp[0] + perf_y = 0.001 # cap_y_interp[0] + # except IndexError as err: + # print('No performance point found; curves do not intersect.') + # print('IndexError: ') + # print(err) + + return perf_x,perf_y + +def find_unit_scale_factor(aim): + """Find the unit scale factor based on the AIM file. + + Args: + AIM (dict): The AIM file content as a dictionary. + + Returns + ------- + dict: A dictionary with the scale factors for different units. + + Raises + ------ + KeyError: If 'units' or 'RegionalEvent' are not defined in the AIM file. + """ + general_info = aim['GeneralInformation'] + if general_info.get('units', None) is None: + msg = 'No units defined in the AIM file' + raise KeyError(msg) + units = general_info['units'] + length_units = units.get('length', None) + time_units = units.get('time', None) + if aim.get('RegionalEvent', None) is None: + msg = 'No RegionalEvent defined in the AIM file' + raise KeyError(msg) + f_scale_im_user_to_cms = {} + f_time_in = getattr(simcenter_common, time_units, None) + f_length_in = getattr(simcenter_common, length_units, None) + for name, unit in aim['RegionalEvent']['units'].items(): + unit_type = None + for base_unit_type, unit_set in simcenter_common.unit_types.items(): + if unit in unit_set: + unit_type = base_unit_type + # If the input event unit is acceleration, conver to g + if unit_type == 'acceleration': + f_in = f_length_in / f_time_in**2.0 + f_out = 1 / simcenter_common.g + f_scale_im_user_to_cms[name] = f_in * f_out + else: + f_scale_im_user_to_cms[name] = 1 + f_scale_edp_cms_to_user = {} + f_scale_edp_cms_to_user['1-SA-1-1'] = simcenter_common.g / ( + f_length_in / f_time_in**2.0) + f_scale_edp_cms_to_user['1-PRD-1-1'] = simcenter_common.inch / f_length_in + + return f_scale_im_user_to_cms, f_scale_edp_cms_to_user + + +def run_csm(demand_model, capacity_model, damping_model, tol, max_iter, im_i): + """Run the Capacity Spectrum Method (CSM) analysis. + + Args: + demand_model (object): The demand model used in the analysis. + capacity_model (object): The capacity model used in the analysis. + damping_model (object): The damping model used in the analysis. + tol (float): The tolerance for convergence. + max_iter (int): The maximum number of iterations. 
+        im_i (int): The intensity measure index.
+
+    Returns
+    -------
+    tuple: The performance point as (spectral displacement, spectral
+        acceleration).
+
+    Notes
+    -----
+    A warning is logged if the analysis does not converge within the
+    maximum number of iterations.
+    """
+    beta_eff = damping_model.get_beta_elastic()
+    beta_d = beta_eff
+
+    # Track convergence
+    iter_sd = []  # intermediate predictions of Sd @ PP
+    iter_sa = []  # intermediate predictions of Sa @ PP
+    # Iterate to find converged PP
+    for i in range(max_iter):
+        # Calc demand spectrum
+        dem_sd, dem_sa = demand_model.get_reduced_demand(beta_eff)
+        # create capacity curve
+        cap_sd, cap_sa = capacity_model.get_capacity_curve(dem_sd[-1])
+        # Calc intersection (PP)
+        perf_sd, perf_sa = find_performance_point(cap_sd,cap_sa,dem_sd,dem_sa)
+        iter_sd.append(perf_sd)
+        iter_sa.append(perf_sa)
+
+        # Calc effective damping at this point on the capacity curve
+        beta_eff = damping_model.get_beta(perf_sd,perf_sa)
+
+        # Check if tolerance met on damping ratios of capacity, demand curves at this point
+        if abs(beta_d - beta_eff) <= tol:
+            # print('Final Iteration #%d' % (i+1))
+            # print('IM realization #%d' % (im_i))
+            # print('Performance Point: (%.3f,%.3f)' % (perf_sd, perf_sa))
+            # # print('Final Demand Spectrum Damping: %.3f' % beta_d)
+            # print('Final Elastic Damping: %.3f' % damping_model.get_beta_elastic())
+            # print('Final Capacity Curve Eff. Damping: %.3f' % beta_eff)
+            # print('\n')
+            break
+        # If not met, adjust the demand spectrum accordingly and reiterate
+        # print('Iteration #%d' % (i+1))
+        # print('Performance Point: (%.3f,%.3f)' % (perf_sd, perf_sa))
+        # print('Demand Spectrum Damping: %.3f' % beta_d)
+        # print('Capacity Curve Eff. Damping: %.3f' % beta_eff)
+        # print('\n')
+        dem_sd, dem_sa = demand_model.get_reduced_demand(beta_eff)
+        beta_d = beta_eff
+    if i == max_iter - 1:
+        logging.warning(f'The capacity spectrum method did not converge for the {im_i}th IM realization.')
+
+    return perf_sd, perf_sa
+
+
+def write_RV(AIM_input_path, EVENT_input_path):  # noqa: C901, N802, N803, D103
+
+    # open the AIM file
+    with open(AIM_input_path, encoding='utf-8') as f:  # noqa: PTH123
+        AIM_in = json.load(f)  # noqa: N806
+    applications = AIM_in['Applications']
+    UQ_app = applications['UQ']['Application']  # noqa: N806
+
+    # Raise an error if the UQ application is not None
+    if UQ_app != "None":
+        msg = "This app is only used when UQ is None, similar to IMasEDP"
+        raise ValueError(msg)
+
+    # get the simulation application
+    SIM_input = applications['Simulation']  # noqa: N806
+    if SIM_input['Application'] != 'CapacitySpectrumMethod':
+        msg = "Wrong simulation application is called"
+        raise ValueError(msg)
+    SIM_input_data = SIM_input['ApplicationData']  # noqa: N806
+    tol = SIM_input_data.get('tolerance', 0.05)
+    max_iter = SIM_input_data.get('max_iter', 100)
+
+
+    # open the event file and get the list of events
+    with open(EVENT_input_path, encoding='utf-8') as f:  # noqa: PTH123
+        EVENT_in = json.load(f)  # noqa: N806
+
+    # if there is a list of possible events, load all of them
+    if len(EVENT_in['randomVariables']) > 0:
+        event_list = EVENT_in['randomVariables'][0]['elements']
+    else:
+        event_list = [
+            EVENT_in['Events'][0]['event_id'],
+        ]
+
+    evt = EVENT_in['Events'][0]
+    data_dir = Path(evt['data_dir'])
+    f_scale = evt['unitScaleFactor']
+    f_scale_im_user_to_cms, f_scale_edp_cms_to_user = find_unit_scale_factor(AIM_in)
+
+    file_sample_dict = {}
+
+    for e_i, event in enumerate(event_list):
+        filename, sample_id, __ = event.split('x')
+
+        if
filename not in file_sample_dict: + file_sample_dict.update({filename: [[], []]}) + + file_sample_dict[filename][0].append(e_i) + file_sample_dict[filename][1].append(int(sample_id)) + + IM_samples = None # noqa: N806 + + for filename in file_sample_dict: + # get the header + header_data = np.genfromtxt( + data_dir / filename, + delimiter=',', + names=None, + max_rows=1, + dtype=str, + ndmin=1, + ) + header = header_data # .dtype. + + data = np.genfromtxt(data_dir / filename, delimiter=',', skip_header=1) + + # get the number of columns and reshape the data + col_count = len(header) + if col_count > 1: + data = data.reshape((data.size // col_count, col_count)) + else: + data = np.atleast_1d(data) + + # choose the right samples + samples = data[file_sample_dict[filename][1]] + + if IM_samples is None: + if len(samples.shape) > 1: + IM_samples = np.zeros((len(event_list), samples.shape[1])) # noqa: N806 + else: + IM_samples = np.zeros(len(event_list)) # noqa: N806 + + IM_samples[file_sample_dict[filename][0]] = samples + + if len(IM_samples.shape) == 1: + IM_samples = np.reshape(IM_samples, (IM_samples.shape[0], 1)) # noqa: N806 + + ### IM_samples are scaled to the units defined in the AIM file + IM_samples = IM_samples.T # noqa: N806 + for c_i, col in enumerate(header): + f_i = f_scale.get(col.strip(), f_scale.get('ALL', None)) + f_i_to_cms = f_scale_im_user_to_cms.get(col.strip(), f_scale_im_user_to_cms.get('ALL', None)) + if f_i is None: + raise ValueError(f'No units defined for {col}') # noqa: EM102, TRY003 + + IM_samples[c_i] *= f_i * f_i_to_cms + IM_samples = IM_samples.T # noqa: N806 + + # np.savetxt( + # Path(PurePath(EVENT_input_path).parent) / 'IM_samples.csv', + # IM_samples, + # delimiter=',', + # header=',' + ', '.join(['sa_03', 'sa_10']), + # comments='', + # ) + + # the first column is Spectrum Acceleration, the second column is Spectrum Displacement + EDP_output = np.zeros((IM_samples.shape[0], 2)) # noqa: N806 + + # demand model + demand_model_name = SIM_input_data['DemandModel']['Name'] + if demand_model_name in ['HAZUS', 'HAZUS_lin_chang_2003']: + demand_model = getattr(DemandModels, demand_model_name)( + Mw=SIM_input_data['DemandModel']['Parameters']['EarthquakeMagnitude'] + ) + # DemandModels.HAZUS.check_IM(header) + # demand_model = DemandModels.HAZUS( + # Mw=SIM_input_data['DemandModel']['Parameters']['EarthquakeMagnitude'] + # ) + # capacity model + capacity_model_name = SIM_input_data['CapacityModel']['Name'] + if capacity_model_name == 'HAZUS_cao_peterson_2006': + capacity_model = CapacityModels.HAZUS_cao_peterson_2006( + general_info=AIM_in['GeneralInformation']) + + # damping model + damping_model_name = SIM_input_data['DampingModel']['Name'] + if damping_model_name == 'HAZUS_cao_peterson_2006': + damping_model = DampingModels.HAZUS_cao_peterson_2006(demand_model, capacity_model) + + # Loop through each IM sample + for ind in range(IM_samples.shape[0]): + # update demand model based on IM sample + if demand_model_name in ['HAZUS', 'HAZUS_lin_chang_2003']: + demand_model = getattr(DemandModels, demand_model_name)( + Mw=SIM_input_data['DemandModel']['Parameters']['EarthquakeMagnitude'] + ) + sa_03_ind = np.where(header == 'SA_0.3')[0][0] + sa_03 = IM_samples[ind, sa_03_ind] + sa_10_ind = np.where(header == 'SA_1.0')[0][0] + sa_10 = IM_samples[ind, sa_10_ind] + demand_model.set_IMs(sa_03, sa_10) + demand_model.set_Tavb(damping_model) + demand_model.set_beta_tvd(damping_model) + # if (damping_model_name == 'HAZUS_cao_peterson_2006' + # and capacity_model_name == 
'HAZUS_cao_peterson_2006'):
+        #     damping_model.set_HAZUS_bldg_type(capacity_model.get_hazus_bldg_type())
+
+        # iterate to get sd and sa
+        perf_sd, perf_sa = run_csm(demand_model, capacity_model, damping_model, tol, max_iter, ind)
+        EDP_output[ind, 0] = perf_sa
+
+        # Table 5-1 in Hazus, convert to inches
+        general_info = AIM_in['GeneralInformation']
+        if general_info.get('RoofHeight', None) is not None:
+            roof_height = general_info['RoofHeight']
+        else:
+            roof_height = capacity_model.get_hazus_roof_height()*12
+        drift_ratio = perf_sd / capacity_model.get_hazus_alpha2() / roof_height
+        EDP_output[ind, 1] = drift_ratio
+
+    ### Convert EDPs to the units defined in the AIM file
+    EDP_output = EDP_output.T  # noqa: N806
+    for c_i, col in enumerate(['1-SA-1-1']):
+        f_i = f_scale_edp_cms_to_user.get(col.strip(), f_scale.get('ALL', None))
+        if f_i is None:
+            raise ValueError(f'No units defined for {col}')  # noqa: EM102, TRY003
+        EDP_output[c_i] *= f_i
+    EDP_output = EDP_output.T  # noqa: N806
+
+    index = np.reshape(np.arange(EDP_output.shape[0]), (EDP_output.shape[0], 1))
+
+    EDP_output = np.concatenate([index, EDP_output], axis=1)  # noqa: N806
+
+    working_dir = Path(PurePath(EVENT_input_path).parent)
+    # working_dir = posixpath.dirname(EVENT_input_path)
+
+    # prepare the header
+    header_out = ['1-PFA-0-0', '1-PRD-1-1']
+
+    np.savetxt(
+        working_dir / 'response.csv',
+        EDP_output,
+        delimiter=',',
+        header=',' + ', '.join(header_out),
+        comments='',
+    )
+
+
+# TODO: consider removing this function  # noqa: TD002
+# It is not used currently
+def create_EDP(EVENT_input_path, EDP_input_path):  # noqa: N802, N803, D103
+    # load the EDP file
+    with open(EDP_input_path, encoding='utf-8') as f:  # noqa: PTH123
+        EDP_in = json.load(f)  # noqa: N806
+
+    # load the EVENT file
+    with open(EVENT_input_path, encoding='utf-8') as f:  # noqa: PTH123
+        EVENT_in = json.load(f)  # noqa: N806
+
+    # store the IM(s) in the EDP file
+    for edp in EDP_in['EngineeringDemandParameters'][0]['responses']:
+        for im in EVENT_in['Events']:
+            if edp['type'] in im.keys():  # noqa: SIM118
+                edp['scalar_data'] = [im[edp['type']]]
+
+    with open(EDP_input_path, 'w', encoding='utf-8') as f:  # noqa: PTH123
+        json.dump(EDP_in, f, indent=2)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--filenameAIM', default=None)
+    parser.add_argument('--filenameSAM', default=None)
+    parser.add_argument('--filenameEVENT')
+    parser.add_argument('--filenameEDP')
+    parser.add_argument('--filenameSIM', default=None)
+    parser.add_argument('--getRV', nargs='?', const=True, default=False)
+    args = parser.parse_args()
+
+    if args.getRV:
+        sys.exit(write_RV(args.filenameAIM, args.filenameEVENT))
+    else:
+        sys.exit(create_EDP(args.filenameEVENT, args.filenameEDP))

From 0bd8b01f5353ec631c648cac9a3185c2ab063a39 Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 12 Sep 2024 17:28:54 -0700
Subject: [PATCH 47/59] fmk - adding an ifdef and define for M_PI in window.h
 as M_PI is not in the standard

---
 modules/createEVENT/common/smelt/window.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/modules/createEVENT/common/smelt/window.h b/modules/createEVENT/common/smelt/window.h
index 4637bbf36..a2eb170ea 100644
--- a/modules/createEVENT/common/smelt/window.h
+++ b/modules/createEVENT/common/smelt/window.h
@@ -8,6 +8,9 @@
 #include
 #include
+#ifndef M_PI
+  #define M_PI 3.14159265358979323846
+#endif
 /**
  * Signal processing functionality
  */

From 468e0c0c7faa3fc8618c83344115aab59112f5 Mon Sep 17 00:00:00 2001
From: jinyan1214
Date: Thu, 12
Sep 2024 17:53:17 -0700 Subject: [PATCH 48/59] ruff fix --- .../regionalGroundMotion/FetchOpenSHA.py | 6 +- .../regionalGroundMotion/HazardSimulation.py | 10 +- .../gmpe/CorrelationModel.py | 2 +- .../capacitySpectrum/CapacityModels.py | 134 +++++--- .../capacitySpectrum/DampingModels.py | 129 ++++---- .../capacitySpectrum/DemandModels.py | 298 +++++++++++++----- .../capacitySpectrum/runCMS.py | 5 +- 7 files changed, 370 insertions(+), 214 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py index ac91e1054..f90569b3c 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py @@ -506,12 +506,12 @@ def export_to_json( # noqa: C901, D103 minMag=0.0, # noqa: N803 maxMag=10.0, # noqa: N803 maxDistance=1000.0, # noqa: N803 - use_hdf5=False, # noqa: N803 + use_hdf5=False, # noqa: FBT002 ): # Initializing erf_data = {'type': 'FeatureCollection'} - site_loc = Location(site_loc[0], site_loc[1]) # noqa: F405 - site = Site(site_loc) # noqa: F405 + site_loc = Location(site_loc[0], site_loc[1]) # type: ignore # noqa: F405 + site = Site(site_loc) # type: ignore # noqa: F405 # Total source number num_sources = erf.getNumSources() source_tag = [] diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py index 53dcc70ab..b98306223 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py @@ -571,13 +571,13 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 os.makedirs(f"{os.environ.get('OQ_DATADIR')}") # noqa: PTH103 # import modules - # from ComputeIntensityMeasure import * # noqa: F403 - # from CreateScenario import * # noqa: F403 - # from CreateStation import * # noqa: F403 + # from ComputeIntensityMeasure import * + # from CreateScenario import * + # from CreateStation import * # # KZ-08/23/22: adding hazard occurrence model - # from HazardOccurrence import * # noqa: F403 - # from SelectGroundMotion import * # noqa: F403 + # from HazardOccurrence import * + # from SelectGroundMotion import * if oq_flag: # import FetchOpenQuake diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index 70aabc58f..bf11b83b8 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -307,7 +307,7 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N80 return rho # noqa: DOC201, RET504, RUF100 -def loth_baker_correlation_2013(stations, im_name_list, stn_dist, num_simu): # noqa: C901 +def loth_baker_correlation_2013(stations, im_name_list, stn_dist, num_simu): """Simulating intra-event residuals Reference: Loth and Baker (2013) A spatial cross-correlation model of spectral diff --git a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py index 9a09759fc..1b8b64d04 100644 --- a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py +++ 
b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py @@ -1,4 +1,4 @@ -# # noqa: N999, D100 +# # noqa: D100, INP001 # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -35,16 +35,16 @@ # # Contributors: # Jinyan Zhao -# +# # References: -# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to -# model uncertainty of input ground motions in the Los Angeles area. Bulletin of +# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to +# model uncertainty of input ground motions in the Los Angeles area. Bulletin of # the Seismological Society of America, 96(2), 365-376. # 2. Steelman, J., & Hajjar, J. F. (2008). Systemic validation of consequence-based # risk management for seismic regional losses. -# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. +# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. # Engineering monographs on earthquake criteria. -# 4. FEMA (2022), HAZUS – Multi-hazard Loss Estimation Methodology 5.0, +# 4. FEMA (2022), HAZUS - Multi-hazard Loss Estimation Methodology 5.0, # Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. @@ -56,18 +56,33 @@ import numpy as np import pandas as pd -ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'} +ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'} # noqa: N816 # original: # ap_DesignLevel = {1940: 'PC', 1940: 'LC', 1975: 'MC', 2100: 'HC'} # Note that the duplicated key is ignored, and Python keeps the last # entry. -ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'} +ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'} # noqa: N816 # original: # ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'} # same thing applies -def convert_story_rise(structureType, stories): +def convert_story_rise(structureType, stories): # noqa: N803 + """ + Convert the story type and number of stories to rise attribute of archetype. + + Parameters + ---------- + structureType : str + The type of the structure. + stories : int + The number of stories. + + Returns + ------- + rise : str or None + The rise attribute of the archetype. + """ if structureType in ['W1', 'W2', 'S3', 'PC1', 'MH']: # These archetypes have no rise information in their IDs rise = None @@ -78,20 +93,23 @@ def convert_story_rise(structureType, stories): stories = int(stories) except (ValueError, TypeError): - raise ValueError( + msg = ( 'Missing "NumberOfStories" information, ' 'cannot infer `rise` attribute of archetype' ) + raise ValueError( # noqa: B904 + msg + ) if structureType == 'RM1': - if stories <= 3: + if stories <= 3: # noqa: PLR2004 rise = "L" else: rise = "M" elif structureType == 'URM': - if stories <= 2: + if stories <= 2: # noqa: PLR2004 rise = "L" else: @@ -108,10 +126,10 @@ def convert_story_rise(structureType, stories): 'PC2', 'RM2', ]: - if stories <= 3: + if stories <= 3: # noqa: PLR2004 rise = "L" - elif stories <= 7: + elif stories <= 7: # noqa: PLR2004 rise = "M" else: @@ -119,11 +137,26 @@ def convert_story_rise(structureType, stories): return rise -def auto_populate_hazus(GI): +def auto_populate_hazus(GI): # noqa: N803 + """ + Auto-populate the HAZUS parameters based on the given building information. + + Parameters + ---------- + GI : dict + The building information. + + Returns + ------- + LF : str + The load factor. + dl : str + The design level. 
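The ap_DesignLevel tables above drive a simple year lookup in the function body that follows: the first breakpoint year greater than or equal to YearBuilt selects the design level, and LF is the HAZUS archetype label (structure type plus rise, e.g. 'C1L') rather than a numeric factor. A minimal sketch of that lookup, assuming the loop stops at its first match as the surrounding code suggests:

ap_design_level = {1940: 'LC', 1975: 'MC', 2100: 'HC'}  # same table as above

def infer_design_level(year_built, table=ap_design_level):
    for year in sorted(table):
        if year_built <= year:
            return table[year]
    return None

assert infer_design_level(1930) == 'LC'
assert infer_design_level(1960) == 'MC'
assert infer_design_level(1990) == 'HC'  # post-1975 falls into the open-ended 2100 bucket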
+ """ # get the building parameters bt = GI['StructureType'] # building type - # get the design level + # get the design level dl = GI.get('DesignLevel', None) if dl is None: # If there is no DesignLevel provided, we assume that the YearBuilt is @@ -131,10 +164,10 @@ def auto_populate_hazus(GI): year_built = GI['YearBuilt'] if 'W1' in bt: - DesignL = ap_DesignLevel_W1 + DesignL = ap_DesignLevel_W1 # noqa: N806 else: - DesignL = ap_DesignLevel - + DesignL = ap_DesignLevel # noqa: N806 + for year in sorted(DesignL.keys()): if year_built <= year: dl = DesignL[year] @@ -148,32 +181,33 @@ def auto_populate_hazus(GI): rise = convert_story_rise(bt, stories) if rise is not None: - LF = f'{bt}{rise}' + LF = f'{bt}{rise}' # noqa: N806 else: - LF = f'{bt}' + LF = f'{bt}' # noqa: N806 return LF, dl class capacity_model_base: """ A class to represent the base of capacity models. - - Attributes: + + Attributes ---------- - - Methods: + + Methods ------- - """ + """ # noqa: D414 + def __init__(self): pass - def name(self): + def name(self): # noqa: D102 return 'capacity_model_base' class cao_peterson_2006(capacity_model_base): """ A class to represent the capacity model in Cao and Peterson 2006. - - Attributes: + + Attributes ---------- Dy : float Yield displacement. In the unit of (inch) @@ -189,19 +223,20 @@ class cao_peterson_2006(capacity_model_base): Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (g) C : float Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (inch) - - Methods: + + Methods ------- - """ - def __init__(self, Dy, Ay, Du, Au, dD = 0.001): + """ # noqa: D414 + + def __init__(self, Dy, Ay, Du, Au, dD = 0.001): # noqa: N803 # region between elastic and perfectly plastic sd_elpl = np.arange(Dy,Du,dD) # Eq. B3 in Steelman & Hajjar 2008 - Ax = (Au**2*Dy - Ay**2*Du)/(2*Au*Dy - Ay*Dy - Ay*Du) + Ax = (Au**2*Dy - Ay**2*Du)/(2*Au*Dy - Ay*Dy - Ay*Du) # noqa: N806 # Eq. B4 in Steelman & Hajjar 2008 - B = Au - Ax + B = Au - Ax # noqa: N806 # Eq. B5 in Steelman & Hajjar 2008 - C = (Dy*B**2*(Du-Dy)/(Ay*(Ay-Ax)))**0.5 + C = (Dy*B**2*(Du-Dy)/(Ay*(Ay-Ax)))**0.5 # noqa: N806 # Eq. B1 in Steelman & Hajjar 2008 sa_elpl = Ax + B*(1 - ((sd_elpl-Du)/C)**2)**0.5 # elastic and perfectly plastic regions @@ -219,15 +254,15 @@ def __init__(self, Dy, Ay, Du, Au, dD = 0.001): self.Du = Du self.Ay = Ay self.Dy = Dy - - def name(self): + + def name(self): # noqa: D102 return 'cao_peterson_2006' - + class HAZUS_cao_peterson_2006(capacity_model_base): """ A class to represent the capacity model in Cao and Peterson 2006. - - Attributes: + + Attributes ---------- Dy : float Yield displacement. In the unit of (inch) @@ -243,10 +278,11 @@ class HAZUS_cao_peterson_2006(capacity_model_base): Parameter in Eq. A5 of Cao and Peterson 2006. In the unit of (g) C : float Parameter in Eq. A5 of Cao and Peterson 2006. 
In the unit of (inch) - - Methods: + + Methods ------- - """ + """ # noqa: D414 + def __init__(self, general_info, dD = 0.001): # HAZUS capacity data: Table 5-7 to Tabl 5-10 in HAZUS 5.1 self.capacity_data = dict() @@ -276,12 +312,13 @@ def __init__(self, general_info, dD = 0.001): self.Dy = self.capacity_data[self.design_level][self.HAZUS_type]['Dy'] self.Ay = self.capacity_data[self.design_level][self.HAZUS_type]['Ay'] except KeyError: - raise KeyError(f'No capacity data for {self.HAZUS_type} and {self.design_level}') + msg = f'No capacity data for {self.HAZUS_type} and {self.design_level}' + raise KeyError(msg) # noqa: B904 self.cao_peterson_2006 = cao_peterson_2006(self.Dy, self.Ay, self.Du, self.Au, dD) self.Ax = self.cao_peterson_2006.Ax self.B = self.cao_peterson_2006.B self.C = self.cao_peterson_2006.C - + def get_capacity_curve(self, sd_max): sd = self.cao_peterson_2006.sd sa = self.cao_peterson_2006.sa @@ -294,13 +331,12 @@ def get_capacity_curve(self, sd_max): # def get_capacity_curve(self): # return self.cao_peterson_2006.sd, self.cao_peterson_2006.sa - + def get_hazus_alpha2(self): return self.capacity_data['alpha2'][self.HAZUS_type]['alpha2'] - + def get_hazus_roof_height(self): return self.capacity_data['roof_height'][self.HAZUS_type]['roof_height_ft'] def name(self): return 'HAZUS_cao_peterson_2006' - \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/DampingModels.py b/modules/performSIMULATION/capacitySpectrum/DampingModels.py index 66131af38..9926805ed 100644 --- a/modules/performSIMULATION/capacitySpectrum/DampingModels.py +++ b/modules/performSIMULATION/capacitySpectrum/DampingModels.py @@ -1,4 +1,4 @@ -# # noqa: N999, D100 +# # noqa: D100, INP001 # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -37,16 +37,16 @@ # Jinyan Zhao # Tamika Bassman # Adam Zsarnóczay -# +# # References: -# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to -# model uncertainty of input ground motions in the Los Angeles area. Bulletin of +# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to +# model uncertainty of input ground motions in the Los Angeles area. Bulletin of # the Seismological Society of America, 96(2), 365-376. # 2. Steelman, J., & Hajjar, J. F. (2008). Systemic validation of consequence-based # risk management for seismic regional losses. -# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. +# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. # Engineering monographs on earthquake criteria. -# 4. FEMA (2022), HAZUS – Multi-hazard Loss Estimation Methodology 5.0, +# 4. FEMA (2022), HAZUS - Multi-hazard Loss Estimation Methodology 5.0, # Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. @@ -58,43 +58,46 @@ import numpy as np import pandas as pd + class damping_model_base: """ A class to represent the base of damping models. - - Attributes: + + Attributes ---------- - - Methods: + + Methods ------- - """ + """ # noqa: D414 + def __init__(self): pass - def name(self): + def name(self): # noqa: D102 return 'damping_model_base' class damping_model_hazus(damping_model_base): """ A class to represent the hazus damping models. - - Attributes: + + Attributes ---------- beta_elastic_map : dict The damping ratio is suggested by FEMA HAZUS Below Eq. 5-9, which in turn is based on Newmark and Hall 1982. 
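The elliptical yield-to-ultimate transition assembled in cao_peterson_2006.__init__ above can be reproduced directly from Eqs. B3-B5 of Steelman & Hajjar (2008); the control points below are hypothetical, not values from the HAZUS capacity tables.

import numpy as np

Dy, Ay, Du, Au = 0.3, 0.25, 2.0, 0.6  # hypothetical control points (inch, g)
Ax = (Au**2 * Dy - Ay**2 * Du) / (2 * Au * Dy - Ay * Dy - Ay * Du)  # Eq. B3
B = Au - Ax                                                         # Eq. B4
C = (Dy * B**2 * (Du - Dy) / (Ay * (Ay - Ax)))**0.5                 # Eq. B5
sd = np.arange(Dy, Du, 0.001)
sa = Ax + B * np.sqrt(1 - ((sd - Du) / C)**2)  # elliptical branch, Eq. B1
print(round(sa[0], 3), round(sa[-1], 3))  # meets Ay at Dy, approaches Au at Du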
The median value of the dampling ratio in Table 3 of Newmark and Hall 1982 - is used. E.g. For steel buildings, the damping ratio is assumed as + is used. E.g. For steel buildings, the damping ratio is assumed as (6+12.5)/2=9.25%, which is the average of welded steel and bolted steel. Masonry buildings are assumed to have a damping ratio similar to reinforced concrete buildings. Mobile homes are assumed to have a damping ratio similar to steel buildings. - - Methods: + + Methods ------- get_beta_elastic : Calculate the elastic damping ratio beta. """ + def __init__(self): self.beta_elastic_map = { 'W1': 15, @@ -112,7 +115,7 @@ def __init__(self): 'S5L': 10, 'S5M': 7, 'S5H': 5, - 'C1L': 10, + 'C1L': 10, 'C1M': 8.5, 'C1H': 7, 'C2L': 10, @@ -134,7 +137,7 @@ def __init__(self): 'URMM': 8.5, 'MH': 9.25 } - self.kappa_data = pd.read_csv(os.path.join(os.path.dirname(__file__), + self.kappa_data = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 'hazus_kappa_data.csv'), index_col=0, header=None) self.kappa_col_map = {'HC':{'S':1, 'M':2, 'L':3}, @@ -143,67 +146,67 @@ def __init__(self): 'PC':{'S':10, 'M':11, 'L':12}, } - def get_beta_elastic(self, HAZUS_bldg_type): + def get_beta_elastic(self, HAZUS_bldg_type): # noqa: N803 """ Calculate the elastic damping ratio beta. - - Parameters: - ----------- + + Parameters + ---------- HAZUS_bldg_type : str The HAZUS building type. - - Returns: - -------- + + Returns + ------- beta : float The elastic damping ratio beta. """ - if HAZUS_bldg_type not in self.beta_elastic_map.keys(): + if HAZUS_bldg_type not in self.beta_elastic_map: sys.exit(f'The building type {HAZUS_bldg_type} is not in the damping' 'model.') - beta = self.beta_elastic_map[HAZUS_bldg_type] - return beta - def get_kappa(self, HAZUS_bldg_type, design_level, Mw): + return self.beta_elastic_map[HAZUS_bldg_type] + + def get_kappa(self, HAZUS_bldg_type, design_level, Mw): # noqa: N803 """ Calculate the kappa in Table 5-33 of FEMA HAZUS 2022. - - Parameters: - ----------- + + Parameters + ---------- HAZUS_bldg_type : str The HAZUS building type. - - Returns: - -------- + + Returns + ------- kappa : float The kappa in Table 5-33 of FEMA HAZUS 2022. """ - if HAZUS_bldg_type not in self.beta_elastic_map.keys(): + if HAZUS_bldg_type not in self.beta_elastic_map: sys.exit(f'The building type {HAZUS_bldg_type} is not in the damping' 'model.') # Infer duration according to HAZUS 2022 below Table 5-33 - if Mw <= 5.5: + if Mw <= 5.5: # noqa: PLR2004 duration = 'S' - elif Mw < 7.5: + elif Mw < 7.5: # noqa: PLR2004 duration = 'M' else: duration = 'L' col = self.kappa_col_map[design_level][duration] - kappa = self.kappa_data.loc[HAZUS_bldg_type, col] - return kappa - - def get_name(self): + return self.kappa_data.loc[HAZUS_bldg_type, col] + + def get_name(self): # noqa: D102 return 'damping_model_hazus' class HAZUS_cao_peterson_2006(damping_model_base): """ A class to represent the damping model in Cao and Peterson 2006. 
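get_kappa above infers a shaking-duration class from magnitude before the table lookup. The bucketing is simple enough to restate as a sketch (thresholds taken from the code; the kappa values themselves live in hazus_kappa_data.csv and are not reproduced here):

def duration_bucket(mw):
    if mw <= 5.5:
        return 'S'  # short duration
    if mw < 7.5:
        return 'M'  # moderate duration
    return 'L'      # long duration

assert duration_bucket(5.0) == 'S'
assert duration_bucket(7.0) == 'M'
assert duration_bucket(8.0) == 'L'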
- - Attributes: + + Attributes ---------- - - Methods: + + Methods ------- - """ - def __init__(self, demand, capacity, base_model = damping_model_hazus()): + """ # noqa: D414 + + def __init__(self, demand, capacity, base_model = damping_model_hazus()): # noqa: B008 self.supported_capacity_model = ['HAZUS_cao_peterson_2006'] self.supported_demand_model = ['HAZUS', 'HAZUS_lin_chang_2003'] self.base_model = base_model @@ -217,13 +220,13 @@ def __init__(self, demand, capacity, base_model = damping_model_hazus()): self.HAZUS_type = capacity.HAZUS_type self.design_level = capacity.design_level self.Mw = demand.Mw - - def get_beta(self, Dp, Ap): + + def get_beta(self, Dp, Ap): # noqa: N803 """ Equation B.44-B.45 in Steelman & Hajjar (2010), which are originally published - in Cao and Peterson 2006 - """ + in Cao and Peterson 2006. + """ # noqa: D205 try: beta_elastic = self.base_model.get_beta_elastic(self.HAZUS_type) except: # noqa: E722 @@ -234,24 +237,18 @@ def get_beta(self, Dp, Ap): except: # noqa: E722 sys.exit(f'The base model {self.base_model} does not have a useful' 'get_kappa method.') - Du = self.capacity.Du - Ax = self.capacity.Ax - B = self.capacity.B - C = self.capacity.C - Kt = (Du-Dp)/(Ap-Ax)*(B/C)**2 # Eq B.46 - Ke = self.capacity.Ay/self.capacity.Dy # Eq B.47 + Du = self.capacity.Du # noqa: N806 + Ax = self.capacity.Ax # noqa: N806 + B = self.capacity.B # noqa: N806 + C = self.capacity.C # noqa: N806 + Kt = (Du-Dp)/(Ap-Ax)*(B/C)**2 # Eq B.46 # noqa: N806 + Ke = self.capacity.Ay/self.capacity.Dy # Eq B.47 # noqa: N806 area_h = max(0,4*(Ap-Dp*Ke)*(Dp*Kt-Ap)/(Ke-Kt)) # Eq. B.45 # beta is in the unit of percentage # beta_h = kappa*area_h/(2*3.1416*Dp*Ap) * 100# Eq. B.44 beta_h = kappa*area_h/(2*3.1416*Dp*Ap)# Eq. B.44 return beta_elastic + beta_h - def get_beta_elastic(self): + def get_beta_elastic(self): # noqa: D102 return self.base_model.get_beta_elastic(self.HAZUS_type) - - - - - - \ No newline at end of file diff --git a/modules/performSIMULATION/capacitySpectrum/DemandModels.py b/modules/performSIMULATION/capacitySpectrum/DemandModels.py index fe19f75d0..8e73e17fd 100644 --- a/modules/performSIMULATION/capacitySpectrum/DemandModels.py +++ b/modules/performSIMULATION/capacitySpectrum/DemandModels.py @@ -1,4 +1,4 @@ -# # noqa: N999, D100 +# # noqa: D100, INP001 # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -35,16 +35,16 @@ # # Contributors: # Jinyan Zhao -# +# # References: -# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to -# model uncertainty of input ground motions in the Los Angeles area. Bulletin of +# 1. Cao, T., & Petersen, M. D. (2006). Uncertainty of earthquake losses due to +# model uncertainty of input ground motions in the Los Angeles area. Bulletin of # the Seismological Society of America, 96(2), 365-376. # 2. Steelman, J., & Hajjar, J. F. (2008). Systemic validation of consequence-based # risk management for seismic regional losses. -# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. +# 3. Newmark, N. M., & Hall, W. J. (1982). Earthquake spectra and design. # Engineering monographs on earthquake criteria. -# 4. FEMA (2022), HAZUS – Multi-hazard Loss Estimation Methodology 5.0, +# 4. FEMA (2022), HAZUS - Multi-hazard Loss Estimation Methodology 5.0, # Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. 
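The hysteretic term in get_beta above is a few lines of algebra once the capacity-curve constants are known. The sketch below recomputes it with invented numbers (Eqs. B.44-B.47 of Steelman & Hajjar); it leaves beta in whatever unit convention beta_elastic uses, since the percent scaling is commented out in the patch.

Du, Ax, B, C = 2.0, 0.0791, 0.521, 1.8  # hypothetical capacity constants
Dy, Ay = 0.3, 0.25
Dp, Ap = 1.0, 0.55                      # hypothetical trial performance point
kappa, beta_elastic = 0.5, 0.10

Kt = (Du - Dp) / (Ap - Ax) * (B / C)**2                            # Eq. B.46
Ke = Ay / Dy                                                       # Eq. B.47
area_h = max(0, 4 * (Ap - Dp * Ke) * (Dp * Kt - Ap) / (Ke - Kt))   # Eq. B.45
beta_eff = beta_elastic + kappa * area_h / (2 * 3.1416 * Dp * Ap)  # Eq. B.44
print(beta_eff)  # ~0.19 with these made-up inputs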
@@ -56,11 +56,12 @@ import numpy as np import pandas as pd + class demand_model_base: """ A class to represent the base of demand models. - - Attributes: + + Attributes ---------- T : numpy.ndarray Periods in the demand spectrum. @@ -68,20 +69,19 @@ class demand_model_base: Spectrum displacement in the demand spectrum at 5% damping. In the unit of (inch) dem_sa_05 : numpy.ndarray Spectrum acceleration in the demand spectrum at 5% damping. In the unit of (g) - - Methods: ------- """ - def __init__(self, T, dem_sd_05, dem_sa_05): + + def __init__(self, T, dem_sd_05, dem_sa_05): # noqa: N803 self.T = T self.dem_sd_05 = dem_sd_05 self.dem_sa_05 = dem_sa_05 class HAZUS(demand_model_base): """ - A class to represent the design spectrum from HAZUS V5 (2022), section 4.1.3.2 - - Attributes: + A class to represent the design spectrum from HAZUS V5 (2022), section 4.1.3.2. + + Attributes ---------- Tvd : float Tvd as HAZUS Eq. 4-4. @@ -94,17 +94,28 @@ class HAZUS(demand_model_base): Spectrum displacement in the demand spectrum at 5% damping. In the unit of (inch) dem_sa_05 : numpy.ndarray Spectrum acceleration in the demand spectrum at 5% damping. In the unit of (g) - - Methods: + + Methods ------- - """ - def __init__(self, Mw = 7.0): + """ # noqa: D414 + + def __init__(self, Mw = 7.0): # noqa: N803 self.Tvd = np.power(10, (Mw - 5)/2) - self.T = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, + self.T = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 7.5, 10] self.Mw = Mw - - def set_IMs(self, sa_03, sa_10): + + def set_IMs(self, sa_03, sa_10): # noqa: N802 + """ + Set the input motions for the demand model. + + Parameters + ---------- + sa_03 : float + Spectral acceleration at 0.3 seconds. + sa_10 : float + Spectral acceleration at 1.0 seconds. + """ self.sa_03 = sa_03 self.sa_10 = sa_10 self.Tav = sa_10/sa_03 @@ -125,36 +136,87 @@ def set_IMs(self, sa_03, sa_10): self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 else: self.dem_sa_05[i] = sa_10 * self.Tvd / t**2 # Ea. A1a - self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] + self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] + + def get_sa(self, T): # noqa: N803 + """ + Get the spectral acceleration for a given period. - def get_sa(self, T): + Parameters + ---------- + T : float + The period for which to calculate the spectral acceleration. + + Returns + ------- + float + The spectral acceleration for the given period. + """ # return np.interp(T, self.T, self.dem_sa_05) - if T <= self.Tav: + if self.Tav >= T: return self.sa_03 - elif T <= self.Tvd: + if self.Tvd >= T: return self.sa_10/T - else: - return self.sa_10 * self.Tvd / T**2 - - def get_sd(self, T): + return self.sa_10 * self.Tvd / T**2 + + def get_sd(self, T): # noqa: N803 + """ + Get the spectrum displacement for a given period. + + Parameters + ---------- + T : float + The period for which to calculate the spectrum displacement. + + Returns + ------- + float + The spectrum displacement for the given period. 
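The piecewise spectrum that set_IMs tabulates and get_sa evaluates above has three branches: a constant-acceleration plateau up to Tav = SA(1.0)/SA(0.3), a constant-velocity 1/T branch up to Tvd = 10**((Mw-5)/2), and a 1/T**2 displacement branch beyond. A minimal standalone sketch with hypothetical ordinates:

def hazus_sa(T, sa_03, sa_10, mw=7.0):
    tav = sa_10 / sa_03         # acceleration/velocity corner period
    tvd = 10 ** ((mw - 5) / 2)  # velocity/displacement corner period
    if T <= tav:
        return sa_03
    if T <= tvd:
        return sa_10 / T
    return sa_10 * tvd / T**2

print(hazus_sa(0.2, 0.9, 0.45))   # plateau: 0.9
print(hazus_sa(1.0, 0.9, 0.45))   # 1/T branch: 0.45
print(hazus_sa(15.0, 0.9, 0.45))  # 1/T**2 branch: 0.02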
+ """ # return np.interp(T, self.T, self.dem_sd_05) # return np.interp(T, self.T, self.dem_sa_05) - if T <= self.Tav: + if self.Tav >= T: return self.get_sd_from_sa(self.sa_03, T) - elif T <= self.Tvd: + if self.Tvd >= T: return self.get_sd_from_sa(self.sa_10/T, T) - else: - return self.get_sd_from_sa(self.sa_10 * self.Tvd / T**2, T) - - def get_sd_from_sa(self, sa, T): + return self.get_sd_from_sa(self.sa_10 * self.Tvd / T**2, T) + + def get_sd_from_sa(self, sa, T): # noqa: N803 + """ + Calculate the spectrum displacement for a given spectral acceleration and period. + + Parameters + ---------- + sa : float + The spectral acceleration. + T : float + The period. + + Returns + ------- + float + The spectrum displacement. + """ return self.g/(4 * np.pi**2) * T**2 * sa - - def set_Tavb(self, damping_model, tol = 0.05, max_iter = 100): + + def set_Tavb(self, damping_model, tol = 0.05, max_iter = 100): # noqa: N802 + """ + Set the Tavb attribute of the HAZUS demand model. + + Parameters + ---------- + damping_model : object + The damping model used to calculate the beta_eff. + tol : float, optional + The tolerance for convergence, by default 0.05. + max_iter : int, optional + The maximum number of iterations, by default 100. + """ x_prev = 5 # Start with 5% damping - for i in range(max_iter): + for _i in range(max_iter): beta = x_prev ra = 2.12/(3.21-0.68*np.log(beta)) - Tavb = self.Tav * (2.12/(3.21-0.68*np.log(beta)))/(1.65/(2.31-0.41*np.log(beta))) + Tavb = self.Tav * (2.12/(3.21-0.68*np.log(beta)))/(1.65/(2.31-0.41*np.log(beta))) # noqa: N806 sa = self.get_sa(Tavb) / ra sd = self.get_sd_from_sa(sa, Tavb) beta_eff = damping_model.get_beta(sd, sa) @@ -167,16 +229,28 @@ def set_Tavb(self, damping_model, tol = 0.05, max_iter = 100): or 2.12/(3.21-0.68*np.log(beta)) < 1): # raise a warning # print('WARNING: in HAZUS demand model, the Tavb is not converged.') - self.Tavb = self.Tav + self.Tavb = self.Tav def set_beta_tvd(self, damping_model, tol = 0.05, max_iter = 100): + """ + Set the beta_tvd attribute of the HAZUS demand model. + + Parameters + ---------- + damping_model : object + The damping model used to calculate the beta_eff. + tol : float, optional + The tolerance for convergence, by default 0.05. + max_iter : int, optional + The maximum number of iterations, by default 100. + """ x_prev = 5 # Start with 5% damping max_iter = 100 tol = 0.05 - for i in range(max_iter): + for _i in range(max_iter): beta = x_prev - Tvd = self.Tvd + Tvd = self.Tvd # noqa: N806 rd = 1.65/(2.31-0.41*np.log(beta)) sa = self.get_sa(Tvd)/rd sd = self.get_sd_from_sa(sa, Tvd) @@ -194,16 +268,31 @@ def set_beta_tvd(self, damping_model, tol = 0.05, max_iter = 100): def get_reduced_demand(self, beta_eff): + """ + Calculate the reduced demand for a given effective damping ratio. + + Parameters + ---------- + beta_eff : float + The effective damping ratio. + + Returns + ------- + tuple + A tuple containing the reduced spectrum displacement and reduced spectrum acceleration. + """ if getattr(self, 'Tavb', None) is None: - raise ValueError('The Tavb is not set yet.') + msg = 'The Tavb is not set yet.' + raise ValueError(msg) if getattr(self, 'beta_tvd', None) is None: - raise ValueError('The beta_tvd is not set yet.') - RA = 2.12/(3.21-0.68*np.log(beta_eff)) - Rv = 1.65/(2.31-0.41*np.log(beta_eff)) + msg = 'The beta_tvd is not set yet.' 
+ raise ValueError(msg) + RA = 2.12/(3.21-0.68*np.log(beta_eff)) # noqa: N806 + Rv = 1.65/(2.31-0.41*np.log(beta_eff)) # noqa: N806 if self.beta_tvd < 0: - RD = 1.39/(1.82 - 0.27 * np.log(beta_eff)) # EQ A9 in Cao and Peterson 2006 + RD = 1.39/(1.82 - 0.27 * np.log(beta_eff)) # EQ A9 in Cao and Peterson 2006 # noqa: N806 else: - RD = (1.65/(2.31-0.41*np.log(self.beta_tvd))) + RD = (1.65/(2.31-0.41*np.log(self.beta_tvd))) # noqa: N806 dem_sa = np.zeros_like(np.array(self.T)) dem_sd = np.zeros_like(np.array(self.T)) for i, t in enumerate(self.T): @@ -217,21 +306,30 @@ def get_reduced_demand(self, beta_eff): dem_sa[i] = self.get_sa(t) / RD dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) return dem_sd, dem_sa - - + + def set_ruduction_factor(self, beta_eff): + """ + Set the reduction factor for a given effective damping ratio. + + Parameters + ---------- + beta_eff : float + The effective damping ratio. + """ if getattr(self, 'Tavb', None) is None: - raise ValueError('The Tavb is not set yet.') + msg = 'The Tavb is not set yet.' + raise ValueError(msg) self.RA = 2.12/(3.21-0.68*np.log(beta_eff)) self.Rv = 1.65/(2.31-0.41*np.log(beta_eff)) - self.RD = (1.65/(2.31-0.41*np.log(beta_eff))) - + self.RD = (1.65/(2.31-0.41*np.log(beta_eff))) + # def __init__(self, sa_03, sa_10, Mw = 7.0): # self.Tvd = np.power(10, (Mw - 5)/2) # self.Tav = sa_10/sa_03 - # g = 386 + # g = 386 # # self.T is defined as typical GMPEs - # self.T = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, + # self.T = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, # 0.25, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 7.5, 10] # # insert tvd and tav in self.T # self.T.append(self.Tvd) @@ -249,57 +347,83 @@ def set_ruduction_factor(self, beta_eff): # self.dem_sd_05[i] = g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 # else: # self.dem_sa_05[i] = sa_10 * self.Tvd / t**2 # Ea. A1a - # self.dem_sd_05[i] = g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] + # self.dem_sd_05[i] = g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # self.Mw = Mw - + def name(self): + """ + Get the name of the demand model. + + Returns + ------- + str + The name of the demand model. + """ return 'HAZUS' - + @staticmethod - def check_IM(IM_header): + def check_IM(IM_header): # noqa: N802, N803 + """ + Check the IM header. + + Parameters + ---------- + IM_header : str + The IM header to be checked. + + Raises + ------ + ValueError + If the IM header does not contain the required information. + """ if 'SA_0.3' not in IM_header: - raise ValueError('The IM header should contain SA_0.3') + msg = 'The IM header should contain SA_0.3' + raise ValueError(msg) if 'SA_1.0' not in IM_header: - raise ValueError('The IM header of should contain SA_1.0') + msg = 'The IM header of should contain SA_1.0' + raise ValueError(msg) class HAZUS_lin_chang_2003(HAZUS): + """ A class to represent the design spectrum from HAZUS V5 (2022), and the damping deduction relationship from Lin and Chang 2003. 
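The reduction factors applied in get_reduced_demand above are the log fits used throughout this file; a quick sketch (beta in percent, per the convention in this code) shows they are calibrated to pass through roughly 1.0 at 5 percent damping.

import numpy as np

def ra(beta):  # acceleration-domain reduction factor
    return 2.12 / (3.21 - 0.68 * np.log(beta))

def rv(beta):  # velocity-domain reduction factor
    return 1.65 / (2.31 - 0.41 * np.log(beta))

print(ra(5), rv(5))    # ~1.00, 1.00
print(ra(20), rv(20))  # ~1.81, 1.53 -- the 5%-damped ordinates get divided down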
- """ - def __init__(self, Mw = 7.0): + """ # noqa: D205 + + def __init__(self, Mw = 7.0): # noqa: N803 super().__init__(Mw) - def name(self): + + def name(self): # noqa: D102 return "HAZUS_lin_chang_2003" - def get_dmf(self, beta_eff, T): + + def get_dmf(self, beta_eff, T): # noqa: D102, N803 alpha = 1.303+0.436*np.log(beta_eff) - R = 1-alpha*T**0.3/(T+1)**0.65 - return R - def get_reduced_demand(self, beta_eff): + return 1-alpha*T**0.3/(T+1)**0.65 + + def get_reduced_demand(self, beta_eff): # noqa: D102 if getattr(self, 'Tavb', None) is None: - raise ValueError('The Tavb is not set yet.') + msg = 'The Tavb is not set yet.' + raise ValueError(msg) if getattr(self, 'beta_tvd', None) is None: - raise ValueError('The beta_tvd is not set yet.') + msg = 'The beta_tvd is not set yet.' + raise ValueError(msg) + dem_sa = np.zeros_like(np.array(self.T)) dem_sd = np.zeros_like(np.array(self.T)) + for i, t in enumerate(self.T): - R = self.get_dmf(beta_eff, t) - if t <= self.Tavb: - dem_sa[i] = self.get_sa(t) / R - dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) - elif t <= self.Tvd: - dem_sa[i] = self.get_sa(t) / R - dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) - else: - dem_sa[i] = self.get_sa(t) / R - dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + R = self.get_dmf(beta_eff, t) # noqa: N806 + + dem_sa[i] = self.get_sa(t) / R + dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) + return dem_sd, dem_sa class ASCE_7_10(demand_model_base): """ A class to represent the design spectrum from ASCE_7_10. - - Attributes: + + Attributes ---------- Tvd : float Tvd as HAZUS Eq. 4-4. @@ -311,12 +435,12 @@ class ASCE_7_10(demand_model_base): Spectrum displacement in the demand spectrum at 5% damping. In the unit of (inch) dem_sa_05 : numpy.ndarray Spectrum acceleration in the demand spectrum at 5% damping. 
In the unit of (g) - - Methods: + + Methods ------- - """ - - def __init__(self, T, dem_sd_05, dem_sa_05): + """ # noqa: D414 + + def __init__(self, T, dem_sd_05, dem_sa_05): # noqa: N803 self.T = T self.dem_sd_05 = dem_sd_05 self.dem_sa_05 = dem_sa_05 diff --git a/modules/performSIMULATION/capacitySpectrum/runCMS.py b/modules/performSIMULATION/capacitySpectrum/runCMS.py index 6551f34b0..03d667338 100644 --- a/modules/performSIMULATION/capacitySpectrum/runCMS.py +++ b/modules/performSIMULATION/capacitySpectrum/runCMS.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -# +# # noqa: D100, INP001 # Copyright (c) 2023 Leland Stanford Junior University # Copyright (c) 2023 The Regents of the University of California # @@ -356,7 +355,7 @@ def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 demand_model.set_IMs(sa_03, sa_10) demand_model.set_Tavb(damping_model) demand_model.set_beta_tvd(damping_model) - # if (damping_model_name == 'HAZUS_cao_peterson_2006' + # if (damping_model_name == 'HAZUS_cao_peterson_2006' # and capacity_model_name == 'HAZUS_cao_peterson_2006'): # damping_model.set_HAZUS_bldg_type(capacity_model.get_hazus_bldg_type()) From 21339b5093302ec33722e46224f2b3bbe4005aa8 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Thu, 12 Sep 2024 17:54:49 -0700 Subject: [PATCH 49/59] ruff noqa unfixable --- .../regionalGroundMotion/CreateStation.py | 8 +++---- .../regionalGroundMotion/FetchOpenSHA.py | 8 +++---- .../regionalGroundMotion/GMSimulators.py | 2 +- .../regionalGroundMotion/GlobalVariable.py | 2 +- .../capacitySpectrum/CapacityModels.py | 24 +++++++++---------- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index 994777186..347a03235 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -37,7 +37,7 @@ # Kuanshi Zhong # -import socket +import socket # noqa: I001 import sys import numpy as np @@ -53,7 +53,7 @@ if importlib.util.find_spec('contextlib') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'contextlib']) # noqa: S603 -import joblib +import joblib # noqa: I001 import contextlib from joblib import Parallel, delayed import multiprocessing @@ -499,7 +499,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 ) ] else: - with tqdm_joblib(tqdm(desc="Get z2pt5 from openSHA", total=selected_stn.shape[0])) as progress_bar: + with tqdm_joblib(tqdm(desc="Get z2pt5 from openSHA", total=selected_stn.shape[0])) as progress_bar: # noqa: F841 z2pt5_results = Parallel(n_jobs=num_cores)(delayed(get_site_z2pt5_from_opensha)( lat, lon ) for lat, lon in zip( @@ -747,7 +747,7 @@ def parallel_interpolation(func, lat, lon): lon: list of longitude Output: data: list of interpolated data - """ + """ # noqa: D205, D400 return func(lat, lon) def get_vs30_thompson(lat, lon): diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py index f90569b3c..e4dbd4eab 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py @@ -37,7 +37,7 @@ # Kuanshi Zhong # -import numpy as np +import numpy as np # noqa: I001 import pandas as pd import ujson import socket @@ 
-53,7 +53,7 @@ GlobalVariable.JVM_started = True if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 - import jpype + import jpype # noqa: I001 # from jpype import imports import jpype.imports from jpype.types import * # noqa: F403 @@ -99,7 +99,7 @@ try: from scratch.UCERF3.erf.mean import MeanUCERF3 except ModuleNotFoundError: - MeanUCERF3 = jpype.JClass('scratch.UCERF3.erf.mean.MeanUCERF3') # noqa: F405 + MeanUCERF3 = jpype.JClass('scratch.UCERF3.erf.mean.MeanUCERF3') # noqa: F405, RUF100 from org.opensha.sha.gcim.calc import * # noqa: F403 from org.opensha.sha.gcim.imr.attenRelImpl import * # noqa: F403 @@ -657,7 +657,7 @@ def export_to_json( # noqa: C901, D103 import h5py with h5py.File(outfile, 'w') as h5file: # Store the geometry as a string array - h5file.create_dataset('geometry', data=gdf.geometry.astype(str).values.astype('S')) + h5file.create_dataset('geometry', data=gdf.geometry.astype(str).values.astype('S')) # noqa: PD011, F405 # return return erf_data diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py index bfe4eed82..192f1cd35 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py @@ -41,7 +41,7 @@ # Anne Husley # Kuanshi Zhong # Jinyan Zhao -import sys +import sys # noqa: I001 import time import warnings diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py b/modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py index 08a9c992b..cbc4addbe 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/GlobalVariable.py @@ -1 +1 @@ -JVM_started = False \ No newline at end of file +JVM_started = False # noqa: INP001, D100 diff --git a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py index 1b8b64d04..29edecd89 100644 --- a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py +++ b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py @@ -283,25 +283,25 @@ class HAZUS_cao_peterson_2006(capacity_model_base): ------- """ # noqa: D414 - def __init__(self, general_info, dD = 0.001): + def __init__(self, general_info, dD = 0.001): # noqa: N803 # HAZUS capacity data: Table 5-7 to Tabl 5-10 in HAZUS 5.1 - self.capacity_data = dict() - self.capacity_data['HC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + self.capacity_data = dict() # noqa: C408 + self.capacity_data['HC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 'HC_capacity_data.csv'), index_col=0).to_dict(orient='index') - self.capacity_data['MC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + self.capacity_data['MC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 'MC_capacity_data.csv'), index_col=0).to_dict(orient='index') - self.capacity_data['LC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + self.capacity_data['LC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 'LC_capacity_data.csv'), index_col=0).to_dict(orient='index') - self.capacity_data['PC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + self.capacity_data['PC'] = 
pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 'PC_capacity_data.csv'), index_col=0).to_dict(orient='index') - self.capacity_data['alpha2'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + self.capacity_data['alpha2'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 'hazus_capacity_alpha2.csv'), index_col=0).to_dict(orient='index') - self.capacity_data['roof_height'] = pd.read_csv(os.path.join(os.path.dirname(__file__), + self.capacity_data['roof_height'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 'hazus_typical_roof_height.csv'), index_col=0).to_dict(orient='index') # auto populate to get the parameters @@ -319,7 +319,7 @@ def __init__(self, general_info, dD = 0.001): self.B = self.cao_peterson_2006.B self.C = self.cao_peterson_2006.C - def get_capacity_curve(self, sd_max): + def get_capacity_curve(self, sd_max): # noqa: D102 sd = self.cao_peterson_2006.sd sa = self.cao_peterson_2006.sa if sd_max > sd[-1]: @@ -332,11 +332,11 @@ def get_capacity_curve(self, sd_max): # def get_capacity_curve(self): # return self.cao_peterson_2006.sd, self.cao_peterson_2006.sa - def get_hazus_alpha2(self): + def get_hazus_alpha2(self): # noqa: D102 return self.capacity_data['alpha2'][self.HAZUS_type]['alpha2'] - def get_hazus_roof_height(self): + def get_hazus_roof_height(self): # noqa: D102 return self.capacity_data['roof_height'][self.HAZUS_type]['roof_height_ft'] - def name(self): + def name(self): # noqa: D102 return 'HAZUS_cao_peterson_2006' From 6979db15182c8025e37d283bca86878f3bd3a37e Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Thu, 12 Sep 2024 17:59:48 -0700 Subject: [PATCH 50/59] ruff format --- conanfile.py | 5 +- .../regionalGroundMotion/CreateScenario.py | 2 +- .../regionalGroundMotion/CreateStation.py | 50 ++++--- .../regionalGroundMotion/FetchOpenSHA.py | 35 +++-- .../regionalGroundMotion/GMSimulators.py | 14 +- .../HazardSimulationEQ.py | 6 +- .../regionalGroundMotion/ScenarioForecast.py | 5 +- .../gmpe/CorrelationModel.py | 15 +- .../capacitySpectrum/CapacityModels.py | 121 ++++++++++------ .../capacitySpectrum/DampingModels.py | 80 +++++++---- .../capacitySpectrum/DemandModels.py | 132 ++++++++++++------ .../capacitySpectrum/runCMS.py | 131 +++++++++-------- 12 files changed, 375 insertions(+), 221 deletions(-) diff --git a/conanfile.py b/conanfile.py index 6cca02130..454e10029 100644 --- a/conanfile.py +++ b/conanfile.py @@ -2,6 +2,7 @@ from conans import CMake, ConanFile + class simCenterBackendApps(ConanFile): # noqa: D101 name = 'SimCenterBackendApplications' version = '1.2.2' @@ -30,8 +31,8 @@ class simCenterBackendApps(ConanFile): # noqa: D101 'jsonformoderncpp/3.7.0', 'nanoflann/1.3.2', 'nlopt/2.7.1', - "boost/1.71.0", - 'kissfft/131.1.0' + 'boost/1.71.0', + 'kissfft/131.1.0', ] # Custom attributes for Bincrafters recipe conventions diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py index c257fa985..7a97d043a 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py @@ -519,7 +519,7 @@ def create_earthquake_scenarios( # noqa: C901, D103 minMag=min_M, maxMag=max_M, maxDistance=max_R, - use_hdf5=use_hdf5 + use_hdf5=use_hdf5, ) # Parsing data # feat = erf_data['features'] diff --git 
a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index 347a03235..46048c9bf 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -58,9 +58,11 @@ from joblib import Parallel, delayed import multiprocessing + @contextlib.contextmanager def tqdm_joblib(tqdm_object): """Context manager to patch joblib to report into tqdm progress bar given as argument.""" + class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack): def __call__(self, *args, **kwargs): tqdm_object.update(n=self.batch_size) @@ -74,6 +76,7 @@ def __call__(self, *args, **kwargs): joblib.parallel.BatchCompletionCallBack = old_batch_callback tqdm_object.close() + if 'stampede2' not in socket.gethostname(): from FetchOpenSHA import ( get_site_vs30_from_opensha, @@ -238,8 +241,10 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 ] # Check if any duplicated points if selected_stn.duplicated(subset=[lon_label, lat_label]).any(): - sys.exit('Error: Duplicated lat and lon in the Site File (.csv), ' - f'please check site \n{selected_stn[selected_stn.duplicated(subset=[lon_label, lat_label], keep = False)].index.tolist()}') + sys.exit( + 'Error: Duplicated lat and lon in the Site File (.csv), ' + f'please check site \n{selected_stn[selected_stn.duplicated(subset=[lon_label, lat_label], keep = False)].index.tolist()}' + ) STN = [] # noqa: N806 stn_file = {'Stations': []} @@ -478,13 +483,16 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 ) ] else: - with tqdm_joblib(tqdm(desc="Get z1pt0 from openSHA", total=selected_stn.shape[0])) as progress_bar: - z1pt0_results = Parallel(n_jobs=num_cores)(delayed(get_site_z1pt0_from_opensha)( - lat, lon - ) for lat, lon in zip( - selected_stn['Latitude'].tolist(), - selected_stn['Longitude'].tolist(), - )) + with tqdm_joblib( + tqdm(desc='Get z1pt0 from openSHA', total=selected_stn.shape[0]) + ) as progress_bar: + z1pt0_results = Parallel(n_jobs=num_cores)( + delayed(get_site_z1pt0_from_opensha)(lat, lon) + for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + ) + ) if z25Config['Type'] == 'OpenSHA default model': z25_tag = z25Config['z25_tag'] if z25_tag == 2: # noqa: PLR2004 @@ -499,13 +507,16 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 ) ] else: - with tqdm_joblib(tqdm(desc="Get z2pt5 from openSHA", total=selected_stn.shape[0])) as progress_bar: # noqa: F841 - z2pt5_results = Parallel(n_jobs=num_cores)(delayed(get_site_z2pt5_from_opensha)( - lat, lon - ) for lat, lon in zip( - selected_stn['Latitude'].tolist(), - selected_stn['Longitude'].tolist(), - )) + with tqdm_joblib( + tqdm(desc='Get z2pt5 from openSHA', total=selected_stn.shape[0]) + ) as progress_bar: # noqa: F841 + z2pt5_results = Parallel(n_jobs=num_cores)( + delayed(get_site_z2pt5_from_opensha)(lat, lon) + for lat, lon in zip( + selected_stn['Latitude'].tolist(), + selected_stn['Longitude'].tolist(), + ) + ) ground_failure_input_keys = set() for ind in tqdm(range(selected_stn.shape[0]), desc='Stations'): @@ -739,6 +750,7 @@ def get_vs30_global(lat, lon): # return return vs30 # noqa: DOC201, RET504, RUF100 + def parallel_interpolation(func, lat, lon): """Interpolate data in parallel Input: @@ -750,6 +762,7 @@ def parallel_interpolation(func, lat, lon): """ # noqa: D205, D400 return func(lat, lon) + def get_vs30_thompson(lat, lon): 
"""Interpolate global Vs30 at given latitude and longitude Input: @@ -786,7 +799,10 @@ def get_vs30_thompson(lat, lon): def get_z1(vs30): """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter).""" - return np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4))) + return np.exp( + -7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4)) + ) + def get_z25(z1): """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013).""" diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py index e4dbd4eab..ceabc14bd 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py @@ -49,14 +49,17 @@ if 'stampede2' not in socket.gethostname(): import GlobalVariable + if GlobalVariable.JVM_started is False: GlobalVariable.JVM_started = True if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype # noqa: I001 + # from jpype import imports import jpype.imports from jpype.types import * # noqa: F403 + memory_total = psutil.virtual_memory().total / (1024.0**3) memory_request = int(memory_total * 0.75) jpype.addClassPath('./lib/OpenSHA-1.5.2.jar') @@ -529,10 +532,13 @@ def export_to_json( # noqa: C901, D103 source_collection = source_collection[ source_collection['sourceDist'] < maxDistance ] - #Collecting source features + # Collecting source features if not use_hdf5: feature_collection = [] - for i in tqdm(range(source_collection.shape[0]), desc=f'Find ruptures with in {maxDistance} km'): + for i in tqdm( + range(source_collection.shape[0]), + desc=f'Find ruptures with in {maxDistance} km', + ): source_index = source_collection.iloc[i, 0] # Getting rupture distances rupSource = erf.getSource(source_index) # noqa: N806 @@ -596,10 +602,18 @@ def export_to_json( # noqa: C901, D103 # these calls are time-consuming, so only run them if one needs # detailed outputs of the sources cur_dict['properties'].update({'Distance': float(cur_dist)}) - distanceRup = rupture.getRuptureSurface().getDistanceRup(site_loc) # noqa: N806 - cur_dict['properties'].update({'DistanceRup': float(distanceRup)}) - distanceSeis = rupture.getRuptureSurface().getDistanceSeis(site_loc) # noqa: N806 - cur_dict['properties'].update({'DistanceSeis': float(distanceSeis)}) + distanceRup = rupture.getRuptureSurface().getDistanceRup( + site_loc + ) # noqa: N806 + cur_dict['properties'].update( + {'DistanceRup': float(distanceRup)} + ) + distanceSeis = rupture.getRuptureSurface().getDistanceSeis( + site_loc + ) # noqa: N806 + cur_dict['properties'].update( + {'DistanceSeis': float(distanceSeis)} + ) distanceJB = rupture.getRuptureSurface().getDistanceJB(site_loc) # noqa: N806 cur_dict['properties'].update({'DistanceJB': float(distanceJB)}) distanceX = rupture.getRuptureSurface().getDistanceX(site_loc) # noqa: N806 @@ -607,7 +621,9 @@ def export_to_json( # noqa: C901, D103 Prob = rupture.getProbability() # noqa: N806 cur_dict['properties'].update({'Probability': float(Prob)}) maf = rupture.getMeanAnnualRate(erf.getTimeSpan().getDuration()) - cur_dict['properties'].update({'MeanAnnualRate': abs(float(maf))}) + cur_dict['properties'].update( + {'MeanAnnualRate': abs(float(maf))} + ) # Geometry cur_dict['geometry'] = dict() # noqa: C408 
if ruptureSurface.isPointSurface(): @@ -655,9 +671,12 @@ def export_to_json( # noqa: C901, D103 ujson.dump(erf_data, f, indent=2) else: import h5py + with h5py.File(outfile, 'w') as h5file: # Store the geometry as a string array - h5file.create_dataset('geometry', data=gdf.geometry.astype(str).values.astype('S')) # noqa: PD011, F405 + h5file.create_dataset( + 'geometry', data=gdf.geometry.astype(str).values.astype('S') + ) # noqa: PD011, F405 # return return erf_data diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py index 192f1cd35..fba4eeb6b 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py @@ -202,15 +202,21 @@ def _compute_distance_matrix(self): # # Computing station-wise distances # tmp[i, j] = CorrelationModel.get_distance_from_lat_lon(loc_i, loc_j) # self.stn_dist = tmp - loc_i = np.array([[self.sites[i]['lat'], self.sites[i]['lon']] for i in range(self.num_sites)]) + loc_i = np.array( + [ + [self.sites[i]['lat'], self.sites[i]['lon']] + for i in range(self.num_sites) + ] + ) loc_i_gdf = gpd.GeoDataFrame( - {'geometry': gpd.points_from_xy(loc_i[:, 1], loc_i[:, 0])}, crs='EPSG:4326' + {'geometry': gpd.points_from_xy(loc_i[:, 1], loc_i[:, 0])}, + crs='EPSG:4326', ).to_crs('EPSG:6500') lat = loc_i_gdf.geometry.y lon = loc_i_gdf.geometry.x loc_i = np.array([[lon[i], lat[i]] for i in range(self.num_sites)]) loc_j = np.array([[lon[i], lat[i]] for i in range(self.num_sites)]) - distances = cdist(loc_i, loc_j, 'euclidean')/1000 # in km + distances = cdist(loc_i, loc_j, 'euclidean') / 1000 # in km self.stn_dist = distances def set_num_simu(self, num_simu): # noqa: D102 @@ -476,7 +482,7 @@ def compute_intra_event_residual_i(self, cm, im_name_list, num_simu): # noqa: D elif cm == 'Markhvida et al. 
(2017)': num_pc = 19 residuals = CorrelationModel.markhvida_ceferino_baker_correlation_2017( - self.sites, im_name_list, num_simu, self.stn_dist, num_pc + self.sites, im_name_list, num_simu, self.stn_dist, num_pc ) elif cm == 'Du & Ning (2021)': num_pc = 23 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index eef93e904..8a494c283 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -558,15 +558,19 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 if 'stampede2' not in socket.gethostname(): import GlobalVariable + if GlobalVariable.JVM_started is False: GlobalVariable.JVM_started = True if importlib.util.find_spec('jpype') is None: - subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 + subprocess.check_call( + [sys.executable, '-m', 'pip', 'install', 'JPype1'] + ) # noqa: S603 import jpype # from jpype import imports import jpype.imports from jpype.types import * # noqa: F403 + memory_total = psutil.virtual_memory().total / (1024.0**3) memory_request = int(memory_total * 0.75) jpype.addClassPath('./lib/OpenSHA-1.5.2.jar') diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 7686bd8dc..24f32dd47 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -97,10 +97,13 @@ if 'stampede2' not in socket.gethostname(): import GlobalVariable + if GlobalVariable.JVM_started is False: GlobalVariable.JVM_started = True if importlib.util.find_spec('jpype') is None: - subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 + subprocess.check_call( + [sys.executable, '-m', 'pip', 'install', 'JPype1'] + ) # noqa: S603 import jpype # from jpype import imports diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index bf11b83b8..8c7e8ef2e 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -354,7 +354,7 @@ def loth_baker_correlation_2013(stations, im_name_list, stn_dist, num_simu): # mu = np.zeros(num_stations * num_periods) # residuals_raw = np.random.multivariate_normal(mu, covMatrix, num_simu) # Replace np multivariate_normal with cholesky and standard normal - standard_normal =np.random.standard_normal((num_simu, num_stations)) + standard_normal = np.random.standard_normal((num_simu, num_stations)) chole_lower = scipy.linalg.cholesky(cov_matrix, lower=True) corr_samples = chole_lower @ standard_normal.T residuals_raw = corr_samples.T @@ -473,7 +473,7 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 # Creating a covariance matrices for each of the principal components if c1.iloc[0, i] == 0: # nug - cov_matrix= np.eye(num_stations) * c0.iloc[0, i] + cov_matrix = np.eye(num_stations) * c0.iloc[0, i] else: # iso nest cov_matrix = ( @@ -485,7 +485,7 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 # mu, cov_matrix, num_simu # ).T # 
Replace np multivariate_normal with cholesky and standard normal - standard_normal =np.random.standard_normal((num_simu, num_stations)) + standard_normal = np.random.standard_normal((num_simu, num_stations)) chole_lower = scipy.linalg.cholesky(cov_matrix, lower=True) corr_samples = chole_lower @ standard_normal.T residuals_pca[:, :, i] = corr_samples @@ -606,8 +606,8 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, stn_dist, num_pc= num_pc = num_pc - 1 residuals_pca = np.zeros((num_stations, num_simu, num_pc)) for i in range(num_pc): - # from tqdm import tqdm - # for i in tqdm(range(num_pc)): + # from tqdm import tqdm + # for i in tqdm(range(num_pc)): if a1.iloc[0, i] == 0: # nug cov_matrix = np.eye(num_stations) * c1.iloc[0, i] @@ -622,7 +622,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, stn_dist, num_pc= # mu, cov_matrix, num_simu # ).T # Replace np multivariate_normal with cholesky and standard normal - standard_normal =np.random.standard_normal((num_simu, num_stations)) + standard_normal = np.random.standard_normal((num_simu, num_stations)) chole_lower = scipy.linalg.cholesky(cov_matrix, lower=True) corr_samples = chole_lower @ standard_normal.T residuals_pca[:, :, i] = corr_samples @@ -651,7 +651,8 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, stn_dist, num_pc= residuals = np.empty([num_stations, num_periods, num_simu]) for i in range(num_simu): residuals[:, :, i] = np.reshape( - np.matmul(residuals_pca[:, i, :], simu_coef[:,:-1].T), residuals[:, :, i].shape + np.matmul(residuals_pca[:, i, :], simu_coef[:, :-1].T), + residuals[:, :, i].shape, ) # return diff --git a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py index 29edecd89..112a173a5 100644 --- a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py +++ b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py @@ -48,7 +48,6 @@ # Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. - import os import sys import time @@ -67,6 +66,7 @@ # ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'} # same thing applies + def convert_story_rise(structureType, stories): # noqa: N803 """ Convert the story type and number of stories to rise attribute of archetype. @@ -103,17 +103,17 @@ def convert_story_rise(structureType, stories): # noqa: N803 if structureType == 'RM1': if stories <= 3: # noqa: PLR2004 - rise = "L" + rise = 'L' else: - rise = "M" + rise = 'M' elif structureType == 'URM': if stories <= 2: # noqa: PLR2004 - rise = "L" + rise = 'L' else: - rise = "M" + rise = 'M' elif structureType in [ 'S1', @@ -127,16 +127,17 @@ def convert_story_rise(structureType, stories): # noqa: N803 'RM2', ]: if stories <= 3: # noqa: PLR2004 - rise = "L" + rise = 'L' elif stories <= 7: # noqa: PLR2004 - rise = "M" + rise = 'M' else: - rise = "H" + rise = 'H' return rise + def auto_populate_hazus(GI): # noqa: N803 """ Auto-populate the HAZUS parameters based on the given building information. @@ -200,9 +201,11 @@ class capacity_model_base: def __init__(self): pass + def name(self): # noqa: D102 return 'capacity_model_base' + class cao_peterson_2006(capacity_model_base): """ A class to represent the capacity model in Cao and Peterson 2006. 
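For orientation, the cao_peterson_2006 constructor reworked in the next hunk builds the capacity curve in three pieces: a linear elastic branch up to (D_y, A_y), an elliptical transition, and a perfectly plastic plateau at A_u beyond D_u. The transition constants follow Steelman and Hajjar (2008), Eqs. B3 to B5, and the transition branch itself is Eq. B1:

    A_x = \frac{A_u^2 D_y - A_y^2 D_u}{2 A_u D_y - A_y D_y - A_y D_u}, \qquad
    B = A_u - A_x, \qquad
    C = \sqrt{ \frac{D_y B^2 (D_u - D_y)}{A_y (A_y - A_x)} }

    S_a(S_d) = A_x + B \sqrt{ 1 - \left( \frac{S_d - D_u}{C} \right)^2 }, \qquad D_y \le S_d \le D_u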
@@ -228,26 +231,26 @@ class cao_peterson_2006(capacity_model_base): ------- """ # noqa: D414 - def __init__(self, Dy, Ay, Du, Au, dD = 0.001): # noqa: N803 + def __init__(self, Dy, Ay, Du, Au, dD=0.001): # noqa: N803 # region between elastic and perfectly plastic - sd_elpl = np.arange(Dy,Du,dD) + sd_elpl = np.arange(Dy, Du, dD) # Eq. B3 in Steelman & Hajjar 2008 - Ax = (Au**2*Dy - Ay**2*Du)/(2*Au*Dy - Ay*Dy - Ay*Du) # noqa: N806 + Ax = (Au**2 * Dy - Ay**2 * Du) / (2 * Au * Dy - Ay * Dy - Ay * Du) # noqa: N806 # Eq. B4 in Steelman & Hajjar 2008 - B = Au - Ax # noqa: N806 + B = Au - Ax # noqa: N806 # Eq. B5 in Steelman & Hajjar 2008 - C = (Dy*B**2*(Du-Dy)/(Ay*(Ay-Ax)))**0.5 # noqa: N806 + C = (Dy * B**2 * (Du - Dy) / (Ay * (Ay - Ax))) ** 0.5 # noqa: N806 # Eq. B1 in Steelman & Hajjar 2008 - sa_elpl = Ax + B*(1 - ((sd_elpl-Du)/C)**2)**0.5 + sa_elpl = Ax + B * (1 - ((sd_elpl - Du) / C) ** 2) ** 0.5 # elastic and perfectly plastic regions - sd_el = np.arange(0,Dy,dD) - sd_pl = np.arange(Du,4*Du,dD) + sd_el = np.arange(0, Dy, dD) + sd_pl = np.arange(Du, 4 * Du, dD) - sa_el = sd_el*Ay/Dy - sa_pl = Au*np.ones(len(sd_pl)) + sa_el = sd_el * Ay / Dy + sa_pl = Au * np.ones(len(sd_pl)) - self.sd = np.concatenate((sd_el,sd_elpl,sd_pl)) - self.sa = np.concatenate((sa_el,sa_elpl,sa_pl)) + self.sd = np.concatenate((sd_el, sd_elpl, sd_pl)) + self.sa = np.concatenate((sa_el, sa_elpl, sa_pl)) self.Ax = Ax self.B = B self.C = C @@ -258,6 +261,7 @@ def __init__(self, Dy, Ay, Du, Au, dD = 0.001): # noqa: N803 def name(self): # noqa: D102 return 'cao_peterson_2006' + class HAZUS_cao_peterson_2006(capacity_model_base): """ A class to represent the capacity model in Cao and Peterson 2006. @@ -283,27 +287,51 @@ class HAZUS_cao_peterson_2006(capacity_model_base): ------- """ # noqa: D414 - def __init__(self, general_info, dD = 0.001): # noqa: N803 + def __init__(self, general_info, dD=0.001): # noqa: N803 # HAZUS capacity data: Table 5-7 to Tabl 5-10 in HAZUS 5.1 self.capacity_data = dict() # noqa: C408 - self.capacity_data['HC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 - 'HC_capacity_data.csv'), - index_col=0).to_dict(orient='index') - self.capacity_data['MC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 - 'MC_capacity_data.csv'), - index_col=0).to_dict(orient='index') - self.capacity_data['LC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 - 'LC_capacity_data.csv'), - index_col=0).to_dict(orient='index') - self.capacity_data['PC'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 - 'PC_capacity_data.csv'), - index_col=0).to_dict(orient='index') - self.capacity_data['alpha2'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 - 'hazus_capacity_alpha2.csv'), - index_col=0).to_dict(orient='index') - self.capacity_data['roof_height'] = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 - 'hazus_typical_roof_height.csv'), - index_col=0).to_dict(orient='index') + self.capacity_data['HC'] = pd.read_csv( + os.path.join( + os.path.dirname(__file__), # noqa: PTH118, PTH120 + 'HC_capacity_data.csv', + ), + index_col=0, + ).to_dict(orient='index') + self.capacity_data['MC'] = pd.read_csv( + os.path.join( + os.path.dirname(__file__), # noqa: PTH118, PTH120 + 'MC_capacity_data.csv', + ), + index_col=0, + ).to_dict(orient='index') + self.capacity_data['LC'] = pd.read_csv( + os.path.join( + os.path.dirname(__file__), # noqa: PTH118, PTH120 + 
'LC_capacity_data.csv', + ), + index_col=0, + ).to_dict(orient='index') + self.capacity_data['PC'] = pd.read_csv( + os.path.join( + os.path.dirname(__file__), # noqa: PTH118, PTH120 + 'PC_capacity_data.csv', + ), + index_col=0, + ).to_dict(orient='index') + self.capacity_data['alpha2'] = pd.read_csv( + os.path.join( + os.path.dirname(__file__), # noqa: PTH118, PTH120 + 'hazus_capacity_alpha2.csv', + ), + index_col=0, + ).to_dict(orient='index') + self.capacity_data['roof_height'] = pd.read_csv( + os.path.join( + os.path.dirname(__file__), # noqa: PTH118, PTH120 + 'hazus_typical_roof_height.csv', + ), + index_col=0, + ).to_dict(orient='index') # auto populate to get the parameters self.HAZUS_type, self.design_level = auto_populate_hazus(general_info) try: @@ -314,7 +342,9 @@ def __init__(self, general_info, dD = 0.001): # noqa: N803 except KeyError: msg = f'No capacity data for {self.HAZUS_type} and {self.design_level}' raise KeyError(msg) # noqa: B904 - self.cao_peterson_2006 = cao_peterson_2006(self.Dy, self.Ay, self.Du, self.Au, dD) + self.cao_peterson_2006 = cao_peterson_2006( + self.Dy, self.Ay, self.Du, self.Au, dD + ) self.Ax = self.cao_peterson_2006.Ax self.B = self.cao_peterson_2006.B self.C = self.cao_peterson_2006.C @@ -323,10 +353,13 @@ def get_capacity_curve(self, sd_max): # noqa: D102 sd = self.cao_peterson_2006.sd sa = self.cao_peterson_2006.sa if sd_max > sd[-1]: - num_points = min(500, int((sd_max - self.cao_peterson_2006.sd[-1])/0.001)) - sd = np.concatenate((sd,np.linspace( - self.cao_peterson_2006.sd[-1], sd_max, num_points))) - sa = np.concatenate((sa, sa[-1]*np.ones(num_points))) + num_points = min( + 500, int((sd_max - self.cao_peterson_2006.sd[-1]) / 0.001) + ) + sd = np.concatenate( + (sd, np.linspace(self.cao_peterson_2006.sd[-1], sd_max, num_points)) + ) + sa = np.concatenate((sa, sa[-1] * np.ones(num_points))) return sd, sa # def get_capacity_curve(self): diff --git a/modules/performSIMULATION/capacitySpectrum/DampingModels.py b/modules/performSIMULATION/capacitySpectrum/DampingModels.py index 9926805ed..654dfdfd3 100644 --- a/modules/performSIMULATION/capacitySpectrum/DampingModels.py +++ b/modules/performSIMULATION/capacitySpectrum/DampingModels.py @@ -50,7 +50,6 @@ # Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. - import os import sys import time @@ -73,9 +72,11 @@ class damping_model_base: def __init__(self): pass + def name(self): # noqa: D102 return 'damping_model_base' + class damping_model_hazus(damping_model_base): """ A class to represent the hazus damping models. @@ -135,15 +136,21 @@ def __init__(self): 'RM2H': 7, 'URML': 8.5, 'URMM': 8.5, - 'MH': 9.25 + 'MH': 9.25, } - self.kappa_data = pd.read_csv(os.path.join(os.path.dirname(__file__), # noqa: PTH118, PTH120 - 'hazus_kappa_data.csv'), - index_col=0, header=None) - self.kappa_col_map = {'HC':{'S':1, 'M':2, 'L':3}, - 'MC':{'S':4, 'M':5, 'L':6}, - 'LC':{'S':7, 'M':8, 'L':9}, - 'PC':{'S':10, 'M':11, 'L':12}, + self.kappa_data = pd.read_csv( + os.path.join( + os.path.dirname(__file__), # noqa: PTH118, PTH120 + 'hazus_kappa_data.csv', + ), + index_col=0, + header=None, + ) + self.kappa_col_map = { + 'HC': {'S': 1, 'M': 2, 'L': 3}, + 'MC': {'S': 4, 'M': 5, 'L': 6}, + 'LC': {'S': 7, 'M': 8, 'L': 9}, + 'PC': {'S': 10, 'M': 11, 'L': 12}, } def get_beta_elastic(self, HAZUS_bldg_type): # noqa: N803 @@ -161,8 +168,9 @@ def get_beta_elastic(self, HAZUS_bldg_type): # noqa: N803 The elastic damping ratio beta. 
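For orientation, the get_beta method reformatted further below evaluates the hysteretic damping added at a trial performance point (D_p, A_p) on the capacity curve, following Eqs. B.44 to B.47 of Steelman and Hajjar (2010):

    K_t = \frac{D_u - D_p}{A_p - A_x} \left( \frac{B}{C} \right)^2, \qquad
    K_e = \frac{A_y}{D_y}

    A_h = \max\left( 0, \; \frac{4 (A_p - D_p K_e)(D_p K_t - A_p)}{K_e - K_t} \right), \qquad
    \beta_{\mathrm{eff}} = \beta_{\mathrm{elastic}} + \frac{\kappa A_h}{2 \pi D_p A_p}

where the degradation factor \kappa comes from the HAZUS kappa table for the building type, design level, and the duration class inferred from M_w.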
""" if HAZUS_bldg_type not in self.beta_elastic_map: - sys.exit(f'The building type {HAZUS_bldg_type} is not in the damping' - 'model.') + sys.exit( + f'The building type {HAZUS_bldg_type} is not in the damping' 'model.' + ) return self.beta_elastic_map[HAZUS_bldg_type] def get_kappa(self, HAZUS_bldg_type, design_level, Mw): # noqa: N803 @@ -180,8 +188,9 @@ def get_kappa(self, HAZUS_bldg_type, design_level, Mw): # noqa: N803 The kappa in Table 5-33 of FEMA HAZUS 2022. """ if HAZUS_bldg_type not in self.beta_elastic_map: - sys.exit(f'The building type {HAZUS_bldg_type} is not in the damping' - 'model.') + sys.exit( + f'The building type {HAZUS_bldg_type} is not in the damping' 'model.' + ) # Infer duration according to HAZUS 2022 below Table 5-33 if Mw <= 5.5: # noqa: PLR2004 duration = 'S' @@ -195,6 +204,7 @@ def get_kappa(self, HAZUS_bldg_type, design_level, Mw): # noqa: N803 def get_name(self): # noqa: D102 return 'damping_model_hazus' + class HAZUS_cao_peterson_2006(damping_model_base): """ A class to represent the damping model in Cao and Peterson 2006. @@ -206,22 +216,25 @@ class HAZUS_cao_peterson_2006(damping_model_base): ------- """ # noqa: D414 - def __init__(self, demand, capacity, base_model = damping_model_hazus()): # noqa: B008 + def __init__(self, demand, capacity, base_model=damping_model_hazus()): # noqa: B008 self.supported_capacity_model = ['HAZUS_cao_peterson_2006'] self.supported_demand_model = ['HAZUS', 'HAZUS_lin_chang_2003'] self.base_model = base_model if capacity.name() not in self.supported_capacity_model: - sys.exit(f'The capacity model {capacity.name()} is not compatible' - 'with the damping model: cao_peterson_2006.') + sys.exit( + f'The capacity model {capacity.name()} is not compatible' + 'with the damping model: cao_peterson_2006.' + ) if demand.name() not in self.supported_demand_model: - sys.exit(f'The demand model {demand.name()} is not compatible' - 'with the damping model: cao_peterson_2006.') + sys.exit( + f'The demand model {demand.name()} is not compatible' + 'with the damping model: cao_peterson_2006.' + ) self.capacity = capacity self.HAZUS_type = capacity.HAZUS_type self.design_level = capacity.design_level self.Mw = demand.Mw - def get_beta(self, Dp, Ap): # noqa: N803 """ Equation B.44-B.45 in Steelman & Hajjar (2010), which are originally published @@ -229,26 +242,31 @@ def get_beta(self, Dp, Ap): # noqa: N803 """ # noqa: D205 try: beta_elastic = self.base_model.get_beta_elastic(self.HAZUS_type) - except: # noqa: E722 - sys.exit(f'The base model {self.base_model} does not have a useful' - 'get_beta_elastic method.') + except: # noqa: E722 + sys.exit( + f'The base model {self.base_model} does not have a useful' + 'get_beta_elastic method.' + ) try: - kappa = self.base_model.get_kappa(self.HAZUS_type, self.design_level, self.Mw) - except: # noqa: E722 - sys.exit(f'The base model {self.base_model} does not have a useful' - 'get_kappa method.') + kappa = self.base_model.get_kappa( + self.HAZUS_type, self.design_level, self.Mw + ) + except: # noqa: E722 + sys.exit( + f'The base model {self.base_model} does not have a useful' + 'get_kappa method.' + ) Du = self.capacity.Du # noqa: N806 Ax = self.capacity.Ax # noqa: N806 B = self.capacity.B # noqa: N806 C = self.capacity.C # noqa: N806 - Kt = (Du-Dp)/(Ap-Ax)*(B/C)**2 # Eq B.46 # noqa: N806 - Ke = self.capacity.Ay/self.capacity.Dy # Eq B.47 # noqa: N806 - area_h = max(0,4*(Ap-Dp*Ke)*(Dp*Kt-Ap)/(Ke-Kt)) # Eq. 
B.45 + Kt = (Du - Dp) / (Ap - Ax) * (B / C) ** 2 # Eq B.46 # noqa: N806 + Ke = self.capacity.Ay / self.capacity.Dy # Eq B.47 # noqa: N806 + area_h = max(0, 4 * (Ap - Dp * Ke) * (Dp * Kt - Ap) / (Ke - Kt)) # Eq. B.45 # beta is in the unit of percentage # beta_h = kappa*area_h/(2*3.1416*Dp*Ap) * 100# Eq. B.44 - beta_h = kappa*area_h/(2*3.1416*Dp*Ap)# Eq. B.44 + beta_h = kappa * area_h / (2 * 3.1416 * Dp * Ap) # Eq. B.44 return beta_elastic + beta_h def get_beta_elastic(self): # noqa: D102 return self.base_model.get_beta_elastic(self.HAZUS_type) - diff --git a/modules/performSIMULATION/capacitySpectrum/DemandModels.py b/modules/performSIMULATION/capacitySpectrum/DemandModels.py index 8e73e17fd..c874de3eb 100644 --- a/modules/performSIMULATION/capacitySpectrum/DemandModels.py +++ b/modules/performSIMULATION/capacitySpectrum/DemandModels.py @@ -48,7 +48,6 @@ # Earthquake Model Technical Manual, Federal Emergency Management Agency, Washington D.C. - import os import sys import time @@ -73,10 +72,11 @@ class demand_model_base: """ def __init__(self, T, dem_sd_05, dem_sa_05): # noqa: N803 - self.T = T + self.T = T self.dem_sd_05 = dem_sd_05 self.dem_sa_05 = dem_sa_05 + class HAZUS(demand_model_base): """ A class to represent the design spectrum from HAZUS V5 (2022), section 4.1.3.2. @@ -99,10 +99,31 @@ class HAZUS(demand_model_base): ------- """ # noqa: D414 - def __init__(self, Mw = 7.0): # noqa: N803 - self.Tvd = np.power(10, (Mw - 5)/2) - self.T = [0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.15, 0.2, - 0.25, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 7.5, 10] + def __init__(self, Mw=7.0): # noqa: N803 + self.Tvd = np.power(10, (Mw - 5) / 2) + self.T = [ + 0.01, + 0.02, + 0.03, + 0.05, + 0.075, + 0.1, + 0.15, + 0.2, + 0.25, + 0.3, + 0.4, + 0.5, + 0.75, + 1, + 1.5, + 2, + 3, + 4, + 5, + 7.5, + 10, + ] self.Mw = Mw def set_IMs(self, sa_03, sa_10): # noqa: N802 @@ -118,7 +139,7 @@ def set_IMs(self, sa_03, sa_10): # noqa: N802 """ self.sa_03 = sa_03 self.sa_10 = sa_10 - self.Tav = sa_10/sa_03 + self.Tav = sa_10 / sa_03 self.g = 386 # insert tvd and tav in self.T self.T.append(self.Tvd) @@ -130,13 +151,19 @@ def set_IMs(self, sa_03, sa_10): # noqa: N802 for i, t in enumerate(self.T): if t <= self.Tav: self.dem_sa_05[i] = sa_03 - self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 + self.dem_sd_05[i] = ( + self.g / (4 * np.pi**2) * t**2 * self.dem_sa_05[i] + ) # Eq. A2 elif t <= self.Tvd: - self.dem_sa_05[i] = sa_10/t - self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] # Eq. A2 + self.dem_sa_05[i] = sa_10 / t + self.dem_sd_05[i] = ( + self.g / (4 * np.pi**2) * t**2 * self.dem_sa_05[i] + ) # Eq. A2 else: - self.dem_sa_05[i] = sa_10 * self.Tvd / t**2 # Ea. A1a - self.dem_sd_05[i] = self.g/(4 * np.pi**2) * t**2 * self.dem_sa_05[i] + self.dem_sa_05[i] = sa_10 * self.Tvd / t**2 # Ea. 
A1a + self.dem_sd_05[i] = ( + self.g / (4 * np.pi**2) * t**2 * self.dem_sa_05[i] + ) def get_sa(self, T): # noqa: N803 """ @@ -156,7 +183,7 @@ def get_sa(self, T): # noqa: N803 if self.Tav >= T: return self.sa_03 if self.Tvd >= T: - return self.sa_10/T + return self.sa_10 / T return self.sa_10 * self.Tvd / T**2 def get_sd(self, T): # noqa: N803 @@ -178,7 +205,7 @@ def get_sd(self, T): # noqa: N803 if self.Tav >= T: return self.get_sd_from_sa(self.sa_03, T) if self.Tvd >= T: - return self.get_sd_from_sa(self.sa_10/T, T) + return self.get_sd_from_sa(self.sa_10 / T, T) return self.get_sd_from_sa(self.sa_10 * self.Tvd / T**2, T) def get_sd_from_sa(self, sa, T): # noqa: N803 @@ -197,9 +224,9 @@ def get_sd_from_sa(self, sa, T): # noqa: N803 float The spectrum displacement. """ - return self.g/(4 * np.pi**2) * T**2 * sa + return self.g / (4 * np.pi**2) * T**2 * sa - def set_Tavb(self, damping_model, tol = 0.05, max_iter = 100): # noqa: N802 + def set_Tavb(self, damping_model, tol=0.05, max_iter=100): # noqa: N802 """ Set the Tavb attribute of the HAZUS demand model. @@ -212,27 +239,37 @@ def set_Tavb(self, damping_model, tol = 0.05, max_iter = 100): # noqa: N802 max_iter : int, optional The maximum number of iterations, by default 100. """ - x_prev = 5 # Start with 5% damping + x_prev = 5 # Start with 5% damping for _i in range(max_iter): beta = x_prev - ra = 2.12/(3.21-0.68*np.log(beta)) - Tavb = self.Tav * (2.12/(3.21-0.68*np.log(beta)))/(1.65/(2.31-0.41*np.log(beta))) # noqa: N806 + ra = 2.12 / (3.21 - 0.68 * np.log(beta)) + Tavb = ( + self.Tav + * (2.12 / (3.21 - 0.68 * np.log(beta))) + / (1.65 / (2.31 - 0.41 * np.log(beta))) + ) # noqa: N806 sa = self.get_sa(Tavb) / ra sd = self.get_sd_from_sa(sa, Tavb) beta_eff = damping_model.get_beta(sd, sa) x_next = beta_eff if np.abs(x_next - x_prev) < tol: - self.Tavb = self.Tav * (2.12/(3.21-0.68*np.log(beta_eff)))/(1.65/(2.31-0.41*np.log(beta_eff))) + self.Tavb = ( + self.Tav + * (2.12 / (3.21 - 0.68 * np.log(beta_eff))) + / (1.65 / (2.31 - 0.41 * np.log(beta_eff))) + ) break x_prev = x_next - if (getattr(self, 'Tavb', None) is None or (3.21-0.68*np.log(beta_eff)) < 0 - or 2.12/(3.21-0.68*np.log(beta)) < 1): + if ( + getattr(self, 'Tavb', None) is None + or (3.21 - 0.68 * np.log(beta_eff)) < 0 + or 2.12 / (3.21 - 0.68 * np.log(beta)) < 1 + ): # raise a warning # print('WARNING: in HAZUS demand model, the Tavb is not converged.') self.Tavb = self.Tav - - def set_beta_tvd(self, damping_model, tol = 0.05, max_iter = 100): + def set_beta_tvd(self, damping_model, tol=0.05, max_iter=100): """ Set the beta_tvd attribute of the HAZUS demand model. @@ -245,14 +282,14 @@ def set_beta_tvd(self, damping_model, tol = 0.05, max_iter = 100): max_iter : int, optional The maximum number of iterations, by default 100. 
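In summary, the HAZUS demand model above defines the 5%-damped design spectrum piecewise in period T, anchored on the two intensity measures S_a(0.3 s) and S_a(1.0 s):

    S_a(T) = S_a(0.3\,\mathrm{s}) \; (T \le T_{AV}), \qquad
    S_a(T) = \frac{S_a(1.0\,\mathrm{s})}{T} \; (T_{AV} < T \le T_{VD}), \qquad
    S_a(T) = \frac{S_a(1.0\,\mathrm{s}) \, T_{VD}}{T^2} \; (T > T_{VD})

with T_{AV} = S_a(1.0 s) / S_a(0.3 s), T_{VD} = 10^{(M_w - 5)/2}, and the spectral displacement S_d = \frac{g}{4 \pi^2} T^2 S_a throughout.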
""" - x_prev = 5 # Start with 5% damping + x_prev = 5 # Start with 5% damping max_iter = 100 tol = 0.05 for _i in range(max_iter): beta = x_prev Tvd = self.Tvd # noqa: N806 - rd = 1.65/(2.31-0.41*np.log(beta)) - sa = self.get_sa(Tvd)/rd + rd = 1.65 / (2.31 - 0.41 * np.log(beta)) + sa = self.get_sa(Tvd) / rd sd = self.get_sd_from_sa(sa, Tvd) beta_eff = damping_model.get_beta(sd, sa) x_next = beta_eff @@ -260,12 +297,14 @@ def set_beta_tvd(self, damping_model, tol = 0.05, max_iter = 100): self.beta_tvd = x_next break x_prev = x_next - if (getattr(self, 'beta_tvd', None) is None or (2.31-0.41*np.log(self.beta_tvd)) < 0 - or 1.65/(2.31-0.41*np.log(self.beta_tvd)) < 1): + if ( + getattr(self, 'beta_tvd', None) is None + or (2.31 - 0.41 * np.log(self.beta_tvd)) < 0 + or 1.65 / (2.31 - 0.41 * np.log(self.beta_tvd)) < 1 + ): # raise a warning # print('WARNING: in HAZUS demand model, the beta_tvd is not converged.') - self.beta_tvd = -1 # This will be overwritten in get_reduced_demand. - + self.beta_tvd = -1 # This will be overwritten in get_reduced_demand. def get_reduced_demand(self, beta_eff): """ @@ -287,12 +326,14 @@ def get_reduced_demand(self, beta_eff): if getattr(self, 'beta_tvd', None) is None: msg = 'The beta_tvd is not set yet.' raise ValueError(msg) - RA = 2.12/(3.21-0.68*np.log(beta_eff)) # noqa: N806 - Rv = 1.65/(2.31-0.41*np.log(beta_eff)) # noqa: N806 + RA = 2.12 / (3.21 - 0.68 * np.log(beta_eff)) # noqa: N806 + Rv = 1.65 / (2.31 - 0.41 * np.log(beta_eff)) # noqa: N806 if self.beta_tvd < 0: - RD = 1.39/(1.82 - 0.27 * np.log(beta_eff)) # EQ A9 in Cao and Peterson 2006 # noqa: N806 + RD = 1.39 / ( + 1.82 - 0.27 * np.log(beta_eff) + ) # EQ A9 in Cao and Peterson 2006 # noqa: N806 else: - RD = (1.65/(2.31-0.41*np.log(self.beta_tvd))) # noqa: N806 + RD = 1.65 / (2.31 - 0.41 * np.log(self.beta_tvd)) # noqa: N806 dem_sa = np.zeros_like(np.array(self.T)) dem_sd = np.zeros_like(np.array(self.T)) for i, t in enumerate(self.T): @@ -307,7 +348,6 @@ def get_reduced_demand(self, beta_eff): dem_sd[i] = self.get_sd_from_sa(dem_sa[i], t) return dem_sd, dem_sa - def set_ruduction_factor(self, beta_eff): """ Set the reduction factor for a given effective damping ratio. @@ -320,9 +360,9 @@ def set_ruduction_factor(self, beta_eff): if getattr(self, 'Tavb', None) is None: msg = 'The Tavb is not set yet.' raise ValueError(msg) - self.RA = 2.12/(3.21-0.68*np.log(beta_eff)) - self.Rv = 1.65/(2.31-0.41*np.log(beta_eff)) - self.RD = (1.65/(2.31-0.41*np.log(beta_eff))) + self.RA = 2.12 / (3.21 - 0.68 * np.log(beta_eff)) + self.Rv = 1.65 / (2.31 - 0.41 * np.log(beta_eff)) + self.RD = 1.65 / (2.31 - 0.41 * np.log(beta_eff)) # def __init__(self, sa_03, sa_10, Mw = 7.0): # self.Tvd = np.power(10, (Mw - 5)/2) @@ -383,22 +423,22 @@ def check_IM(IM_header): # noqa: N802, N803 msg = 'The IM header of should contain SA_1.0' raise ValueError(msg) -class HAZUS_lin_chang_2003(HAZUS): +class HAZUS_lin_chang_2003(HAZUS): """ A class to represent the design spectrum from HAZUS V5 (2022), and the damping deduction relationship from Lin and Chang 2003. 
""" # noqa: D205 - def __init__(self, Mw = 7.0): # noqa: N803 + def __init__(self, Mw=7.0): # noqa: N803 super().__init__(Mw) def name(self): # noqa: D102 - return "HAZUS_lin_chang_2003" + return 'HAZUS_lin_chang_2003' def get_dmf(self, beta_eff, T): # noqa: D102, N803 - alpha = 1.303+0.436*np.log(beta_eff) - return 1-alpha*T**0.3/(T+1)**0.65 + alpha = 1.303 + 0.436 * np.log(beta_eff) + return 1 - alpha * T**0.3 / (T + 1) ** 0.65 def get_reduced_demand(self, beta_eff): # noqa: D102 if getattr(self, 'Tavb', None) is None: @@ -419,6 +459,7 @@ def get_reduced_demand(self, beta_eff): # noqa: D102 return dem_sd, dem_sa + class ASCE_7_10(demand_model_base): """ A class to represent the design spectrum from ASCE_7_10. @@ -441,7 +482,6 @@ class ASCE_7_10(demand_model_base): """ # noqa: D414 def __init__(self, T, dem_sd_05, dem_sa_05): # noqa: N803 - self.T = T + self.T = T self.dem_sd_05 = dem_sd_05 self.dem_sa_05 = dem_sa_05 - diff --git a/modules/performSIMULATION/capacitySpectrum/runCMS.py b/modules/performSIMULATION/capacitySpectrum/runCMS.py index 03d667338..6611bc67b 100644 --- a/modules/performSIMULATION/capacitySpectrum/runCMS.py +++ b/modules/performSIMULATION/capacitySpectrum/runCMS.py @@ -57,48 +57,53 @@ import simcenter_common # noqa: E402 -def find_performance_point(cap_x,cap_y,dem_x,dem_y,dd=0.001): - """Interpolate to have matching discretization for cap/demand curves. - - Created by: Tamika Bassman. - """ - # Interpolate to have matching discretization for cap/demand curves - x_interp = np.arange(0,min(cap_x[-1],dem_x[-1])+dd,dd) - dem_y_interp = np.interp(x_interp,dem_x,dem_y) - cap_y_interp = np.interp(x_interp,cap_x,cap_y) - - # # Enforce capacity curve to have same final length as spectrum - # cap_y = cap_y[:min(len(cap_x),len(spec_x))] - - # Find sign changes in the difference between the two curves - these are - # effectively intersections between the two curves - curves_diff = dem_y_interp - cap_y_interp - # adapted from https://stackoverflow.com/questions/4111412/how-do-i-get-a-list-of-indices-of-non-zero-elements-in-a-list - id_sign_changes = [n for n,(i,j) in enumerate(zip(curves_diff[:-1],curves_diff[1:])) if i*j <= 0] - - # id_sign_changes = [] - # for i,sign in enumerate(curves_diff[:-1]): - # if curves_diff[i]*curves_diff[i+1]<=0: - # # print(i) - # id_sign_changes += [i] - - # If sign changes detected, return the first (smallest abscissa) as the PP - if len(id_sign_changes) > 0: - ix = id_sign_changes[0] - perf_x = x_interp[ix] - perf_y = np.average([cap_y_interp[ix],dem_y_interp[ix]]) - elif dem_y_interp[0] > cap_y_interp[0]: - perf_x = x_interp[-1] - perf_y = cap_y_interp[-1] - elif dem_y_interp[0] < cap_y_interp[0]: - perf_x = 0.001 # x_interp[0] - perf_y = 0.001 # cap_y_interp[0] - # except IndexError as err: - # print('No performance point found; curves do not intersect.') - # print('IndexError: ') - # print(err) - - return perf_x,perf_y +def find_performance_point(cap_x, cap_y, dem_x, dem_y, dd=0.001): + """Interpolate to have matching discretization for cap/demand curves. + + Created by: Tamika Bassman. 
+ """ + # Interpolate to have matching discretization for cap/demand curves + x_interp = np.arange(0, min(cap_x[-1], dem_x[-1]) + dd, dd) + dem_y_interp = np.interp(x_interp, dem_x, dem_y) + cap_y_interp = np.interp(x_interp, cap_x, cap_y) + + # # Enforce capacity curve to have same final length as spectrum + # cap_y = cap_y[:min(len(cap_x),len(spec_x))] + + # Find sign changes in the difference between the two curves - these are + # effectively intersections between the two curves + curves_diff = dem_y_interp - cap_y_interp + # adapted from https://stackoverflow.com/questions/4111412/how-do-i-get-a-list-of-indices-of-non-zero-elements-in-a-list + id_sign_changes = [ + n + for n, (i, j) in enumerate(zip(curves_diff[:-1], curves_diff[1:])) + if i * j <= 0 + ] + + # id_sign_changes = [] + # for i,sign in enumerate(curves_diff[:-1]): + # if curves_diff[i]*curves_diff[i+1]<=0: + # # print(i) + # id_sign_changes += [i] + + # If sign changes detected, return the first (smallest abscissa) as the PP + if len(id_sign_changes) > 0: + ix = id_sign_changes[0] + perf_x = x_interp[ix] + perf_y = np.average([cap_y_interp[ix], dem_y_interp[ix]]) + elif dem_y_interp[0] > cap_y_interp[0]: + perf_x = x_interp[-1] + perf_y = cap_y_interp[-1] + elif dem_y_interp[0] < cap_y_interp[0]: + perf_x = 0.001 # x_interp[0] + perf_y = 0.001 # cap_y_interp[0] + # except IndexError as err: + # print('No performance point found; curves do not intersect.') + # print('IndexError: ') + # print(err) + + return perf_x, perf_y + def find_unit_scale_factor(aim): """Find the unit scale factor based on the AIM file. @@ -141,7 +146,8 @@ def find_unit_scale_factor(aim): f_scale_im_user_to_cms[name] = 1 f_scale_edp_cms_to_user = {} f_scale_edp_cms_to_user['1-SA-1-1'] = simcenter_common.g / ( - f_length_in / f_time_in**2.0) + f_length_in / f_time_in**2.0 + ) f_scale_edp_cms_to_user['1-PRD-1-1'] = simcenter_common.inch / f_length_in return f_scale_im_user_to_cms, f_scale_edp_cms_to_user @@ -169,9 +175,9 @@ def run_csm(demand_model, capacity_model, damping_model, tol, max_iter, im_i): beta_eff = damping_model.get_beta_elastic() beta_d = beta_eff - # Track convergence - iter_sd = [] # intermediate predictions of Sd @ PP - iter_sa = [] # intermediate predictions of Sa @ PP + # Track convergence + iter_sd = [] # intermediate predictions of Sd @ PP + iter_sa = [] # intermediate predictions of Sa @ PP # Iterate to find converged PP for i in range(max_iter): # Calc demand spectrum @@ -179,12 +185,12 @@ def run_csm(demand_model, capacity_model, damping_model, tol, max_iter, im_i): # create capacity curve cap_sd, cap_sa = capacity_model.get_capacity_curve(dem_sd[-1]) # Calc intersection (PP) - perf_sd, perf_sa = find_performance_point(cap_sd,cap_sa,dem_sd,dem_sa) + perf_sd, perf_sa = find_performance_point(cap_sd, cap_sa, dem_sd, dem_sa) iter_sd.append(perf_sd) iter_sa.append(perf_sa) # Calc effective damping at this point on the capacity curve - beta_eff = damping_model.get_beta(perf_sd,perf_sa) + beta_eff = damping_model.get_beta(perf_sd, perf_sa) # Check if tolerance met on damping ratios of capacity, demand cueves at this point if abs(beta_d - beta_eff) <= tol: @@ -205,13 +211,14 @@ def run_csm(demand_model, capacity_model, damping_model, tol, max_iter, im_i): dem_sd, dem_sa = demand_model.get_reduced_demand(beta_eff) beta_d = beta_eff if i == max_iter - 1: - logging.warning(f'The capacity spectrum method did not converge for the {im_i}th IM realization.') + logging.warning( + f'The capacity spectrum method did not converge for the {im_i}th 
IM realization.' + ) return perf_sd, perf_sa def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 - # open the AIM file with open(AIM_input_path, encoding='utf-8') as f: # noqa: PTH123 AIM_in = json.load(f) # noqa: N806 @@ -219,20 +226,19 @@ def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 UQ_app = applications['UQ']['Application'] # noqa: N806 # Raise an error if the UQ application is not None - if UQ_app != "None": - msg = "This app is only used when UQ is None, similar to IMasEDP" + if UQ_app != 'None': + msg = 'This app is only used when UQ is None, similar to IMasEDP' raise ValueError(msg) # get the simulation application SIM_input = applications['Simulation'] # noqa: N806 if SIM_input['Application'] != 'CapacitySpectrumMethod': - msg = "Wrong simulation application is called" + msg = 'Wrong simulation application is called' raise ValueError(msg) SIM_input_data = SIM_input['ApplicationData'] # noqa: N806 tol = SIM_input_data.get('tolerance', 0.05) max_iter = SIM_input_data.get('max_iter', 100) - # open the event file and get the list of events with open(EVENT_input_path, encoding='utf-8') as f: # noqa: PTH123 EVENT_in = json.load(f) # noqa: N806 @@ -302,7 +308,9 @@ def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 IM_samples = IM_samples.T # noqa: N806 for c_i, col in enumerate(header): f_i = f_scale.get(col.strip(), f_scale.get('ALL', None)) - f_i_to_cms = f_scale_im_user_to_cms.get(col.strip(), f_scale_im_user_to_cms.get('ALL', None)) + f_i_to_cms = f_scale_im_user_to_cms.get( + col.strip(), f_scale_im_user_to_cms.get('ALL', None) + ) if f_i is None: raise ValueError(f'No units defined for {col}') # noqa: EM102, TRY003 @@ -334,12 +342,15 @@ def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 capacity_model_name = SIM_input_data['CapacityModel']['Name'] if capacity_model_name == 'HAZUS_cao_peterson_2006': capacity_model = CapacityModels.HAZUS_cao_peterson_2006( - general_info=AIM_in['GeneralInformation']) + general_info=AIM_in['GeneralInformation'] + ) # damping model damping_model_name = SIM_input_data['DampingModel']['Name'] if damping_model_name == 'HAZUS_cao_peterson_2006': - damping_model = DampingModels.HAZUS_cao_peterson_2006(demand_model, capacity_model) + damping_model = DampingModels.HAZUS_cao_peterson_2006( + demand_model, capacity_model + ) # Loop through each IM sample for ind in range(IM_samples.shape[0]): @@ -360,7 +371,9 @@ def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 # damping_model.set_HAZUS_bldg_type(capacity_model.get_hazus_bldg_type()) # iterate to get sd and sa - perf_sd, perf_sa = run_csm(demand_model, capacity_model, damping_model, tol, max_iter, ind) + perf_sd, perf_sa = run_csm( + demand_model, capacity_model, damping_model, tol, max_iter, ind + ) EDP_output[ind, 0] = perf_sa # Table 5-1 in Hazus, convert to inches @@ -368,7 +381,7 @@ def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 if general_info.get('RoofHeight', None) is not None: roof_height = general_info['RoofHeight'] else: - roof_height = capacity_model.get_hazus_roof_height()*12 + roof_height = capacity_model.get_hazus_roof_height() * 12 drift_ratio = perf_sd / capacity_model.get_hazus_alpha2() / roof_height EDP_output[ind, 1] = drift_ratio From 2320235596214206ccad5679f547df6bb457571f Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Thu, 12 Sep 2024 18:04:40 -0700 Subject: [PATCH 51/59] ruff try num2 --- 
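Note: the hunks below are almost entirely mechanical. They re-wrap statements that exceeded ruff's line length, fix spelling in the smelt C++ comments (trunacted, compenent, distrubution, and similar), and pin the suppression comments displaced by the re-wrapping. Because a noqa directive only applies to its own physical line, a rule such as N806 (mixed-case variable name) must be suppressed on the line that binds the name once the statement is wrapped, and the stranded trailing comment gains RUF100 (unused noqa) so ruff does not flag it. The recurring pattern, in miniature:

distanceRup = rupture.getRuptureSurface().getDistanceRup(  # noqa: N806
    site_loc
)  # noqa: N806, RUF100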
.../common/smelt/dabaghi_der_kiureghian.cc | 8 +++---- .../common/smelt/dabaghi_der_kiureghian.h | 8 +++---- .../createEVENT/common/smelt/json_object.h | 2 +- .../createEVENT/common/smelt/nelder_mead.cc | 4 ++-- .../createEVENT/common/smelt/numeric_utils.cc | 4 ++-- .../createEVENT/common/smelt/vlachos_et_al.cc | 2 +- .../createEVENT/common/smelt/vlachos_et_al.h | 4 ++-- .../regionalGroundMotion/FetchOpenSHA.py | 15 ++++++------ .../HazardSimulationEQ.py | 4 ++-- .../regionalGroundMotion/ScenarioForecast.py | 4 ++-- .../capacitySpectrum/CapacityModels.py | 24 +++++++++---------- .../capacitySpectrum/DampingModels.py | 4 ++-- .../capacitySpectrum/DemandModels.py | 8 +++---- .../capacitySpectrum/runCMS.py | 2 +- 14 files changed, 47 insertions(+), 46 deletions(-) diff --git a/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc index 0ae2b30b2..9a2d0cf17 100644 --- a/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc +++ b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.cc @@ -406,7 +406,7 @@ utilities::JsonObject stochastic::DabaghiDerKiureghian::generate( } } - // Baseline correct trunacted non-pulse-like motions + // Baseline correct truncated non-pulse-like motions for (unsigned int i = 0; i < num_sims_nopulse_; ++i) { for (unsigned int j = 0; j < num_realizations_; ++j) { baseline_correct_time_history(nopulse_motions_comp1[i][j], gfactor, @@ -787,7 +787,7 @@ void stochastic::DabaghiDerKiureghian::transform_parameters_from_normal_space( params_fitted1_(17), params_fitted2_(17), params_fitted3_(17), params_lower_bound_(17)); - // Calculate depth_to_rupture compenent 2 + // Calculate depth_to_rupture component 2 beta_dist = Factory::instance()->create( "BetaDist", std::move(params_fitted1_(18)), @@ -1203,7 +1203,7 @@ double stochastic::DabaghiDerKiureghian::calc_time_to_intensity( std::vector stochastic::DabaghiDerKiureghian::calc_linear_filter( unsigned int num_steps, const Eigen::VectorXd& filter_params, double t01, double tmid, double t99) const { - // Mininum frequency in Hz + // Minimum frequency in Hz double min_freq = 0.3; std::vector filter_func(num_steps); // Frequency at tmid, in Hz @@ -1458,7 +1458,7 @@ void stochastic::DabaghiDerKiureghian::baseline_correct_time_history( std::vector& time_history, double gfactor, unsigned int order) const { - // Calculate velocity and displacment time histories + // Calculate velocity and displacement time histories std::vector vel_series(time_history.size()); std::vector disp_series(time_history.size()); diff --git a/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h index 26f5bb04a..2a75d89a5 100644 --- a/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h +++ b/modules/createEVENT/common/smelt/dabaghi_der_kiureghian.h @@ -218,7 +218,7 @@ class DabaghiDerKiureghian : public StochasticModel { unsigned int num_gms = 1) const; /** - * Backcalculate modulating parameters given Arias Intesity and duration parameters + * Backcalculate modulating parameters given Arias Intensity and duration parameters * @param[in] q_params Vector containing Ia, D595, D05, and D030 * @param[in] t0 Initial time. Defaults to 0.0. 
* @return Vector containing parameters alpha, beta, tmaxq, and c @@ -256,7 +256,7 @@ class DabaghiDerKiureghian : public StochasticModel { * @param[in] d095_target Time from t0 to time of 95% Arias intensity of * target motion * @param[in] t0 Start time of modulating function and of target ground motion - * @return ERrro in modulating function + * @return error in modulating function */ double calc_parameter_error(const std::vector& parameters, double d05_target, double d030_target, @@ -399,8 +399,8 @@ class DabaghiDerKiureghian : public StochasticModel { Eigen::VectorXd std_dev_nopulse_; /**< No-pulse-like parameter standard deviation */ Eigen::MatrixXd corr_matrix_pulse_; /**< Pulse-like parameter correlation matrix */ Eigen::MatrixXd corr_matrix_nopulse_; /**< No-pulse-like parameter correlation matrix */ - Eigen::MatrixXd beta_distribution_pulse_; /**< Beta distrubution parameters for pulse-like motion */ - Eigen::MatrixXd beta_distribution_nopulse_; /**< Beta distrubution parameters for no-pulse-like motion */ + Eigen::MatrixXd beta_distribution_pulse_; /**< Beta distribution parameters for pulse-like motion */ + Eigen::MatrixXd beta_distribution_nopulse_; /**< Beta distribution parameters for no-pulse-like motion */ Eigen::VectorXd params_lower_bound_; /**< Lower bound for marginal distributions fitted to params (Table 5 in Dabaghi & Der Kiureghian, 2017) */ Eigen::VectorXd params_upper_bound_; /**< Upper bound for marginal distributions fitted to params diff --git a/modules/createEVENT/common/smelt/json_object.h b/modules/createEVENT/common/smelt/json_object.h index 12a98426f..d2667d032 100644 --- a/modules/createEVENT/common/smelt/json_object.h +++ b/modules/createEVENT/common/smelt/json_object.h @@ -101,7 +101,7 @@ class JsonObject { /** * Get underlying JSON library object - * @return Copy of interal JSON implementation + * @return Copy of internal JSON implementation */ json get_library_json() const { return json_object_; diff --git a/modules/createEVENT/common/smelt/nelder_mead.cc b/modules/createEVENT/common/smelt/nelder_mead.cc index 8b96029e9..1eecbb6f7 100644 --- a/modules/createEVENT/common/smelt/nelder_mead.cc +++ b/modules/createEVENT/common/smelt/nelder_mead.cc @@ -10,7 +10,7 @@ std::vector optimization::NelderMead::minimize( std::function&)>& objective_function) { // Create vector of deltas with length equal to the number of dimensions std::vector deltas(initial_point.size(), delta); - // Call minmize with vector of deltas + // Call minimize with vector of deltas return minimize(initial_point, deltas, objective_function); } @@ -33,7 +33,7 @@ std::vector optimization::NelderMead::minimize( } } - // Call minimize with matrix definining initial simplex + // Call minimize with matrix defining initial simplex return minimize(simplex, objective_function); } diff --git a/modules/createEVENT/common/smelt/numeric_utils.cc b/modules/createEVENT/common/smelt/numeric_utils.cc index 645c39c47..6ab0490ce 100644 --- a/modules/createEVENT/common/smelt/numeric_utils.cc +++ b/modules/createEVENT/common/smelt/numeric_utils.cc @@ -55,7 +55,7 @@ bool convolve_1d(const std::vector& input_x, // conv_status = vsldConvExec1D(conv_task, input_x.data(), 1, input_y.data(), 1, // response.data(), 1); - // // Check if convolution exectution was successful + // // Check if convolution execution was successful // if (conv_status != VSL_STATUS_OK) { // throw std::runtime_error( // "\nERROR: in numeric_utils::convolve_1d: Error in convolution " @@ -159,7 +159,7 @@ bool inverse_fft(std::vector> 
input_vector, // } // // Set the backward scale factor to be 1 divided by the size of the input vector - // // to make the backward tranform the inverse of the forward transform + // // to make the backward transform the inverse of the forward transform // fft_status = DftiSetValue(fft_descriptor, DFTI_BACKWARD_SCALE, // static_cast(1.0 / input_vector.size())); // if (fft_status != DFTI_NO_ERROR) { diff --git a/modules/createEVENT/common/smelt/vlachos_et_al.cc b/modules/createEVENT/common/smelt/vlachos_et_al.cc index 0065eef25..74b422a8a 100644 --- a/modules/createEVENT/common/smelt/vlachos_et_al.cc +++ b/modules/createEVENT/common/smelt/vlachos_et_al.cc @@ -721,7 +721,7 @@ Eigen::VectorXd stochastic::VlachosEtAl::identify_parameters( energy[i] = energy[i - 1] + 0.05; } - // Initialze mode 1 parameters and frequencies + // Initialize mode 1 parameters and frequencies std::vector mode_1_params = {initial_params(2), initial_params(3), initial_params(4)}; diff --git a/modules/createEVENT/common/smelt/vlachos_et_al.h b/modules/createEVENT/common/smelt/vlachos_et_al.h index cd953e59f..32e119983 100644 --- a/modules/createEVENT/common/smelt/vlachos_et_al.h +++ b/modules/createEVENT/common/smelt/vlachos_et_al.h @@ -116,7 +116,7 @@ class VlachosEtAl : public StochasticModel { * @param[in, out] time_histories Location where time histories should be * stored * @param[in] parameters Set of model parameters to use for calculating power - * specturm and time histories + * spectrum and time histories * @return Returns true if successful, false otherwise */ bool time_history_family(std::vector>& time_histories, @@ -263,7 +263,7 @@ class VlachosEtAl : public StochasticModel { Eigen::VectorXd means_; /**< Mean values of model parameters */ Eigen::MatrixXd covariance_; /**< Covariance matrix for model parameters */ std::vector> - model_parameters_; /**< Distrubutions for 18-parameter model */ + model_parameters_; /**< Distributions for 18-parameter model */ Eigen::Matrix parameter_realizations_; /**< Random realizations of normal model parameters */ Eigen::Matrix diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py index ceabc14bd..15bd14380 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py @@ -54,7 +54,7 @@ GlobalVariable.JVM_started = True if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 - import jpype # noqa: I001 + import jpype # noqa: I001, RUF100 # from jpype import imports import jpype.imports @@ -602,15 +602,15 @@ def export_to_json( # noqa: C901, D103 # these calls are time-consuming, so only run them if one needs # detailed outputs of the sources cur_dict['properties'].update({'Distance': float(cur_dist)}) - distanceRup = rupture.getRuptureSurface().getDistanceRup( + distanceRup = rupture.getRuptureSurface().getDistanceRup( # noqa: N806 site_loc - ) # noqa: N806 + ) # noqa: N806, RUF100 cur_dict['properties'].update( {'DistanceRup': float(distanceRup)} ) - distanceSeis = rupture.getRuptureSurface().getDistanceSeis( + distanceSeis = rupture.getRuptureSurface().getDistanceSeis( # noqa: N806 site_loc - ) # noqa: N806 + ) # noqa: N806, RUF100 cur_dict['properties'].update( {'DistanceSeis': float(distanceSeis)} ) @@ -675,8 +675,9 @@ def export_to_json( # noqa: C901, D103 with 
h5py.File(outfile, 'w') as h5file: # Store the geometry as a string array h5file.create_dataset( - 'geometry', data=gdf.geometry.astype(str).values.astype('S') - ) # noqa: PD011, F405 + 'geometry', + data=gdf.geometry.astype(str).values.astype('S'), # noqa: PD011, F405 + ) # noqa: F405, PD011, RUF100 # return return erf_data diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 8a494c283..766f017d0 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -562,9 +562,9 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 if GlobalVariable.JVM_started is False: GlobalVariable.JVM_started = True if importlib.util.find_spec('jpype') is None: - subprocess.check_call( + subprocess.check_call( # noqa: S603 [sys.executable, '-m', 'pip', 'install', 'JPype1'] - ) # noqa: S603 + ) # noqa: RUF100, S603 import jpype # from jpype import imports diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 24f32dd47..39e20ad58 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -101,9 +101,9 @@ if GlobalVariable.JVM_started is False: GlobalVariable.JVM_started = True if importlib.util.find_spec('jpype') is None: - subprocess.check_call( + subprocess.check_call( # noqa: S603 [sys.executable, '-m', 'pip', 'install', 'JPype1'] - ) # noqa: S603 + ) # noqa: RUF100, S603 import jpype # from jpype import imports diff --git a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py index 112a173a5..c5e3c9bd7 100644 --- a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py +++ b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py @@ -291,43 +291,43 @@ def __init__(self, general_info, dD=0.001): # noqa: N803 # HAZUS capacity data: Table 5-7 to Tabl 5-10 in HAZUS 5.1 self.capacity_data = dict() # noqa: C408 self.capacity_data['HC'] = pd.read_csv( - os.path.join( - os.path.dirname(__file__), # noqa: PTH118, PTH120 + os.path.join( # noqa: PTH118 + os.path.dirname(__file__), # noqa: PTH118, PTH120, RUF100 'HC_capacity_data.csv', ), index_col=0, ).to_dict(orient='index') self.capacity_data['MC'] = pd.read_csv( - os.path.join( - os.path.dirname(__file__), # noqa: PTH118, PTH120 + os.path.join( # noqa: PTH118 + os.path.dirname(__file__), # noqa: PTH118, PTH120, RUF100 'MC_capacity_data.csv', ), index_col=0, ).to_dict(orient='index') self.capacity_data['LC'] = pd.read_csv( - os.path.join( - os.path.dirname(__file__), # noqa: PTH118, PTH120 + os.path.join( # noqa: PTH118 + os.path.dirname(__file__), # noqa: PTH118, PTH120, RUF100 'LC_capacity_data.csv', ), index_col=0, ).to_dict(orient='index') self.capacity_data['PC'] = pd.read_csv( - os.path.join( - os.path.dirname(__file__), # noqa: PTH118, PTH120 + os.path.join( # noqa: PTH118 + os.path.dirname(__file__), # noqa: PTH118, PTH120, RUF100 'PC_capacity_data.csv', ), index_col=0, ).to_dict(orient='index') self.capacity_data['alpha2'] = pd.read_csv( - os.path.join( - os.path.dirname(__file__), # noqa: PTH118, PTH120 + os.path.join( # noqa: PTH118 + os.path.dirname(__file__), # noqa: 
PTH118, PTH120, RUF100 'hazus_capacity_alpha2.csv', ), index_col=0, ).to_dict(orient='index') self.capacity_data['roof_height'] = pd.read_csv( - os.path.join( - os.path.dirname(__file__), # noqa: PTH118, PTH120 + os.path.join( # noqa: PTH118 + os.path.dirname(__file__), # noqa: PTH118, PTH120, RUF100 'hazus_typical_roof_height.csv', ), index_col=0, diff --git a/modules/performSIMULATION/capacitySpectrum/DampingModels.py b/modules/performSIMULATION/capacitySpectrum/DampingModels.py index 654dfdfd3..feb455461 100644 --- a/modules/performSIMULATION/capacitySpectrum/DampingModels.py +++ b/modules/performSIMULATION/capacitySpectrum/DampingModels.py @@ -139,8 +139,8 @@ def __init__(self): 'MH': 9.25, } self.kappa_data = pd.read_csv( - os.path.join( - os.path.dirname(__file__), # noqa: PTH118, PTH120 + os.path.join( # noqa: PTH118 + os.path.dirname(__file__), # noqa: PTH118, PTH120, RUF100 'hazus_kappa_data.csv', ), index_col=0, diff --git a/modules/performSIMULATION/capacitySpectrum/DemandModels.py b/modules/performSIMULATION/capacitySpectrum/DemandModels.py index c874de3eb..3c81b0c11 100644 --- a/modules/performSIMULATION/capacitySpectrum/DemandModels.py +++ b/modules/performSIMULATION/capacitySpectrum/DemandModels.py @@ -243,11 +243,11 @@ def set_Tavb(self, damping_model, tol=0.05, max_iter=100): # noqa: N802 for _i in range(max_iter): beta = x_prev ra = 2.12 / (3.21 - 0.68 * np.log(beta)) - Tavb = ( + Tavb = ( # noqa: N806 self.Tav * (2.12 / (3.21 - 0.68 * np.log(beta))) / (1.65 / (2.31 - 0.41 * np.log(beta))) - ) # noqa: N806 + ) # noqa: N806, RUF100 sa = self.get_sa(Tavb) / ra sd = self.get_sd_from_sa(sa, Tavb) beta_eff = damping_model.get_beta(sd, sa) @@ -329,9 +329,9 @@ def get_reduced_demand(self, beta_eff): RA = 2.12 / (3.21 - 0.68 * np.log(beta_eff)) # noqa: N806 Rv = 1.65 / (2.31 - 0.41 * np.log(beta_eff)) # noqa: N806 if self.beta_tvd < 0: - RD = 1.39 / ( + RD = 1.39 / ( # noqa: N806 1.82 - 0.27 * np.log(beta_eff) - ) # EQ A9 in Cao and Peterson 2006 # noqa: N806 + ) # EQ A9 in Cao and Peterson 2006 # noqa: N806, RUF100 else: RD = 1.65 / (2.31 - 0.41 * np.log(self.beta_tvd)) # noqa: N806 dem_sa = np.zeros_like(np.array(self.T)) diff --git a/modules/performSIMULATION/capacitySpectrum/runCMS.py b/modules/performSIMULATION/capacitySpectrum/runCMS.py index 6611bc67b..c17fb7fba 100644 --- a/modules/performSIMULATION/capacitySpectrum/runCMS.py +++ b/modules/performSIMULATION/capacitySpectrum/runCMS.py @@ -137,7 +137,7 @@ def find_unit_scale_factor(aim): for base_unit_type, unit_set in simcenter_common.unit_types.items(): if unit in unit_set: unit_type = base_unit_type - # If the input event unit is acceleration, conver to g + # If the input event unit is acceleration, convert to g if unit_type == 'acceleration': f_in = f_length_in / f_time_in**2.0 f_out = 1 / simcenter_common.g From 03cf2894744bd1bb87a974583688b0d358c69fb4 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Thu, 12 Sep 2024 18:06:58 -0700 Subject: [PATCH 52/59] ruff try num 3 --- modules/performSIMULATION/capacitySpectrum/CapacityModels.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py index c5e3c9bd7..c55830d46 100644 --- a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py +++ b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py @@ -288,7 +288,7 @@ class HAZUS_cao_peterson_2006(capacity_model_base): """ # noqa: D414 def __init__(self, general_info, dD=0.001): # 
noqa: N803 - # HAZUS capacity data: Table 5-7 to Tabl 5-10 in HAZUS 5.1 + # HAZUS capacity data: Table 5-7 to Table 5-10 in HAZUS 5.1 self.capacity_data = dict() # noqa: C408 self.capacity_data['HC'] = pd.read_csv( os.path.join( # noqa: PTH118 From faa470ccdb5802729d9bd4c45ade382fbb1bea2b Mon Sep 17 00:00:00 2001 From: yisangriB Date: Mon, 16 Sep 2024 17:26:09 -0700 Subject: [PATCH 53/59] sy - making dakota constant work in both RV & optimization problems --- modules/performUQ/dakota/dakotaProcedures.cpp | 86 ++++++++++++------- 1 file changed, 56 insertions(+), 30 deletions(-) diff --git a/modules/performUQ/dakota/dakotaProcedures.cpp b/modules/performUQ/dakota/dakotaProcedures.cpp index c295b2753..8983a7356 100755 --- a/modules/performUQ/dakota/dakotaProcedures.cpp +++ b/modules/performUQ/dakota/dakotaProcedures.cpp @@ -13,36 +13,65 @@ int writeRV(std::ostream &dakotaFile, struct randomVariables &theRandomVariables, std::string idVariables, std::vector &rvList, bool includeActiveText = true){ + dakotaFile << "variables \n "; // sy - because 'discrete_state_set' was coming before 'variables', when changing one variable to a constant in quoFEM example int numContinuousDesign = theRandomVariables.continuousDesignRVs.size(); + std::cout<<"numContinuousDesign"< 0) { - dakotaFile << " continuous_design = " << numContinuousDesign << "\n initial_point = "; - // std::list::iterator it; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) - dakotaFile << it->initialPoint << " "; - dakotaFile << "\n lower_bounds = "; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) - dakotaFile << it->lowerBound << " "; - dakotaFile << "\n upper_bounds = "; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) - dakotaFile << it->upperBound << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + if (!idVariables.empty()) { + dakotaFile << "id_variables = '" << idVariables << "'\n"; + } + + if (numContinuousDesign > 0) { + dakotaFile << " continuous_design = " << numContinuousDesign << "\n initial_point = "; + // std::list::iterator it; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) + dakotaFile << it->initialPoint << " "; + dakotaFile << "\n lower_bounds = "; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) + dakotaFile << it->lowerBound << " "; + dakotaFile << "\n upper_bounds = "; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) + dakotaFile << it->upperBound << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); + } + dakotaFile << "\n\n"; } + // sy - Constant in For optimization cases + + int numConstant = theRandomVariables.constantRVs.size(); + if (numConstant > 0) { + dakotaFile << " discrete_state_set \n real = " << numConstant; + dakotaFile << "\n elements_per_variable = "; + for 
(auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) + dakotaFile << "1 "; //std::list::iterator it; + dakotaFile << "\n elements = "; + for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) + dakotaFile << it->value << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); + } + dakotaFile << "\n\n"; + } + + + return 0; + } + + + // sy - Constant in random analysis + int numConstant = theRandomVariables.constantRVs.size(); if (numConstant > 0) { dakotaFile << " discrete_state_set \n real = " << numConstant; @@ -80,19 +109,16 @@ writeRV(std::ostream &dakotaFile, struct randomVariables &theRandomVariables, st dakotaFile << "\n\n"; } + + - return 0; - } if (includeActiveText == true) { if (idVariables.empty()) - dakotaFile << "variables \n active uncertain \n"; + dakotaFile << "active uncertain \n"; else - dakotaFile << "variables \n id_variables = '" << idVariables << "'\n active uncertain \n"; - } else { - dakotaFile << "variables \n"; - } - + dakotaFile << "id_variables = '" << idVariables << "'\n active uncertain \n"; + } // int numNormalUncertain = theRandomVariables.normalRVs.size(); int numNormal = theRandomVariables.normalRVs.size(); From 76c809460b28c0145cf67243cded4ec9dadef108 Mon Sep 17 00:00:00 2001 From: yisangriB Date: Mon, 16 Sep 2024 17:27:11 -0700 Subject: [PATCH 54/59] sy - fixing indentations --- modules/performUQ/dakota/dakotaProcedures.cpp | 1116 ++++++++--------- 1 file changed, 558 insertions(+), 558 deletions(-) diff --git a/modules/performUQ/dakota/dakotaProcedures.cpp b/modules/performUQ/dakota/dakotaProcedures.cpp index 8983a7356..be7538025 100755 --- a/modules/performUQ/dakota/dakotaProcedures.cpp +++ b/modules/performUQ/dakota/dakotaProcedures.cpp @@ -11,66 +11,42 @@ #include "../common/parseWorkflowInput.h" int -writeRV(std::ostream &dakotaFile, struct randomVariables &theRandomVariables, std::string idVariables, std::vector &rvList, bool includeActiveText = true){ - - dakotaFile << "variables \n "; // sy - because 'discrete_state_set' was coming before 'variables', when changing one variable to a constant in quoFEM example - - int numContinuousDesign = theRandomVariables.continuousDesignRVs.size(); - - std::cout<<"numContinuousDesign"< 0) { - dakotaFile << " continuous_design = " << numContinuousDesign << "\n initial_point = "; - // std::list::iterator it; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) - dakotaFile << it->initialPoint << " "; - dakotaFile << "\n lower_bounds = "; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) - dakotaFile << it->lowerBound << " "; - dakotaFile << "\n upper_bounds = "; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) - dakotaFile << it->upperBound << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; - } +writeRV(std::ostream &dakotaFile, struct randomVariables &theRandomVariables, std::string idVariables, std::vector &rvList, bool 
includeActiveText = true) { + dakotaFile << "variables \n "; // sy - because 'discrete_state_set' was coming before 'variables', when changing one variable to a constant in quoFEM example - // sy - Constant in For optimization cases - - int numConstant = theRandomVariables.constantRVs.size(); - if (numConstant > 0) { - dakotaFile << " discrete_state_set \n real = " << numConstant; - dakotaFile << "\n elements_per_variable = "; - for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) - dakotaFile << "1 "; //std::list::iterator it; - dakotaFile << "\n elements = "; - for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) - dakotaFile << it->value << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; - } + int numContinuousDesign = theRandomVariables.continuousDesignRVs.size(); + std::cout << "numContinuousDesign" << std::endl; + std::cout << numContinuousDesign << std::endl; - return 0; + if (numContinuousDesign != 0) { + + if (!idVariables.empty()) { + dakotaFile << "id_variables = '" << idVariables << "'\n"; + } + + if (numContinuousDesign > 0) { + dakotaFile << " continuous_design = " << numContinuousDesign << "\n initial_point = "; + // std::list::iterator it; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) + dakotaFile << it->initialPoint << " "; + dakotaFile << "\n lower_bounds = "; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) + dakotaFile << it->lowerBound << " "; + dakotaFile << "\n upper_bounds = "; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) + dakotaFile << it->upperBound << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.continuousDesignRVs.begin(); it != theRandomVariables.continuousDesignRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); + } + dakotaFile << "\n\n"; } - // sy - Constant in random analysis + // sy - Constant in For optimization cases int numConstant = theRandomVariables.constantRVs.size(); if (numConstant > 0) { @@ -89,333 +65,357 @@ writeRV(std::ostream &dakotaFile, struct randomVariables &theRandomVariables, st dakotaFile << "\n\n"; } - int numRealDiscrete = theRandomVariables.discreteDesignSetRVs.size(); - if (numRealDiscrete > 0) { - dakotaFile << " discrete_design_set \n real = " << numRealDiscrete; - dakotaFile << "\n elements_per_variable = "; - for (auto it = theRandomVariables.discreteDesignSetRVs.begin(); it != theRandomVariables.discreteDesignSetRVs.end(); it++) - dakotaFile << it->elements.size() << " "; //std::list::iterator it; - dakotaFile << "\n elements = "; - for (auto it = theRandomVariables.discreteDesignSetRVs.begin(); it != theRandomVariables.discreteDesignSetRVs.end(); it++) { - it->elements.sort(); - for (auto element = it->elements.begin(); element != it->elements.end(); element++) - dakotaFile << " \'" << *element << "\'"; - } - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.discreteDesignSetRVs.begin(); it != theRandomVariables.discreteDesignSetRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - 
rvList.push_back(it->name); - } - dakotaFile << "\n\n"; - } + + return 0; + } + // sy - Constant in random analysis + + int numConstant = theRandomVariables.constantRVs.size(); + if (numConstant > 0) { + dakotaFile << " discrete_state_set \n real = " << numConstant; + dakotaFile << "\n elements_per_variable = "; + for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) + dakotaFile << "1 "; //std::list::iterator it; + dakotaFile << "\n elements = "; + for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) + dakotaFile << it->value << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); + } + dakotaFile << "\n\n"; + } - + int numRealDiscrete = theRandomVariables.discreteDesignSetRVs.size(); + if (numRealDiscrete > 0) { + dakotaFile << " discrete_design_set \n real = " << numRealDiscrete; + dakotaFile << "\n elements_per_variable = "; + for (auto it = theRandomVariables.discreteDesignSetRVs.begin(); it != theRandomVariables.discreteDesignSetRVs.end(); it++) + dakotaFile << it->elements.size() << " "; //std::list::iterator it; + dakotaFile << "\n elements = "; + for (auto it = theRandomVariables.discreteDesignSetRVs.begin(); it != theRandomVariables.discreteDesignSetRVs.end(); it++) { + it->elements.sort(); + for (auto element = it->elements.begin(); element != it->elements.end(); element++) + dakotaFile << " \'" << *element << "\'"; + } + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.discreteDesignSetRVs.begin(); it != theRandomVariables.discreteDesignSetRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); + } + dakotaFile << "\n\n"; + } - if (includeActiveText == true) { - if (idVariables.empty()) - dakotaFile << "active uncertain \n"; - else - dakotaFile << "id_variables = '" << idVariables << "'\n active uncertain \n"; - } - // int numNormalUncertain = theRandomVariables.normalRVs.size(); - - int numNormal = theRandomVariables.normalRVs.size(); - if (theRandomVariables.normalRVs.size() > 0) { - dakotaFile << " normal_uncertain = " << numNormal << "\n means = "; - // std::list::iterator it; - for (auto it = theRandomVariables.normalRVs.begin(); it != theRandomVariables.normalRVs.end(); it++) - dakotaFile << it->mean << " "; - dakotaFile << "\n std_deviations = "; - for (auto it = theRandomVariables.normalRVs.begin(); it != theRandomVariables.normalRVs.end(); it++) - dakotaFile << it->stdDev << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.normalRVs.begin(); it != theRandomVariables.normalRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + + + + if (includeActiveText == true) { + if (idVariables.empty()) + dakotaFile << "active uncertain \n"; + else + dakotaFile << "id_variables = '" << idVariables << "'\n active uncertain \n"; + } + // int numNormalUncertain = theRandomVariables.normalRVs.size(); + + int numNormal = theRandomVariables.normalRVs.size(); + if (theRandomVariables.normalRVs.size() > 0) { + dakotaFile << " normal_uncertain = " << numNormal << "\n means = "; + // std::list::iterator it; + for (auto it = theRandomVariables.normalRVs.begin(); it != theRandomVariables.normalRVs.end(); it++) + dakotaFile << it->mean << " "; + dakotaFile << "\n 
std_deviations = "; + for (auto it = theRandomVariables.normalRVs.begin(); it != theRandomVariables.normalRVs.end(); it++) + dakotaFile << it->stdDev << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.normalRVs.begin(); it != theRandomVariables.normalRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); } - int numLognormal = theRandomVariables.lognormalRVs.size(); - if (numLognormal > 0) { - dakotaFile << " lognormal_uncertain = " << numLognormal << "\n means = "; - // std::list::iterator it; - for (auto it = theRandomVariables.lognormalRVs.begin(); it != theRandomVariables.lognormalRVs.end(); it++) - dakotaFile << it->mean << " "; - dakotaFile << "\n std_deviations = "; - for (auto it = theRandomVariables.lognormalRVs.begin(); it != theRandomVariables.lognormalRVs.end(); it++) - dakotaFile << it->stdDev << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.lognormalRVs.begin(); it != theRandomVariables.lognormalRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + dakotaFile << "\n\n"; + } + + int numLognormal = theRandomVariables.lognormalRVs.size(); + if (numLognormal > 0) { + dakotaFile << " lognormal_uncertain = " << numLognormal << "\n means = "; + // std::list::iterator it; + for (auto it = theRandomVariables.lognormalRVs.begin(); it != theRandomVariables.lognormalRVs.end(); it++) + dakotaFile << it->mean << " "; + dakotaFile << "\n std_deviations = "; + for (auto it = theRandomVariables.lognormalRVs.begin(); it != theRandomVariables.lognormalRVs.end(); it++) + dakotaFile << it->stdDev << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.lognormalRVs.begin(); it != theRandomVariables.lognormalRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); } + dakotaFile << "\n\n"; + } - int numUniform = theRandomVariables.uniformRVs.size(); - if (numUniform > 0) { - dakotaFile << " uniform_uncertain = " << numUniform << "\n lower_bounds = "; - // std::list::iterator it; - for (auto it = theRandomVariables.uniformRVs.begin(); it != theRandomVariables.uniformRVs.end(); it++) - dakotaFile << it->lowerBound << " "; - dakotaFile << "\n upper_bound = "; - for (auto it = theRandomVariables.uniformRVs.begin(); it != theRandomVariables.uniformRVs.end(); it++) - dakotaFile << it->upperBound << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.uniformRVs.begin(); it != theRandomVariables.uniformRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + int numUniform = theRandomVariables.uniformRVs.size(); + if (numUniform > 0) { + dakotaFile << " uniform_uncertain = " << numUniform << "\n lower_bounds = "; + // std::list::iterator it; + for (auto it = theRandomVariables.uniformRVs.begin(); it != theRandomVariables.uniformRVs.end(); it++) + dakotaFile << it->lowerBound << " "; + dakotaFile << "\n upper_bound = "; + for (auto it = theRandomVariables.uniformRVs.begin(); it != theRandomVariables.uniformRVs.end(); it++) + dakotaFile << it->upperBound << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.uniformRVs.begin(); it != theRandomVariables.uniformRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); } + dakotaFile << "\n\n"; + } - int numWeibull = theRandomVariables.weibullRVs.size(); - if (numWeibull > 0) { - dakotaFile 
<< " weibull_uncertain = " << numWeibull << "\n alphas = "; - // std::list::iterator it; - for (auto it = theRandomVariables.weibullRVs.begin(); it != theRandomVariables.weibullRVs.end(); it++) - dakotaFile << it->shapeParam << " "; - dakotaFile << "\n betas = "; - for (auto it = theRandomVariables.weibullRVs.begin(); it != theRandomVariables.weibullRVs.end(); it++) - dakotaFile << it->scaleParam << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.weibullRVs.begin(); it != theRandomVariables.weibullRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + int numWeibull = theRandomVariables.weibullRVs.size(); + if (numWeibull > 0) { + dakotaFile << " weibull_uncertain = " << numWeibull << "\n alphas = "; + // std::list::iterator it; + for (auto it = theRandomVariables.weibullRVs.begin(); it != theRandomVariables.weibullRVs.end(); it++) + dakotaFile << it->shapeParam << " "; + dakotaFile << "\n betas = "; + for (auto it = theRandomVariables.weibullRVs.begin(); it != theRandomVariables.weibullRVs.end(); it++) + dakotaFile << it->scaleParam << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.weibullRVs.begin(); it != theRandomVariables.weibullRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); } + dakotaFile << "\n\n"; + } - int numGumbell = theRandomVariables.gumbellRVs.size(); - if (numGumbell > 0) { - dakotaFile << " gumbel_uncertain = " << numGumbell << "\n alphas = "; - // std::list::iterator it; - for (auto it = theRandomVariables.gumbellRVs.begin(); it != theRandomVariables.gumbellRVs.end(); it++) - dakotaFile << it->alphas << " "; - dakotaFile << "\n betas = "; - for (auto it = theRandomVariables.gumbellRVs.begin(); it != theRandomVariables.gumbellRVs.end(); it++) - dakotaFile << it->betas << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.gumbellRVs.begin(); it != theRandomVariables.gumbellRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + int numGumbell = theRandomVariables.gumbellRVs.size(); + if (numGumbell > 0) { + dakotaFile << " gumbel_uncertain = " << numGumbell << "\n alphas = "; + // std::list::iterator it; + for (auto it = theRandomVariables.gumbellRVs.begin(); it != theRandomVariables.gumbellRVs.end(); it++) + dakotaFile << it->alphas << " "; + dakotaFile << "\n betas = "; + for (auto it = theRandomVariables.gumbellRVs.begin(); it != theRandomVariables.gumbellRVs.end(); it++) + dakotaFile << it->betas << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.gumbellRVs.begin(); it != theRandomVariables.gumbellRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); } + dakotaFile << "\n\n"; + } - int numGamma = theRandomVariables.gammaRVs.size(); - if (numGamma > 0) { - dakotaFile << " gamma_uncertain = " << numGamma << "\n alphas = "; - std::list::iterator it; - for (auto it = theRandomVariables.gammaRVs.begin(); it != theRandomVariables.gammaRVs.end(); it++) - dakotaFile << it->alphas << " "; - dakotaFile << "\n betas = "; - for (auto it = theRandomVariables.gammaRVs.begin(); it != theRandomVariables.gammaRVs.end(); it++) - dakotaFile << it->betas << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.gammaRVs.begin(); it != theRandomVariables.gammaRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - 
rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + int numGamma = theRandomVariables.gammaRVs.size(); + if (numGamma > 0) { + dakotaFile << " gamma_uncertain = " << numGamma << "\n alphas = "; + std::list::iterator it; + for (auto it = theRandomVariables.gammaRVs.begin(); it != theRandomVariables.gammaRVs.end(); it++) + dakotaFile << it->alphas << " "; + dakotaFile << "\n betas = "; + for (auto it = theRandomVariables.gammaRVs.begin(); it != theRandomVariables.gammaRVs.end(); it++) + dakotaFile << it->betas << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.gammaRVs.begin(); it != theRandomVariables.gammaRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); } + dakotaFile << "\n\n"; + } - int numBeta = theRandomVariables.betaRVs.size(); - if (numBeta > 0) { - dakotaFile << " beta_uncertain = " << numBeta << "\n alphas = "; - //std::list::iterator it; - for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) - dakotaFile << it->alphas << " "; - dakotaFile << "\n betas = "; - for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) - dakotaFile << it->betas << " "; - dakotaFile << "\n lower_bounds = "; - for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) - dakotaFile << it->lowerBound << " "; - dakotaFile << "\n upper_bounds = "; - for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) - dakotaFile << it->upperBound << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); - } - dakotaFile << "\n\n"; + int numBeta = theRandomVariables.betaRVs.size(); + if (numBeta > 0) { + dakotaFile << " beta_uncertain = " << numBeta << "\n alphas = "; + //std::list::iterator it; + for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) + dakotaFile << it->alphas << " "; + dakotaFile << "\n betas = "; + for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) + dakotaFile << it->betas << " "; + dakotaFile << "\n lower_bounds = "; + for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) + dakotaFile << it->lowerBound << " "; + dakotaFile << "\n upper_bounds = "; + for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) + dakotaFile << it->upperBound << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); } + dakotaFile << "\n\n"; + } - int numExponential = theRandomVariables.exponentialRVs.size(); - if (numExponential > 0) { - dakotaFile << " exponential_uncertain = " << numExponential << "\n betas = "; - for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) - dakotaFile << it->alphas << " "; - dakotaFile << "\n descriptors = "; - for (auto it = theRandomVariables.exponentialRVs.begin(); it != theRandomVariables.exponentialRVs.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - rvList.push_back(it->name); + int numExponential = theRandomVariables.exponentialRVs.size(); + if (numExponential > 0) { + dakotaFile << " exponential_uncertain = " 
<< numExponential << "\n betas = "; + for (auto it = theRandomVariables.betaRVs.begin(); it != theRandomVariables.betaRVs.end(); it++) + dakotaFile << it->alphas << " "; + dakotaFile << "\n descriptors = "; + for (auto it = theRandomVariables.exponentialRVs.begin(); it != theRandomVariables.exponentialRVs.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + rvList.push_back(it->name); + } + dakotaFile << "\n\n"; + } + + // int numConstant = theRandomVariables.constantRVs.size(); + // if (numConstant > 0) { + // dakotaFile << " discrete_state_set \n real = " << numConstant; + // dakotaFile << "\n elements_per_variable = "; + // for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) + // dakotaFile << "1 "; //std::list::iterator it; + // dakotaFile << "\n elements = "; + // for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) + // dakotaFile << it->value << " "; + // dakotaFile << "\n descriptors = "; + // for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) { + // dakotaFile << "\'" << it->name << "\' "; + // rvList.push_back(it->name); + // } + // dakotaFile << "\n\n"; + // } + + // discreteUncertainSetRVs - int, string, real + std::list theDiscreteIntList; + std::list theDiscreteRealList; + std::list theDiscreteStringList; + + int numDiscreteInt = theRandomVariables.discreteUncertainIntegerSetRVs.size(); + int numDiscreteString = theRandomVariables.discreteDesignSetRVs.size(); + int numDiscreteReal = theRandomVariables.discreteUncertainRealSetRVs.size(); + + // std::cerr << "DISCRETE: numInt: " << numDiscreteInt << " numReal: " << numDiscreteReal << "\n"; + + if (numDiscreteInt != 0 || numDiscreteReal != 0 || numDiscreteString != 0) { + + dakotaFile << " discrete_uncertain_set"; + + if (numDiscreteInt != 0) { + + theDiscreteIntList = theRandomVariables.discreteUncertainIntegerSetRVs; + + dakotaFile << "\n integer = " << theDiscreteIntList.size(); + dakotaFile << "\n num_set_values = "; + for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) + dakotaFile << it->elements.size() << " "; + dakotaFile << "\n set_values = "; + for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) { + auto elementsList = it->elements; + for (auto eit = elementsList.begin(); eit != elementsList.end(); eit++) + dakotaFile << *eit << " "; + dakotaFile << " "; } - dakotaFile << "\n\n"; - } - - // int numConstant = theRandomVariables.constantRVs.size(); - // if (numConstant > 0) { - // dakotaFile << " discrete_state_set \n real = " << numConstant; - // dakotaFile << "\n elements_per_variable = "; - // for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) - // dakotaFile << "1 "; //std::list::iterator it; - // dakotaFile << "\n elements = "; - // for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) - // dakotaFile << it->value << " "; - // dakotaFile << "\n descriptors = "; - // for (auto it = theRandomVariables.constantRVs.begin(); it != theRandomVariables.constantRVs.end(); it++) { - // dakotaFile << "\'" << it->name << "\' "; - // rvList.push_back(it->name); - // } - // dakotaFile << "\n\n"; - // } - - // discreteUncertainSetRVs - int, string, real - std::list theDiscreteIntList; - std::list theDiscreteRealList; - std::list theDiscreteStringList; - - int numDiscreteInt = 
theRandomVariables.discreteUncertainIntegerSetRVs.size(); - int numDiscreteString = theRandomVariables.discreteDesignSetRVs.size(); - int numDiscreteReal = theRandomVariables.discreteUncertainRealSetRVs.size(); - - // std::cerr << "DISCRETE: numInt: " << numDiscreteInt << " numReal: " << numDiscreteReal << "\n"; - - if (numDiscreteInt != 0 || numDiscreteReal != 0 || numDiscreteString != 0) { - - dakotaFile << " discrete_uncertain_set"; - - if (numDiscreteInt != 0) { - - theDiscreteIntList = theRandomVariables.discreteUncertainIntegerSetRVs; - - dakotaFile << "\n integer = " << theDiscreteIntList.size(); - dakotaFile << "\n num_set_values = "; - for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) - dakotaFile << it->elements.size() << " "; - dakotaFile << "\n set_values = "; - for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) { - auto elementsList = it->elements; - for (auto eit = elementsList.begin(); eit != elementsList.end(); eit++) - dakotaFile << *eit << " "; - dakotaFile << " "; - } - dakotaFile << "\n set_probabilities = "; - for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) { - auto probsList = it->weights; - for (auto pit = probsList.begin(); pit != probsList.end(); pit++) - dakotaFile << *pit << " "; - dakotaFile << " "; - } - dakotaFile << "\n descriptors = "; - for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) - dakotaFile << "\'" << it->name << "\' "; + dakotaFile << "\n set_probabilities = "; + for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) { + auto probsList = it->weights; + for (auto pit = probsList.begin(); pit != probsList.end(); pit++) + dakotaFile << *pit << " "; + dakotaFile << " "; } + dakotaFile << "\n descriptors = "; + for (auto it = theDiscreteIntList.begin(); it != theDiscreteIntList.end(); it++) + dakotaFile << "\'" << it->name << "\' "; + } - if (numDiscreteReal != 0) { - - theDiscreteRealList = theRandomVariables.discreteUncertainRealSetRVs; - - dakotaFile << "\n real = " << theDiscreteRealList.size(); - dakotaFile << "\n num_set_values = "; - for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) - dakotaFile << it->elements.size() << " "; - dakotaFile << "\n set_values = "; - for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) { - auto elementsList = it->elements; - for (auto eit = elementsList.begin(); eit != elementsList.end(); eit++) - dakotaFile << *eit << " "; - dakotaFile << " "; - } - dakotaFile << "\n set_probabilities = "; - for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) { - auto probsList = it->weights; - for (auto pit = probsList.begin(); pit != probsList.end(); pit++) - dakotaFile << *pit << " "; - dakotaFile << " "; - } - dakotaFile << "\n descriptors = "; - for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) - dakotaFile << "\'" << it->name << "\' "; + if (numDiscreteReal != 0) { + + theDiscreteRealList = theRandomVariables.discreteUncertainRealSetRVs; + + dakotaFile << "\n real = " << theDiscreteRealList.size(); + dakotaFile << "\n num_set_values = "; + for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) + dakotaFile << it->elements.size() << " "; + dakotaFile << "\n set_values = "; + for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) { + auto elementsList = it->elements; + for (auto eit = 
elementsList.begin(); eit != elementsList.end(); eit++) + dakotaFile << *eit << " "; + dakotaFile << " "; } - - - if (numDiscreteString != 0) { - - theDiscreteStringList = theRandomVariables.discreteDesignSetRVs; - - dakotaFile << "\n string = " << theDiscreteStringList.size(); - dakotaFile << "\n num_set_values = "; - for (auto it = theDiscreteStringList.begin(); it != theDiscreteStringList.end(); it++) - dakotaFile << it->elements.size() << " "; - dakotaFile << "\n set_values = "; - for (auto it = theDiscreteStringList.begin(); it != theDiscreteStringList.end(); it++) { - it->elements.sort(); // sort the elements NEEDED THOUGH NOT IN DAKOTA DOC! - for (auto element = it->elements.begin(); element != it->elements.end(); element++) - dakotaFile << " \'" << *element << "\'"; - } - dakotaFile << "\n descriptors = "; - for (auto it = theDiscreteStringList.begin(); it != theDiscreteStringList.end(); it++) { - dakotaFile << "\'" << it->name << "\' "; - } + dakotaFile << "\n set_probabilities = "; + for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) { + auto probsList = it->weights; + for (auto pit = probsList.begin(); pit != probsList.end(); pit++) + dakotaFile << *pit << " "; + dakotaFile << " "; } - dakotaFile << "\n\n"; + dakotaFile << "\n descriptors = "; + for (auto it = theDiscreteRealList.begin(); it != theDiscreteRealList.end(); it++) + dakotaFile << "\'" << it->name << "\' "; } - // if no random variables .. create 1 call & call it dummy! - int numRV = theRandomVariables.numRandomVariables; - if (numRV == 0) { - dakotaFile << " discrete_uncertain_set\n string 1 \n num_set_values = 2"; - dakotaFile << "\n set_values '1' '2'"; - dakotaFile << "\n descriptors = dummy\n"; - rvList.push_back(std::string("dummy")); + if (numDiscreteString != 0) { + + theDiscreteStringList = theRandomVariables.discreteDesignSetRVs; + + dakotaFile << "\n string = " << theDiscreteStringList.size(); + dakotaFile << "\n num_set_values = "; + for (auto it = theDiscreteStringList.begin(); it != theDiscreteStringList.end(); it++) + dakotaFile << it->elements.size() << " "; + dakotaFile << "\n set_values = "; + for (auto it = theDiscreteStringList.begin(); it != theDiscreteStringList.end(); it++) { + it->elements.sort(); // sort the elements NEEDED THOUGH NOT IN DAKOTA DOC! + for (auto element = it->elements.begin(); element != it->elements.end(); element++) + dakotaFile << " \'" << *element << "\'"; + } + dakotaFile << "\n descriptors = "; + for (auto it = theDiscreteStringList.begin(); it != theDiscreteStringList.end(); it++) { + dakotaFile << "\'" << it->name << "\' "; + } } dakotaFile << "\n\n"; + } + + + // if no random variables .. create 1 call & call it dummy! 
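// [Editor's illustration, not part of the patch] Every descriptor block above is
// produced by the same stream-emitter pattern: emit the set size, then one
// keyword list per field, iterating the RV list once per keyword. A minimal,
// self-contained sketch of that pattern; the DiscreteStringRV struct and the
// values are hypothetical stand-ins for the real randomVariables lists:
#include <iostream>
#include <list>
#include <string>

struct DiscreteStringRV {            // stand-in for the real RV struct
  std::string name;
  std::list<std::string> elements;
};

int main() {
  std::list<DiscreteStringRV> rvs{{"material", {"steel", "wood", "concrete"}}};
  std::cout << " discrete_uncertain_set";
  std::cout << "\n   string = " << rvs.size();
  std::cout << "\n   num_set_values = ";
  for (const auto &rv : rvs) std::cout << rv.elements.size() << " ";
  std::cout << "\n   set_values = ";
  for (auto &rv : rvs) {
    rv.elements.sort();              // Dakota expects sorted elements, per the comment above
    for (const auto &e : rv.elements) std::cout << " '" << e << "'";
  }
  std::cout << "\n   descriptors = ";
  for (const auto &rv : rvs) std::cout << "'" << rv.name << "' ";
  std::cout << "\n";
  return 0;
}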
+ int numRV = theRandomVariables.numRandomVariables; + if (numRV == 0) { + dakotaFile << " discrete_uncertain_set\n string 1 \n num_set_values = 2"; + dakotaFile << "\n set_values '1' '2'"; + dakotaFile << "\n descriptors = dummy\n"; + rvList.push_back(std::string("dummy")); + } + dakotaFile << "\n\n"; + + int corrSize = theRandomVariables.ordering.size(); - int corrSize = theRandomVariables.ordering.size(); - - /* ***************************************************** - if (!theRandomVariables.corrMat.empty()) { - - if (theRandomVariables.corrMat[0] !=0 ) { - - std::cerr << "NEW ORDER: \n" << " size: " << corrSize << "\n"; - std::vector newOrder; - - for (int i=0; i<18; i++) { - for (int j=0; j newOrder; + + for (int i=0; i<18; i++) { + for (int j=0; j &edpList, - const char *calFileName, - std::vector &scaleFactors) { - + const char *calFileName, + std::vector &scaleFactors) { + int numResponses = 0; dakotaFile << "responses\n"; @@ -485,35 +485,35 @@ writeResponse(std::ostream &dakotaFile, // // quoFEM .. just a list of straight EDP // - + numResponses = json_array_size(rootEDP); - + std::vector lenList(numResponses, 1); - + int numFieldResponses = 0; int numScalarResponses = 0; - - if (!(idResponse.compare("calibration") == 0 || idResponse.compare("BayesCalibration") == 0 || idResponse.compare("optimization") == 0)){ + + if (!(idResponse.compare("calibration") == 0 || idResponse.compare("BayesCalibration") == 0 || idResponse.compare("optimization") == 0)) { dakotaFile << " response_functions = " << numResponses << "\n response_descriptors = "; } else if (idResponse.compare("optimization") == 0) { dakotaFile << " objective_functions = " << numResponses << "\n descriptors = "; } else dakotaFile << " calibration_terms = " << numResponses << "\n response_descriptors = "; - - for (int j=0; j 0) { if (!(idResponse.compare("calibration") == 0 || idResponse.compare("BayesCalibration") == 0)) { if (numScalarResponses > 0) { - dakotaFile << "\n scalar_responses = " << numScalarResponses; + dakotaFile << "\n scalar_responses = " << numScalarResponses; } dakotaFile << "\n field_responses = " << numFieldResponses << "\n lengths = "; } else { if (numScalarResponses > 0) { - dakotaFile << "\n scalar_calibration_terms = " << numScalarResponses; + dakotaFile << "\n scalar_calibration_terms = " << numScalarResponses; } dakotaFile << "\n field_calibration_terms = " << numFieldResponses << "\n lengths = "; } @@ -535,12 +535,12 @@ writeResponse(std::ostream &dakotaFile, json_t *theEDP_Item = json_array_get(rootEDP, j); std::string varType = json_string_value(json_object_get(theEDP_Item, "type")); if (varType.compare("field") == 0) { - int len = json_integer_value(json_object_get(theEDP_Item, "length")); - dakotaFile << len << " "; - lenList[j] = len; + int len = json_integer_value(json_object_get(theEDP_Item, "length")); + dakotaFile << len << " "; + lenList[j] = len; } } - + // bool readFieldCoords = true; // if (readFieldCoords) { // dakotaFile << "\n read_field_coordinates" << "\n num_coordinates_per_field = "; @@ -553,40 +553,40 @@ writeResponse(std::ostream &dakotaFile, // // } } - + if ((idResponse.compare("calibration") == 0) || (idResponse.compare("BayesCalibration") == 0)) { std::vector errFilenameList = {}; std::stringstream errTypeStringStream; - + int numExp = processDataFiles(calFileName, edpList, lenList, numResponses, numFieldResponses, errFilenameList, - errTypeStringStream, idResponse, scaleFactors); - + errTypeStringStream, idResponse, scaleFactors); + bool readCalibrationData = true; if 
(readCalibrationData) { if (numFieldResponses > 0) { - int nExp = numExp; - if (nExp < 1) { - nExp = 1; - } - dakotaFile << "\n calibration_data"; - dakotaFile << "\n num_experiments = " << nExp; - if (idResponse.compare("BayesCalibration") == 0) { - dakotaFile << "\n experiment_variance_type = "; - dakotaFile << errTypeStringStream.str(); - } + int nExp = numExp; + if (nExp < 1) { + nExp = 1; + } + dakotaFile << "\n calibration_data"; + dakotaFile << "\n num_experiments = " << nExp; + if (idResponse.compare("BayesCalibration") == 0) { + dakotaFile << "\n experiment_variance_type = "; + dakotaFile << errTypeStringStream.str(); + } } else { - int nExp = numExp; - if (nExp < 1) { - nExp = 1; - } - dakotaFile << "\n calibration_data_file = 'quoFEMScalarCalibrationData.cal'"; - dakotaFile << "\n freeform"; - dakotaFile << "\n num_experiments = " << nExp; - if (idResponse.compare("BayesCalibration") == 0) { - dakotaFile << "\n experiment_variance_type = "; - dakotaFile << errTypeStringStream.str(); - } + int nExp = numExp; + if (nExp < 1) { + nExp = 1; + } + dakotaFile << "\n calibration_data_file = 'quoFEMScalarCalibrationData.cal'"; + dakotaFile << "\n freeform"; + dakotaFile << "\n num_experiments = " << nExp; + if (idResponse.compare("BayesCalibration") == 0) { + dakotaFile << "\n experiment_variance_type = "; + dakotaFile << errTypeStringStream.str(); + } } } } @@ -608,13 +608,13 @@ writeResponse(std::ostream &dakotaFile, int writeDakotaInputFile(std::ostream &dakotaFile, - json_t *uqData, - json_t *rootEDP, - struct randomVariables &theRandomVariables, - std::string &workflowDriver, - std::vector &rvList, - std::vector &edpList, - int evalConcurrency) { + json_t *uqData, + json_t *rootEDP, + struct randomVariables &theRandomVariables, + std::string &workflowDriver, + std::vector &rvList, + std::vector &edpList, + int evalConcurrency) { int evaluationConcurrency = evalConcurrency; @@ -627,7 +627,7 @@ writeDakotaInputFile(std::ostream &dakotaFile, } // Save all the working dirs? 
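// [Editor's illustration, not part of the patch] The saveWorkDir handling that
// follows uses the jansson flag-reading idiom seen throughout this file: fetch
// a borrowed reference with json_object_get and only override the default when
// the key is present. A minimal runnable sketch; the inline JSON string is a
// hypothetical stand-in for the parsed workflow input:
#include <iostream>
#include <jansson.h>

int main() {
  json_error_t err;
  json_t *uqData = json_loads("{\"saveWorkDir\": false}", 0, &err);
  if (uqData == NULL) return 1;

  bool saveWorkDirs = true;                        // default: keep the working directories
  json_t *saveDirs = json_object_get(uqData, "saveWorkDir");  // borrowed reference
  if (saveDirs != NULL && json_is_false(saveDirs))
    saveWorkDirs = false;                          // only an explicit false turns them off

  std::cout << "saveWorkDirs = " << saveWorkDirs << "\n";
  json_decref(uqData);
  return 0;
}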
bool saveWorkDirs = true; - json_t *saveDirs = json_object_get(uqData,"saveWorkDir"); + json_t *saveDirs = json_object_get(uqData, "saveWorkDir"); if (saveDirs != NULL) { if (json_is_false(saveDirs)) saveWorkDirs = false; @@ -638,10 +638,10 @@ writeDakotaInputFile(std::ostream &dakotaFile, if (strcmp(type, "Sensitivity Analysis") == 0) sensitivityAnalysis = true; - json_t *EDPs = json_object_get(rootEDP,"EngineeringDemandParameters"); + json_t *EDPs = json_object_get(rootEDP, "EngineeringDemandParameters"); int numResponses = 0; if (EDPs != NULL) { - numResponses = json_integer_value(json_object_get(rootEDP,"total_number_edp")); + numResponses = json_integer_value(json_object_get(rootEDP, "total_number_edp")); } else { numResponses = json_array_size(rootEDP); } @@ -652,19 +652,19 @@ writeDakotaInputFile(std::ostream &dakotaFile, if ((strcmp(type, "Forward Propagation") == 0) || sensitivityAnalysis == true) { - json_t *samplingMethodData = json_object_get(uqData,"samplingMethodData"); + json_t *samplingMethodData = json_object_get(uqData, "samplingMethodData"); - const char *method = json_string_value(json_object_get(samplingMethodData,"method")); + const char *method = json_string_value(json_object_get(samplingMethodData, "method")); - if (strcmp(method,"Monte Carlo")==0) { - int numSamples = json_integer_value(json_object_get(samplingMethodData,"samples")); - int seed = json_integer_value(json_object_get(samplingMethodData,"seed")); + if (strcmp(method, "Monte Carlo") == 0) { + int numSamples = json_integer_value(json_object_get(samplingMethodData, "samples")); + int seed = json_integer_value(json_object_get(samplingMethodData, "seed")); dakotaFile << "environment \n tabular_data \n tabular_data_file = 'dakotaTab.out' \n\n"; dakotaFile << "method, \n sampling \n sample_type = random \n samples = " << numSamples << " \n seed = " << seed << "\n\n"; if (sensitivityAnalysis == true) - dakotaFile << "variance_based_decomp \n\n"; + dakotaFile << "variance_based_decomp \n\n"; const char * calFileName = new char[1]; std::string emptyString; @@ -674,10 +674,10 @@ writeDakotaInputFile(std::ostream &dakotaFile, writeResponse(dakotaFile, rootEDP, emptyString, false, false, edpList, calFileName, scaleFactors); } - else if (strcmp(method,"LHS")==0) { + else if (strcmp(method, "LHS") == 0) { - int numSamples = json_integer_value(json_object_get(samplingMethodData,"samples")); - int seed = json_integer_value(json_object_get(samplingMethodData,"seed")); + int numSamples = json_integer_value(json_object_get(samplingMethodData, "samples")); + int seed = json_integer_value(json_object_get(samplingMethodData, "seed")); //std::cerr << numSamples << " " << seed; @@ -685,7 +685,7 @@ writeDakotaInputFile(std::ostream &dakotaFile, dakotaFile << "method,\n sampling\n sample_type = lhs \n samples = " << numSamples << " \n seed = " << seed << "\n\n"; if (sensitivityAnalysis == true) - dakotaFile << "variance_based_decomp \n\n"; + dakotaFile << "variance_based_decomp \n\n"; const char * calFileName = new char[1]; @@ -714,23 +714,23 @@ writeDakotaInputFile(std::ostream &dakotaFile, } */ // } - else if (strcmp(method,"Gaussian Process Regression")==0) { + else if (strcmp(method, "Gaussian Process Regression") == 0) { - int trainingSamples = json_integer_value(json_object_get(samplingMethodData,"trainingSamples")); - int trainingSeed = json_integer_value(json_object_get(samplingMethodData,"trainingSeed")); - const char *trainMethod = json_string_value(json_object_get(samplingMethodData,"trainingMethod")); - int 
samplingSamples = json_integer_value(json_object_get(samplingMethodData,"samplingSamples")); - int samplingSeed = json_integer_value(json_object_get(samplingMethodData,"samplingSeed")); - const char *sampleMethod = json_string_value(json_object_get(samplingMethodData,"samplingMethod")); + int trainingSamples = json_integer_value(json_object_get(samplingMethodData, "trainingSamples")); + int trainingSeed = json_integer_value(json_object_get(samplingMethodData, "trainingSeed")); + const char *trainMethod = json_string_value(json_object_get(samplingMethodData, "trainingMethod")); + int samplingSamples = json_integer_value(json_object_get(samplingMethodData, "samplingSamples")); + int samplingSeed = json_integer_value(json_object_get(samplingMethodData, "samplingSeed")); + const char *sampleMethod = json_string_value(json_object_get(samplingMethodData, "samplingMethod")); - const char *surrogateMethod = json_string_value(json_object_get(samplingMethodData,"surrogateSurfaceMethod")); + const char *surrogateMethod = json_string_value(json_object_get(samplingMethodData, "surrogateSurfaceMethod")); std::string trainingMethod(trainMethod); std::string samplingMethod(sampleMethod); - if (strcmp(trainMethod,"Monte Carlo") == 0) - trainingMethod = "random"; - if (strcmp(sampleMethod,"Monte Carlo") == 0) - samplingMethod = "random"; + if (strcmp(trainMethod, "Monte Carlo") == 0) + trainingMethod = "random"; + if (strcmp(sampleMethod, "Monte Carlo") == 0) + samplingMethod = "random"; dakotaFile << "environment \n method_pointer = 'SurrogateMethod' \n tabular_data \n tabular_data_file = 'dakotaTab.out'\n"; @@ -738,14 +738,14 @@ writeDakotaInputFile(std::ostream &dakotaFile, dakotaFile << "method \n id_method = 'SurrogateMethod' \n model_pointer = 'SurrogateModel'\n"; dakotaFile << " sampling \n samples = " << samplingSamples << "\n seed = " << samplingSeed << "\n sample_type = " - << samplingMethod << "\n\n"; + << samplingMethod << "\n\n"; dakotaFile << "model \n id_model = 'SurrogateModel' \n surrogate global \n dace_method_pointer = 'TrainingMethod'\n " - << surrogateMethod << "\n\n"; + << surrogateMethod << "\n\n"; dakotaFile << "method \n id_method = 'TrainingMethod' \n model_pointer = 'TrainingModel'\n"; dakotaFile << " sampling \n samples = " << trainingSamples << "\n seed = " << trainingSeed << "\n sample_type = " - << trainingMethod << "\n\n"; + << trainingMethod << "\n\n"; dakotaFile << "model \n id_model = 'TrainingModel' \n single \n interface_pointer = 'SimulationInterface'"; const char * calFileName = new char[1]; @@ -758,29 +758,29 @@ writeDakotaInputFile(std::ostream &dakotaFile, } - else if (strcmp(method,"Polynomial Chaos Expansion")==0) { + else if (strcmp(method, "Polynomial Chaos Expansion") == 0) { - const char *dataMethod = json_string_value(json_object_get(samplingMethodData,"dataMethod")); - int intValue = json_integer_value(json_object_get(samplingMethodData,"level")); - int samplingSeed = json_integer_value(json_object_get(samplingMethodData,"samplingSeed")); - int samplingSamples = json_integer_value(json_object_get(samplingMethodData,"samplingSamples")); - const char *sampleMethod = json_string_value(json_object_get(samplingMethodData,"samplingMethod")); + const char *dataMethod = json_string_value(json_object_get(samplingMethodData, "dataMethod")); + int intValue = json_integer_value(json_object_get(samplingMethodData, "level")); + int samplingSeed = json_integer_value(json_object_get(samplingMethodData, "samplingSeed")); + int samplingSamples = 
json_integer_value(json_object_get(samplingMethodData, "samplingSamples")); + const char *sampleMethod = json_string_value(json_object_get(samplingMethodData, "samplingMethod")); std::string pceMethod; - if (strcmp(dataMethod,"Quadrature") == 0) - pceMethod = "quadrature_order = "; - else if (strcmp(dataMethod,"Smolyak Sparse_Grid") == 0) - pceMethod = "sparse_grid_level = "; - else if (strcmp(dataMethod,"Stroud Cubature") == 0) - pceMethod = "cubature_integrand = "; - else if (strcmp(dataMethod,"Orthogonal Least_Interpolation") == 0) - pceMethod = "orthogonal_least_squares collocation_points = "; + if (strcmp(dataMethod, "Quadrature") == 0) + pceMethod = "quadrature_order = "; + else if (strcmp(dataMethod, "Smolyak Sparse_Grid") == 0) + pceMethod = "sparse_grid_level = "; + else if (strcmp(dataMethod, "Stroud Cubature") == 0) + pceMethod = "cubature_integrand = "; + else if (strcmp(dataMethod, "Orthogonal Least_Interpolation") == 0) + pceMethod = "orthogonal_least_squares collocation_points = "; else - pceMethod = "quadrature_order = "; + pceMethod = "quadrature_order = "; std::string samplingMethod(sampleMethod); - if (strcmp(sampleMethod,"Monte Carlo") == 0) - samplingMethod = "random"; + if (strcmp(sampleMethod, "Monte Carlo") == 0) + samplingMethod = "random"; dakotaFile << "environment \n tabular_data \n tabular_data_file = 'a.out'\n\n"; // a.out for trial data const char * calFileName = new char[1]; @@ -793,10 +793,10 @@ writeDakotaInputFile(std::ostream &dakotaFile, dakotaFile << "method \n polynomial_chaos \n " << pceMethod << intValue; dakotaFile << "\n samples_on_emulator = " << samplingSamples << "\n seed = " << samplingSeed << "\n sample_type = " - << samplingMethod << "\n"; + << samplingMethod << "\n"; dakotaFile << " probability_levels = "; - for (int i=0; i> tmp) { -// // maybe some checks, i.e. , -// dakotaFile << tmp << " "; +// // maybe some checks, i.e. 
, +// dakotaFile << tmp << " "; // } // dakotaFile << "\n"; // } } else if ((strcmp(type, "Inverse Problem") == 0)) { - json_t *methodData = json_object_get(uqData,"bayesianCalibrationMethodData"); + json_t *methodData = json_object_get(uqData, "bayesianCalibrationMethodData"); - const char *method = json_string_value(json_object_get(methodData,"method")); + const char *method = json_string_value(json_object_get(methodData, "method")); /* const char *emulator = json_string_value(json_object_get(methodData,"emulator")); @@ -1013,46 +1013,46 @@ writeDakotaInputFile(std::ostream &dakotaFile, emulatorString = "sc"; */ - int chainSamples = json_integer_value(json_object_get(methodData,"chainSamples")); - int seed = json_integer_value(json_object_get(methodData,"seed")); - int burnInSamples = json_integer_value(json_object_get(methodData,"burnInSamples")); - int jumpStep = json_integer_value(json_object_get(methodData,"jumpStep")); + int chainSamples = json_integer_value(json_object_get(methodData, "chainSamples")); + int seed = json_integer_value(json_object_get(methodData, "seed")); + int burnInSamples = json_integer_value(json_object_get(methodData, "burnInSamples")); + int jumpStep = json_integer_value(json_object_get(methodData, "jumpStep")); // int maxIterations = json_integer_value(json_object_get(methodData,"maxIter")); // double tol = json_number_value(json_object_get(methodData,"tol")); const char *calFileName = json_string_value(json_object_get(methodData, "calibrationDataFile")); - if (strcmp(method,"DREAM")==0) { + if (strcmp(method, "DREAM") == 0) { - int chains = json_integer_value(json_object_get(methodData,"chains")); + int chains = json_integer_value(json_object_get(methodData, "chains")); dakotaFile << "environment \n tabular_data \n tabular_data_file = 'dakotaTab.out' \n\n"; dakotaFile << "method \n bayes_calibration dream " - << "\n chain_samples = " << chainSamples - << "\n chains = " << chains - << "\n jump_step = " << jumpStep - << "\n burn_in_samples = " << burnInSamples - << "\n seed = " << seed - << "\n calibrate_error_multipliers per_response"; + << "\n chain_samples = " << chainSamples + << "\n chains = " << chains + << "\n jump_step = " << jumpStep + << "\n burn_in_samples = " << burnInSamples + << "\n seed = " << seed + << "\n calibrate_error_multipliers per_response"; - dakotaFile << "\n scaling\n" << "\n"; + dakotaFile << "\n scaling\n" << "\n"; } else { - const char *mcmc = json_string_value(json_object_get(methodData,"mcmcMethod")); + const char *mcmc = json_string_value(json_object_get(methodData, "mcmcMethod")); std::string mcmcString("dram"); - if (strcmp(mcmc,"Delayed Rejection")==0) - mcmcString = "delayed_rejection"; - else if (strcmp(mcmc,"Adaptive Metropolis")==0) - mcmcString = "adaptive_metropolis"; - else if (strcmp(mcmc,"Metropolis Hastings")==0) - mcmcString = "metropolis_hastings"; - else if (strcmp(mcmc,"Multilevel")==0) - mcmcString = "multilevel"; + if (strcmp(mcmc, "Delayed Rejection") == 0) + mcmcString = "delayed_rejection"; + else if (strcmp(mcmc, "Adaptive Metropolis") == 0) + mcmcString = "adaptive_metropolis"; + else if (strcmp(mcmc, "Metropolis Hastings") == 0) + mcmcString = "metropolis_hastings"; + else if (strcmp(mcmc, "Multilevel") == 0) + mcmcString = "multilevel"; dakotaFile << "environment \n tabular_data \n tabular_data_file = 'dakotaTab.out' \n\n"; dakotaFile << "method \n bayes_calibration queso\n " << mcmc - << "\n chain_samples = " << chainSamples - << "\n burn_in_samples = " << burnInSamples << "\n\n"; + << "\n 
chain_samples = " << chainSamples + << "\n burn_in_samples = " << burnInSamples << "\n\n"; } std::string calibrationString("BayesCalibration"); @@ -1067,20 +1067,20 @@ writeDakotaInputFile(std::ostream &dakotaFile, int numRVs = theRandomVariables.numRandomVariables; - json_t *methodData = json_object_get(uqData,"optimizationMethodData"); + json_t *methodData = json_object_get(uqData, "optimizationMethodData"); - const char *method = json_string_value(json_object_get(methodData,"method")); + const char *method = json_string_value(json_object_get(methodData, "method")); std::string methodString("coliny_pattern_search"); bool gradientBool = false; bool hessianBool = false; - if (strcmp(method,"Derivative-Free Local Search")==0) + if (strcmp(method, "Derivative-Free Local Search") == 0) methodString = "coliny_pattern_search"; - gradientBool = false; - hessianBool = false; + gradientBool = false; + hessianBool = false; - int maxIterations = json_integer_value(json_object_get(methodData,"maxIterations")); - double tol = json_number_value(json_object_get(methodData,"convergenceTol")); + int maxIterations = json_integer_value(json_object_get(methodData, "maxIterations")); + double tol = json_number_value(json_object_get(methodData, "convergenceTol")); double contractionFactor = json_number_value(json_object_get(methodData, "contractionFactor")); double initialDelta = json_number_value(json_object_get(methodData, "initialDelta")); int maxFunEvals = json_integer_value(json_object_get(methodData, "maxFunEvals")); @@ -1090,15 +1090,15 @@ writeDakotaInputFile(std::ostream &dakotaFile, dakotaFile << "environment \n tabular_data \n tabular_data_file = 'dakotaTab.out' \n\n"; - dakotaFile << "method, \n " << methodString - << "\n contraction_factor = " << contractionFactor - << "\n convergence_tolerance = " << tol - << "\n initial_delta = " << initialDelta - << "\n max_function_evaluations = " << maxFunEvals - << "\n max_iterations = " << maxIterations - << "\n total_pattern_size = " << 2*numRVs - << "\n variable_tolerance = " << thresholdDelta; - // << "\n solution_target = " << solutionTarget + dakotaFile << "method, \n " << methodString + << "\n contraction_factor = " << contractionFactor + << "\n convergence_tolerance = " << tol + << "\n initial_delta = " << initialDelta + << "\n max_function_evaluations = " << maxFunEvals + << "\n max_iterations = " << maxIterations + << "\n total_pattern_size = " << 2 * numRVs + << "\n variable_tolerance = " << thresholdDelta; + // << "\n solution_target = " << solutionTarget // if (strcmp(factors,"") != 0) dakotaFile << "\n scaling\n"; From 68522e650902a3e9d1065131875f11af2c16fb35 Mon Sep 17 00:00:00 2001 From: fmckenna Date: Tue, 17 Sep 2024 15:53:41 -0700 Subject: [PATCH 55/59] fmk - changes to smelt code for compilation issues (in appveyor) --- modules/createEVENT/common/smelt/beta_dist.cc | 6 +++--- modules/createEVENT/common/smelt/factory.h | 2 +- .../common/smelt/inv_gauss_dist.cc | 6 +++--- .../common/smelt/lognormal_dist.cc | 6 +++--- .../createEVENT/common/smelt/normal_dist.cc | 6 +++--- .../createEVENT/common/smelt/numeric_utils.cc | 21 +++++++++++-------- .../createEVENT/common/smelt/numeric_utils.h | 6 +++--- .../common/smelt/students_t_dist.cc | 8 +++---- 8 files changed, 32 insertions(+), 29 deletions(-) diff --git a/modules/createEVENT/common/smelt/beta_dist.cc b/modules/createEVENT/common/smelt/beta_dist.cc index ba86f0f92..3e9c4dd5c 100644 --- a/modules/createEVENT/common/smelt/beta_dist.cc +++ b/modules/createEVENT/common/smelt/beta_dist.cc @@ -4,9 
+4,9 @@ stochastic::BetaDistribution::BetaDistribution(double alpha, double beta) : Distribution(), - alpha_{alpha}, - beta_{beta}, - distribution_{alpha, beta_} + alpha_(alpha), + beta_(beta), + distribution_(alpha, beta_) {} std::vector<double> stochastic::BetaDistribution::cumulative_dist_func( diff --git a/modules/createEVENT/common/smelt/factory.h b/modules/createEVENT/common/smelt/factory.h index a6f39a5b3..4948c9cd9 100644 --- a/modules/createEVENT/common/smelt/factory.h +++ b/modules/createEVENT/common/smelt/factory.h @@ -125,7 +125,7 @@ class Factory { } }; - std::map> registry; /**< Register of + std::map > registry; /**< Register of factory functions */ }; diff --git a/modules/createEVENT/common/smelt/inv_gauss_dist.cc b/modules/createEVENT/common/smelt/inv_gauss_dist.cc index 771aa7bec..b9786e11d 100644 --- a/modules/createEVENT/common/smelt/inv_gauss_dist.cc +++ b/modules/createEVENT/common/smelt/inv_gauss_dist.cc @@ -5,9 +5,9 @@ stochastic::InverseGaussianDistribution::InverseGaussianDistribution( double mean, double std_dev) : Distribution(), - mean_{mean}, - std_dev_{std_dev}, - distribution_{mean, std_dev_} + mean_(mean), + std_dev_(std_dev), + distribution_(mean, std_dev_) {} std::vector<double> diff --git a/modules/createEVENT/common/smelt/lognormal_dist.cc b/modules/createEVENT/common/smelt/lognormal_dist.cc index 8e1f525af..f57ae371b 100644 --- a/modules/createEVENT/common/smelt/lognormal_dist.cc +++ b/modules/createEVENT/common/smelt/lognormal_dist.cc @@ -4,9 +4,9 @@ stochastic::LognormalDistribution::LognormalDistribution(double mean, double std_dev) : Distribution(), - mean_{mean}, - std_dev_{std_dev}, - distribution_{mean, std_dev_} + mean_(mean), + std_dev_(std_dev), + distribution_(mean, std_dev_) {} std::vector<double> stochastic::LognormalDistribution::cumulative_dist_func( diff --git a/modules/createEVENT/common/smelt/normal_dist.cc b/modules/createEVENT/common/smelt/normal_dist.cc index de5441b49..c631be47d 100644 --- a/modules/createEVENT/common/smelt/normal_dist.cc +++ b/modules/createEVENT/common/smelt/normal_dist.cc @@ -4,9 +4,9 @@ stochastic::NormalDistribution::NormalDistribution(double mean, double std_dev) : Distribution(), - mean_{mean}, - std_dev_{std_dev}, - distribution_{mean, std_dev_} + mean_(mean), + std_dev_(std_dev), + distribution_(mean, std_dev) {} std::vector<double> stochastic::NormalDistribution::cumulative_dist_func( diff --git a/modules/createEVENT/common/smelt/numeric_utils.cc b/modules/createEVENT/common/smelt/numeric_utils.cc index 6ab0490ce..0fcb1ae67 100644 --- a/modules/createEVENT/common/smelt/numeric_utils.cc +++ b/modules/createEVENT/common/smelt/numeric_utils.cc @@ -89,8 +89,11 @@ bool convolve_1d(const std::vector<double>& input_x, } // Prepare complex input arrays for FFT - std::vector<kiss_fft_cpx> input_x_fft(n_fft, {0, 0}); - std::vector<kiss_fft_cpx> input_y_fft(n_fft, {0, 0}); + kiss_fft_cpx zero_value; + zero_value.r = 0; + zero_value.i = 0; + std::vector<kiss_fft_cpx> input_x_fft(n_fft, zero_value); + std::vector<kiss_fft_cpx> input_y_fft(n_fft, zero_value); // Copy real inputs to complex arrays (imaginary part is 0) for (int i = 0; i < n_x; ++i) { @@ -132,7 +135,7 @@ bool convolve_1d(const std::vector<double>& input_x, return true; } -bool inverse_fft(std::vector<std::complex<double>> input_vector, +bool inverse_fft(std::vector<std::complex<double> > input_vector, std::vector<double>& output_vector) { //output_vector.resize(input_vector.size()); @@ -235,7 +238,7 @@ bool inverse_fft(std::vector<std::complex<double>> input_vector, bool inverse_fft(const Eigen::VectorXcd& input_vector, Eigen::VectorXd& output_vector) { // Convert input Eigen vector to std vector - std::vector<std::complex<double>> input_vals(input_vector.size()); + std::vector<std::complex<double> > input_vals(input_vector.size()); std::vector<double> outputs(input_vals.size()); Eigen::VectorXcd::Map(&input_vals[0], input_vector.size()) = input_vector; @@ -255,7 +258,7 @@ bool inverse_fft(const Eigen::VectorXcd& input_vector, bool inverse_fft(const Eigen::VectorXcd& input_vector, std::vector<double>& output_vector) { // Convert input Eigen vector to std vector - std::vector<std::complex<double>> input_vals(input_vector.size()); + std::vector<std::complex<double> > input_vals(input_vector.size()); Eigen::VectorXcd::Map(&input_vals[0], input_vector.size()) = input_vector; output_vector.resize(input_vector.size()); @@ -270,10 +273,10 @@ bool inverse_fft(const Eigen::VectorXcd& input_vector, } bool fft(std::vector<double> input_vector, - std::vector<std::complex<double>>& output_vector) { + std::vector<std::complex<double> >& output_vector) { // Convert input vector to complex values - // std::vector<std::complex<double>> input_complex(input_vector.size()); + // std::vector<std::complex<double> > input_complex(input_vector.size()); // std::copy(input_vector.begin(), input_vector.end(), input_complex.begin()); // output_vector.resize(input_vector.size()); @@ -363,7 +366,7 @@ bool fft(const Eigen::VectorXd& input_vector, Eigen::VectorXcd& output_vector) { // Convert input Eigen vector to std vector std::vector<double> input_vals(input_vector.size()); - std::vector<std::complex<double>> outputs(input_vals.size()); + std::vector<std::complex<double> > outputs(input_vals.size()); Eigen::VectorXd::Map(&input_vals[0], input_vector.size()) = input_vector; try { @@ -380,7 +383,7 @@ bool fft(const Eigen::VectorXd& input_vector, Eigen::VectorXcd& output_vector) { } bool fft(const Eigen::VectorXd& input_vector, - std::vector<std::complex<double>>& output_vector) { + std::vector<std::complex<double> >& output_vector) { // Convert input Eigen vector to std vector std::vector<double> input_vals(input_vector.size()); Eigen::VectorXd::Map(&input_vals[0], input_vector.size()) = input_vector; diff --git a/modules/createEVENT/common/smelt/numeric_utils.h b/modules/createEVENT/common/smelt/numeric_utils.h index 1411c2a3f..af792a990 100644 --- a/modules/createEVENT/common/smelt/numeric_utils.h +++ b/modules/createEVENT/common/smelt/numeric_utils.h @@ -39,7 +39,7 @@ bool convolve_1d(const std::vector<double>& input_x, * @param[in, out] output_vector Vector to write output to * @return Returns true if computations were successful, false otherwise */ -bool inverse_fft(std::vector<std::complex<double>> input_vector, +bool inverse_fft(std::vector<std::complex<double> > input_vector, std::vector<double>& output_vector); /** * @@ -70,7 +70,7 @@ bool inverse_fft(const Eigen::VectorXcd& input_vector, * @return Returns true if computations were successful, false otherwise */ bool fft(std::vector<double> input_vector, - std::vector<std::complex<double>>& output_vector); + std::vector<std::complex<double> >& output_vector); /** * Computes the real portion of the 1-dimensional Fast Fourier Transform * @@ -89,7 +89,7 @@ bool fft(const Eigen::VectorXd& input_vector, Eigen::VectorXcd& output_vector); * @return Returns true if computations were successful, false otherwise */ bool fft(const Eigen::VectorXd& input_vector, - std::vector<std::complex<double>>& output_vector); + std::vector<std::complex<double> >& output_vector); /** * Calculate the integral of the input vector with uniform spacing diff --git a/modules/createEVENT/common/smelt/students_t_dist.cc b/modules/createEVENT/common/smelt/students_t_dist.cc index 7b44f6a6a..8bee8c615 100644 --- a/modules/createEVENT/common/smelt/students_t_dist.cc +++ b/modules/createEVENT/common/smelt/students_t_dist.cc @@ -6,10 +6,10 @@ stochastic::StudentstDistribution::StudentstDistribution(double mean, double std_dev, double dof) : Distribution(), - mean_{mean}, - std_dev_{std_dev}, - dof_{dof}, - distribution_{dof_} + mean_(mean), +
From 65ea00d3d9d2a885553e08ea11d192a950804b97 Mon Sep 17 00:00:00 2001
From: yisangriB
Date: Tue, 17 Sep 2024 18:39:29 -0700
Subject: [PATCH 56/59] sy - important update to smelt cmake

---
 modules/performUQ/SimCenterUQ/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/performUQ/SimCenterUQ/CMakeLists.txt b/modules/performUQ/SimCenterUQ/CMakeLists.txt
index 48fa82a5b..358729cb7 100644
--- a/modules/performUQ/SimCenterUQ/CMakeLists.txt
+++ b/modules/performUQ/SimCenterUQ/CMakeLists.txt
@@ -3,4 +3,4 @@ simcenter_add_python_script(SCRIPT surrogateBuild.py)
 simcenter_add_python_script(SCRIPT UQengine.py)
 simcenter_add_python_script(SCRIPT runPLoM.py)
 add_subdirectory(PLoM)
-#add_subdirectory(nataf_gsa)
+add_subdirectory(nataf_gsa)

From c563f23904c262f3f12af6c54e194f0af023bc1d Mon Sep 17 00:00:00 2001
From: yisangriB
Date: Tue, 17 Sep 2024 18:42:04 -0700
Subject: [PATCH 57/59] sy - important update to smelt cmake

---
 modules/createEVENT/stochasticGroundMotion/CMakeLists.txt | 6 ++++--
 modules/createEVENT/stochasticWind/CMakeLists.txt         | 2 ++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt
index e202d83d2..19365cb5a 100644
--- a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt
+++ b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt
@@ -1,5 +1,5 @@
-#simcenter_add_executable(NAME StochasticGM
-#                         DEPENDS CONAN_PKG::kissfft smelt)
+#simcenter_add_executable(StochasticGM
+#                         command_parser.cpp eq_generator.cpp main.cpp)
 
 add_executable(StochasticGM command_parser.cpp eq_generator.cpp main.cpp)
@@ -7,3 +7,5 @@ include_directories(../common/smelt)
 target_link_libraries (StochasticGM CONAN_PKG::kissfft smelt)
 
 set_property(TARGET StochasticGM PROPERTY CXX_STANDARD 17)
+
+install(TARGETS StochasticGM DESTINATION ${PROJECT_SOURCE_DIR}/applications/createEVENT/stochasticGroundMotion)
\ No newline at end of file
diff --git a/modules/createEVENT/stochasticWind/CMakeLists.txt b/modules/createEVENT/stochasticWind/CMakeLists.txt
index 84ea60221..b906b88a8 100644
--- a/modules/createEVENT/stochasticWind/CMakeLists.txt
+++ b/modules/createEVENT/stochasticWind/CMakeLists.txt
@@ -8,3 +8,5 @@ target_link_libraries (StochasticWind CONAN_PKG::kissfft smelt common)
 
 set_property(TARGET StochasticWind PROPERTY CXX_STANDARD 17)
+
+install(TARGETS StochasticWind DESTINATION ${PROJECT_SOURCE_DIR}/applications/createEVENT/stochasticWind)
\ No newline at end of file

From 7186747e68aeafa4e2af1745b7f1f215216214af Mon Sep 17 00:00:00 2001
From: yisangriB
Date: Tue, 17 Sep 2024 18:42:21 -0700
Subject: [PATCH 58/59] sy - undo uncomment

---
 modules/performUQ/SimCenterUQ/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/performUQ/SimCenterUQ/CMakeLists.txt b/modules/performUQ/SimCenterUQ/CMakeLists.txt
index 358729cb7..48fa82a5b 100644
--- a/modules/performUQ/SimCenterUQ/CMakeLists.txt
+++ b/modules/performUQ/SimCenterUQ/CMakeLists.txt
@@ -3,4 +3,4 @@ simcenter_add_python_script(SCRIPT surrogateBuild.py)
 simcenter_add_python_script(SCRIPT UQengine.py)
 simcenter_add_python_script(SCRIPT runPLoM.py)
 add_subdirectory(PLoM)
-add_subdirectory(nataf_gsa)
+#add_subdirectory(nataf_gsa)
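Note on the `install(TARGETS ...)` rules introduced in PATCH 57/59 above: `install()` records a rule that runs at install time (`cmake --install .` or `make install`) and copies the built executable into the in-tree `applications/createEVENT/` folder, which appears to be where this repository stages its workflow applications. A minimal CMake sketch of the same pattern for a hypothetical target `MyEventApp` built from `main.cpp` (target name and destination are illustrative, not from the patch):

    # Hypothetical target; mirrors the StochasticGM/StochasticWind pattern.
    add_executable(MyEventApp main.cpp)
    set_property(TARGET MyEventApp PROPERTY CXX_STANDARD 17)
    # At install time, copy the binary into the in-tree applications folder:
    install(TARGETS MyEventApp
            DESTINATION ${PROJECT_SOURCE_DIR}/applications/createEVENT/myEventApp)

Installing into ${PROJECT_SOURCE_DIR} rather than the usual CMAKE_INSTALL_PREFIX is unconventional for distributable packages, but it matches how these patches make freshly built binaries visible to the workflow without a separate packaging step.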
From d3c350b1f1524f88b3cb1df700fbb5284adaa3d9 Mon Sep 17 00:00:00 2001
From: yisangriB
Date: Tue, 17 Sep 2024 18:45:05 -0700
Subject: [PATCH 59/59] sy - undo unnecessary change

---
 modules/createEVENT/stochasticGroundMotion/CMakeLists.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt
index 19365cb5a..114572521 100644
--- a/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt
+++ b/modules/createEVENT/stochasticGroundMotion/CMakeLists.txt
@@ -1,5 +1,5 @@
-#simcenter_add_executable(StochasticGM
-#                         command_parser.cpp eq_generator.cpp main.cpp)
+#simcenter_add_executable(NAME StochasticGM
+#                         DEPENDS CONAN_PKG::kissfft smelt)
 
 add_executable(StochasticGM command_parser.cpp eq_generator.cpp main.cpp)