From 67b7314225c56ddd9522627dcf7546518a7d9e94 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Fri, 14 Jun 2024 13:36:43 +0800 Subject: [PATCH 1/9] landslide initial --- .../HazardSimulationEQ.py | 3 + .../database/CMakeLists.txt | 2 +- ...Wills_etal_2015_CA_Geologic_Properties.csv | 19 + .../regionalGroundMotion/landslide.py | 483 ++++++++++++++++++ .../regionalGroundMotion/liquefaction.py | 4 +- 5 files changed, 508 insertions(+), 3 deletions(-) create mode 100644 modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv create mode 100644 modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 9eceb4ce4..52ffec56b 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -342,6 +342,9 @@ def hazard_job(hazard_info): ln_im_mr, mag_maf, im_list ) gf_im_list += settlement_info['Output'] + if "Liquefaction" in ground_failure_info.keys(): + import landslide + diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt b/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt index f79a961e9..0276b2ded 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/database/CMakeLists.txt @@ -1,2 +1,2 @@ add_subdirectory(gmdb) - +add_subdirectory(groundfailure) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv new file mode 100644 index 000000000..1e00416b8 --- /dev/null +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/Wills_etal_2015_CA_Geologic_Properties.csv @@ -0,0 +1,19 @@ +Unit Abbreviation,Friction Angle - Mean (degrees),Friction Angle - Median (degrees),Friction Angle - CoV (%),Friction Angle - Min (degrees),Friction Angle - Max (degrees),Cohesion - Mean (kPa),Cohesion - Median (kPa),Cohesion - CoV (%),Cohesion - Min (kPa),Cohesion - Max (kPa) +adf,9999,9999,0,9999,9999,9999,9999,0,9999,9999 +Qi,17,19,52,3,28,15.75,11.97,52,5.99,27.53 +af/Qi,9999,9999,0,9999,9999,9999,9999,0,9999,9999 +Qal1,23,23,46,8,44,32.46,23.94,82,1.96,82.83 +Qal2,23,23,46,8,44,32.46,23.94,82,1.96,82.83 +Qal3,23,23,46,8,44,32.46,23.94,82,1.96,82.83 +Qoa,29,30,37,13,46,33.13,23.94,106,0.05,91.21 +Qs,36,37,13,13,46,10.58,4.79,170,0.05,43.09 +QT,26,26,42,28,42,43.33,35.91,79,0.05,100.55 +Tsh,27,27,40,9,45,40.79,29.93,117,2.35,111.85 +Tss,27,27,40,9,45,40.79,29.93,117,2.35,111.85 +Tv,30,29,46,15,44,25.57,27.53,65,5.46,43.52 +sp,28,26,42,13,48,48.17,35.91,97,2.39,149.63 +Kss,24,24,42,8,40,36.48,28.73,85,1.1,101.6 +KJf,26,25,39,12,43,43.24,29.21,113,2.92,106.77 +crystalline,26,26,35,13,38,18.15,16.76,82,0.05,42.66 +crystalin2,40,40,25,30,50,23.94,23.94,100,0.05,35.91 +water,9999,9999,0,9999,9999,9999,9999,0,9999,9999 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py new file mode 100644 index 
000000000..474540206
--- /dev/null
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py
@@ -0,0 +1,483 @@
+import numpy as np
+import rasterio as rio
+from scipy.interpolate import interp2d
+import sys, warnings, shapely, pandas, os
+from pyproj import Transformer
+from pyproj import CRS
+from enum import Enum
+import geopandas as gpd
+from scipy.spatial import ConvexHull
+import pandas as pd
+
+## Helper functions
+def sampleRaster(raster_file_path, raster_crs, x, y, interp_scheme = 'nearest',\
+                 dtype = None):
+    """Performs 2D interpolation at (x, y) pairs. Accepted interp_scheme values: 'nearest', 'linear', 'cubic', and 'quintic'"""
+    print(f"Sampling from the Raster File: {os.path.basename(raster_file_path)}...")
+    invalid_value = np.nan
+    xy_crs = CRS.from_user_input(4326)
+    raster_crs = CRS.from_user_input(raster_crs)
+    with rio.open(raster_file_path) as raster_file:
+        try:
+            raster_data = raster_file.read()
+            if raster_data.shape[0] > 1:
+                warnings.warn(f"More than one band in the file {raster_file_path}; only the first band is used.")
+        except:
+            sys.exit(f"Cannot read data from {raster_file_path}")
+        if xy_crs != raster_crs:
+            # make transformer for reprojection
+            transformer_xy_to_data = Transformer.from_crs(xy_crs, raster_crs,\
+                                                          always_xy=True)
+            # reproject and store
+            x_proj, y_proj = transformer_xy_to_data.transform(x, y)
+            x = x_proj
+            y = y_proj
+        n_sample = len(x)
+        if interp_scheme == 'nearest':
+            sample = np.array([val[0] for val in raster_file.sample(list(zip(x,y)))])
+        else:
+            # create x and y ticks for grid
+            x_tick = np.linspace(raster_file.bounds.left, \
+                raster_file.bounds.right, raster_file.width, endpoint=False)
+            y_tick = np.linspace(raster_file.bounds.bottom,\
+                raster_file.bounds.top, raster_file.height, endpoint=False)
+            # create interp2d function
+            interp_function = interp2d(
+                x_tick, y_tick, np.flipud(raster_file.read(1)),
+                kind=interp_scheme, fill_value=invalid_value)
+            # get samples
+            sample = np.transpose(
+                [interp_function(x[i],y[i]) for i in range(n_sample)]
+            )[0]
+    # convert to target datatype
+    if dtype is not None:
+        sample = sample.astype(dtype)
+    # clean up invalid values (nodata is returned as a very large value)
+    sample[abs(sample)>1e10] = invalid_value
+    return sample
+
+## Helper functions
+def sampleVector(vector_file_path, vector_crs, x, y, dtype = None):
+    """Performs a spatial join of the vector file with the (x, y) sites"""
+    print(f"Sampling from the Vector File: {os.path.basename(vector_file_path)}...")
+    invalid_value = np.nan
+    xy_crs = CRS.from_user_input(4326)
+    vector_gdf = gpd.read_file(vector_file_path)
+    if vector_gdf.crs != vector_crs:
+        sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, which doesn't match the input CRS ({vector_crs}) defined for the landslide model")
+    if xy_crs != vector_crs:
+        # make transformer for reprojection
+        transformer_xy_to_data = Transformer.from_crs(xy_crs, vector_crs,\
+                                                      always_xy=True)
+        # reproject and store
+        x_proj, y_proj = transformer_xy_to_data.transform(x, y)
+        x = x_proj
+        y = y_proj
+    # Create a convex hull containing all sites
+    sites = np.array([x, y]).transpose()
+    try:
+        hull = ConvexHull(sites)
+        vertices = hull.vertices
+        vertices = sites[np.append(vertices, vertices[0])]
+        centroid = np.mean(vertices, axis=0)
+        vertices = vertices + 0.05 * (vertices - centroid)
+        RoI = shapely.geometry.Polygon(vertices)
+    except:
+        centroid = shapely.geometry.Point(np.mean(x), np.mean(y))
+        points = [shapely.geometry.Point(x[i], y[i]) for i in range(len(x))]
+        if len(points) == 1:
+            distances = [0.1] # Degree
+        else:
+            distances = [point.distance(centroid) for point in points]
+        max_distance = max(distances)*1.2
+        angles = np.linspace(0, 2 * np.pi, 36)
+        circle_points = [(centroid.x + max_distance * np.cos(angle), \
+                          centroid.y + max_distance * np.sin(angle)) for angle in angles]
+        RoI = shapely.geometry.Polygon(circle_points)
+    data = dict()
+    for col in vector_gdf.columns:
+        data.update({col:[]})
+    for row_index in vector_gdf.index:
+        new_geom = RoI.intersection(vector_gdf.loc[row_index, 'geometry'])
+        if new_geom.is_empty:
+            continue
+        columns = list(vector_gdf.columns)
+        columns.remove('geometry')
+        for col in columns:
+            data[col].append(vector_gdf.loc[row_index, col])
+        data['geometry'].append(new_geom)
+    del vector_gdf
+    gdf_roi = gpd.GeoDataFrame(data, geometry="geometry", crs=4326)
+    geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)]
+    gdf_sites = gpd.GeoDataFrame(geometry=geometry, crs=4326).reset_index()
+    merged = gpd.GeoDataFrame.sjoin(gdf_roi, gdf_sites, how = 'inner', predicate = 'contains')
+    merged = merged.set_index('index_right').sort_index().drop(columns=['geometry'])
+    gdf_sites = pandas.merge(gdf_sites, merged, on = 'index', how = 'left')
+    gdf_sites.drop(columns=['geometry', 'index'], inplace=True)
+    return gdf_sites
+
+def find_additional_output_req(liq_info, current_step):
+    additional_output_keys = []
+    if current_step == 'Triggering':
+        trigging_parameters = liq_info['Triggering']\
+            ['Parameters'].keys()
+        triger_dist_water = liq_info['Triggering']['Parameters'].get('DistWater', None)
+        if triger_dist_water is None:
+            return additional_output_keys
+        if 'LateralSpreading' in liq_info.keys():
+            lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None)
+            if (liq_info['LateralSpreading']['Model'] == 'Hazus2020')\
+                and (lat_dist_water==triger_dist_water):
+                additional_output_keys.append('dist_to_water')
+    return additional_output_keys
+
+def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station):
+    gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype = None)
+    gdf_units = gdf_units['UnitAbbr', 'geometry']
+    gdf_units = gdf_units.fillna('water')
+    default_geo_prop_fpath = os.path.join(os.path.abspath(__file__), 'database',\
+        'groundfailure', 'Wills_etal_2015_CA_Geologic_Properties.csv')
+    default_geo_prop = pd.read_csv(default_geo_prop_fpath)
+    unique_geo_unit = np.unique(gdf_units['UnitAbbr'])
+    phi_mean = np.empty_like(gdf_units['UnitAbbr'])
+    coh_mean = np.empty_like(gdf_units['UnitAbbr'])
+    for each in unique_geo_unit:
+        rows_with_geo_unit = np.where(gdf_units['UnitAbbr'].values==each)[0]
+        rows_for_param = np.where(default_geo_prop['Unit Abbreviation'].values==each)[0][0]
+        phi_mean[rows_with_geo_unit] = \
+            default_geo_prop['Friction Angle - Median (degrees)'][rows_for_param]
+        coh_mean[rows_with_geo_unit] = \
+            default_geo_prop['Cohesion - Median (kPa)'][rows_for_param]
+    return phi_mean, coh_mean
+
+def erf2(x):
+    """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf"""
+    # constants
+    a1 = 0.254829592
+    a2 = -0.284496736
+    a3 = 1.421413741
+    a4 = -1.453152027
+    a5 = 1.061405429
+    p = 0.3275911
+    # Save the sign of x
+    signs = np.sign(x)
+    x = np.abs(x)
+    # A & S 7.1.26
+    t = 1.0/(1.0 + p*x)
+    y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2)
+    return signs*y
+
+def norm2_cdf(x, loc, scale):
+    """
+    modified implementation of norm.cdf function from numba_stats, using self-implemented erf function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = (x - loc)/scale
+    return 0.5 * (1 + erf2(inter * np.sqrt(0.5)))
+
+def erf2_2d(x):
+    """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf"""
+    # constants
+    a1 = 0.254829592
+    a2 = -0.284496736
+    a3 = 1.421413741
+    a4 = -1.453152027
+    a5 = 1.061405429
+    p = 0.3275911
+    # Save the sign of x
+    signs = np.sign(x)
+    x = np.abs(x)
+    # A & S 7.1.26
+    t = 1.0/(1.0 + p*x)
+    y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2)
+    return signs*y
+
+def norm2_cdf_2d(x, loc, scale):
+    """
+    modified implementation of norm.cdf function from numba_stats, using self-implemented erf function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = (x - loc)/scale
+    return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5)))
+
+def nb_round(x, decimals):
+    out = np.empty_like(x)
+    return np.round_(x, decimals, out)
+
+def erfinv_coeff(order=20):
+    # initialize
+    c = np.empty(order+1)
+    # starting value
+    c[0] = 1
+    for i in range(1,order+1):
+        c[i] = sum([c[j]*c[i-1-j]/(j+1)/(2*j+1) for j in range(i)])
+    # return
+    return c
+
+def erfinv(x, order=20):
+    """returns inverse erf(x)"""
+    # get coeffcients
+    c = erfinv_coeff(order)
+    # initialize
+    root_pi_over_2 = np.sqrt(np.pi)/2
+    y = np.zeros(x.shape)
+    for i in range(order):
+        y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1)
+    # return
+    return y
+
+def norm2_ppf(p, loc, scale):
+    """
+    modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = np.sqrt(2) * erfinv(2*p-1,order=20)
+    return scale * inter + loc
+
+def erfinv_2d(x, order=20):
+    """returns inverse erf(x)"""
+    # get coeffcients
+    c = erfinv_coeff(order)
+    # initialize
+    root_pi_over_2 = np.sqrt(np.pi)/2
+    y = np.zeros(x.shape)
+    for i in range(order):
+        y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1)
+    # return
+    return y
+
+def norm2_ppf_2d(p, loc, scale):
+    """
+    modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function
+    https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py
+    """
+    inter = np.sqrt(2) * erfinv_2d(2*p-1,order=20)
+    return scale * inter + loc
+
+class Landslide:
+    def __init__(self) -> None:
+        pass
+
+# -----------------------------------------------------------
+class BrayMacedo2019(Landslide):
+    """
+    Compute landslide deformation at a given location using the Bray and Macedo (2019) probabilistic model.
+    Regression models based on three sets of ground motions are provided:
+
+    1. **Ordinary**: **d** = f(ky, Sa(T), Ts, M)
+    2. **Near-fault**: **d** = f(ky, Sa(T), Ts, M, pgv) - unavailable for this version of OpenSRA
+    3. 
**General** (default): **d** = f(ky, Sa(T), Ts, M, pgv) - unavailable for this version of OpenSRA
+
+    The default relationship for **ky** uses **coh_soil**, **phi_soil**, **gamma_soil**, **t_slope**, **slope**
+
+    **PGA** is used in place of **Sa(T)** (i.e., Ts=0)
+
+    Parameters
+    ----------
+    From upstream PBEE:
+    pga: float, np.ndarray or list
+        [g] peak ground acceleration
+    mag: float, np.ndarray or list
+        moment magnitude
+
+    Geotechnical/geologic:
+    slope: float, np.ndarray or list
+        [deg] slope angle
+    t_slope: float, np.ndarray or list
+        [m] slope thickness (infinite-slope problem)
+    gamma_soil: float, np.ndarray or list
+        [kN/m^3] unit weight of soil
+    phi_soil: float, np.ndarray or list
+        [deg] friction angle of soil
+    coh_soil: float, np.ndarray or list
+        [kPa] cohesion of soil
+
+    Fixed:
+
+    Returns
+    -------
+    pgdef : float, np.ndarray
+        [m] permanent ground deformation
+    sigma_pgdef : float, np.ndarray
+        aleatory variability for ln(pgdef)
+
+    References
+    ----------
+    .. [1] Bray, J.D., and Macedo, J., 2019, Procedure for Estimating Shear-Induced Seismic Slope Displacement for Shallow Crustal Earthquakes, Journal of Geotechnical and Geoenvironmental Engineering, Vol. 145, No. 12, 04019106.
+
+    """
+    def __init__(self, parameters, stations) -> None:
+        self.stations = stations
+        self.parameters = parameters
+        self.slope = None # (deg)
+        self.t_slope = None # (m)
+        self.gamma_soil = None # (kN/m^3)
+        self.phi_soil = None # (deg)
+        self.coh_soil = None # (kPa)
+        self.interpolate_spatial_parameters(parameters)
+
+    def interpolate_spatial_parameters(self, parameters):
+        # site coordinate in CRS 4326
+        lat_station = [site['lat'] for site in self.stations]
+        lon_station = [site['lon'] for site in self.stations]
+        # slope
+        if parameters["Slope"] == "Defined (\"Slope\") in Site File (.csv)":
+            self.slope = np.array([site['Slope'] for site in self.stations])
+        else:
+            self.slope = sampleRaster(parameters["Slope"], parameters["inputCRS"],\
+                lon_station, lat_station)
+        # t_slope
+        if parameters["SlopeThickness"] == "Defined (\"SlopeThickness\") in Site File (.csv)":
+            self.t_slope = np.array([site['SlopeThickness'] for site in self.stations])
+        elif parameters["SlopeThickness"] == "Use constant value (m)":
+            self.t_slope = np.array(parameters["SlopeThicknessValue"])
+        else:
+            self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\
+                lon_station, lat_station)
+        # gamma_soil
+        if parameters["GammaSoil"] == "Defined (\"GammaSoil\") in Site File (.csv)":
+            self.gamma_soil = np.array([site['GammaSoil'] for site in self.stations])
+        elif parameters["GammaSoil"] == "Use constant value (m)":
+            self.gamma_soil = np.array(parameters["GammaSoilValue"])
+        elif parameters["GammaSoil"] == "Infer from Geologic Map":
+            self.gamma_soil = infer_from_geologic_map(parameters["GammaSoilGeoMap"],\
+                parameters['inputCRS'], lon_station, lat_station)
+        else:
+            self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\
+                lon_station, lat_station)
+        # coh_soil
+        if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)":
+            self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations])
+        elif parameters["CohesionSoil"] == "Use constant value (m)":
+            self.coh_soil = np.array(parameters["CohesionSoilValue"])
+        elif parameters["CohesionSoil"] == "Infer from Geologic Map":
+            self.coh_soil = infer_from_geologic_map(parameters["CohesionSoilGeoMap"],\
+                parameters['inputCRS'], lon_station, lat_station)
+        else:
+            self.coh_soil = 
sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ + lon_station, lat_station) + + print("Initiation finished") + + def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys = []): + if ('PGA' in im_list): + num_stations = len(self.stations) + num_scenarios = len(eq_data) + PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] + for scenario_id in range(num_scenarios): + num_rlzs = ln_im_data[scenario_id].shape[2] + im_data_scen = np.zeros([num_stations,\ + len(im_list)+len(output_keys), num_rlzs]) + im_data_scen[:,0:len(im_list),:] = ln_im_data[scenario_id] + for rlz_id in range(num_rlzs): + pga = np.exp(ln_im_data[scenario_id][:,PGA_col_id,rlz_id]) + mag = float(eq_data[scenario_id][0]) + model_output = self.model(pga, mag, self.slope, self.t_slope, + self.gamma_soil, self.phi_soil, + self.coh_soil) + for i, key in enumerate(output_keys): + im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key] + ln_im_data[scenario_id] = im_data_scen + im_list = im_list + output_keys + additional_output = dict() + for key in additional_output_keys: + item = getattr(self, key, None) + if item is None: + warnings.warn(f"Additional output {key} is not avaliable in the landslide model 'BrayMacedo2019'.") + else: + additional_output.update({key:item}) + else: + sys.exit(f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed.") + # print(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed."\ + # , file=sys.stderr) + # sys.stderr.write("test") + # sys.exit(-1) + return ln_im_data, eq_data, im_list, additional_output + + def model( + self, + pga, mag, # upstream PBEE RV + slope, t_slope, gamma_soil, phi_soil, coh_soil, # geotechnical/geologic + return_inter_params=False # to get intermediate params + ): + """Model""" + + # get dimensions + ndim = pga.ndim + if ndim == 1: + n_site = len(pga) + n_sample = 1 + shape = (n_site) + else: + shape = pga.shape + n_site = shape[0] + n_sample = shape[1] + + # initialize + pgdef = np.zeros(shape) + ky = np.zeros(shape) + prob_d_eq_0 = np.zeros(shape) + ln_pgdef_trunc = np.zeros(shape) + nonzero_median_cdf = np.zeros(shape) + + # convert from deg to rad + slope_rad = slope*np.pi/180 + phi_soil_rad = phi_soil*np.pi/180 + + # yield acceleration + ky = np.tan(phi_soil_rad-slope_rad) + \ + coh_soil/( + gamma_soil * t_slope * np.cos(slope_rad)**2 * \ + (1+np.tan(phi_soil_rad)*np.tan(slope_rad))) + ky = np.maximum(ky,0.01) # to avoid ky = 0 + + # aleatory + sigma_val = 0.72 + + # deformation, eq 3b + ln_pgdef_trunc = \ + -4.684 + \ + -2.482*np.log(ky) + \ + -0.244*(np.log(ky))**2 + \ + 0.344*np.log(ky)*np.log(pga) + \ + 2.649*np.log(pga) + \ + -0.090*(np.log(pga))**2 + \ + 0.603*mag # cm + nonzero_ln_pgdef = ln_pgdef_trunc.copy() + + # probability of zero displacement, eq. 
2 with Ts=0
+        if ndim == 1:
+            prob_d_eq_0 = 1 - norm2_cdf(
+                -2.480 + \
+                -2.970*np.log(ky) + \
+                -0.120*(np.log(ky))**2 + \
+                2.780*np.log(pga),
+                0, 1)
+        else:
+            prob_d_eq_0 = 1 - norm2_cdf_2d(
+                -2.480 + \
+                -2.970*np.log(ky) + \
+                -0.120*(np.log(ky))**2 + \
+                2.780*np.log(pga),
+                0, 1)
+        prob_d_eq_0 = nb_round(prob_d_eq_0, decimals=15)
+
+        # apply non-zero displacement correction/condition, eq 11
+        nonzero_median_cdf = 1 - .5/(1-prob_d_eq_0)
+
+        # loop through number of samples
+        if ndim == 1:
+            nonzero_ln_pgdef[nonzero_median_cdf>0] = ln_pgdef_trunc[nonzero_median_cdf>0] + \
+                sigma_val*norm2_ppf(nonzero_median_cdf[nonzero_median_cdf>0], 0.0, 1.0)
+        else:
+            for i in range(n_sample):
+                cond = nonzero_median_cdf[:,i]>0
+                nonzero_ln_pgdef[cond,i] = ln_pgdef_trunc[cond,i] + \
+                    sigma_val*norm2_ppf(nonzero_median_cdf[cond,i], 0.0, 1.0)
+
+        # rest of actions
+        pgdef = np.exp(nonzero_ln_pgdef)/100 # also convert from cm to m
+        pgdef = np.maximum(pgdef,1e-5) # limit to a minimum of 1e-5 m
+        output = {'lsd_PGD_h':pgdef}
+        return output
\ No newline at end of file
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
index 83fb48de8..ef9909e57 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
@@ -242,7 +242,7 @@ def interpolate_spatial_parameters(self, parameters):
         self.precip = sampleRaster(parameters["Precipitation"], parameters["inputCRS"],\
             lon_station, lat_station)
         self.vs30 = np.array([site['vs30'] for site in self.stations])
-        print("Sampling finished")
+        print("Initiation finished")
 
     def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):
         if ('PGA' in im_list) and ('PGV' in im_list):
@@ -431,7 +431,7 @@ def interpolate_spatial_parameters(self, parameters):
         self.liq_susc = np.array(self.liq_susc)
         # liq_susc = liq_susc_samples[parameters["SusceptibilityKey"]].fillna("NaN")
         # self.liq_susc = liq_susc.to_numpy()
-        print("Sampling finished")
+        print("Initiation finished")
 
     def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):

From a2dfe72923cac542d94f69ead27eb5ab9aad7956 Mon Sep 17 00:00:00 2001
From: jinyan1214
Date: Mon, 15 Jul 2024 13:59:49 -0700
Subject: [PATCH 2/9] landslide backend run

---
 .../ComputeIntensityMeasure.py                |  24 +++----
 .../HazardSimulationEQ.py                     |   9 ++-
 .../database/groundfailure/CMakeLists.txt     |   2 +
 .../regionalGroundMotion/landslide.py         |  71 ++++++++++---------
 .../regionalGroundMotion/liquefaction.py      |   1 -
 5 files changed, 61 insertions(+), 46 deletions(-)
 create mode 100644 modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt

diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
index e300811a6..5d58f1304 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
@@ -837,21 +837,21 @@ def export_im(stations, im_list, im_data, eq_data, output_dir, filename, csv_fla
             })
             df = pd.DataFrame(df)
             # Combine PGD from liquefaction, landslide and fault
-            if 'liq_PGD_h' in df.columns or 'ls_PGD_h'in df.columns or 'fd_PGD_h' in df.columns:
+            if 'liq_PGD_h' in df.columns or 'lsd_PGD_h'in df.columns or 'fd_PGD_h' in 
df.columns: PGD_h = np.zeros(df.shape[0]) if 'liq_PGD_h' in df.columns: PGD_h += df['liq_PGD_h'].to_numpy() - if 'ls_PGD_h' in df.columns: - PGD_h += df['ls_PGD_h'].to_numpy() + if 'lsd_PGD_h' in df.columns: + PGD_h += df['lsd_PGD_h'].to_numpy() if 'fd_PGD_h' in df.columns: PGD_h += df['fd_PGD_h'].to_numpy() df['PGD_h'] = PGD_h - if 'liq_PGD_v' in df.columns or 'ls_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: + if 'liq_PGD_v' in df.columns or 'lsd_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: PGD_v = np.zeros(df.shape[0]) if 'liq_PGD_v' in df.columns: PGD_v += df['liq_PGD_v'].to_numpy() - if 'ls_PGD_v' in df.columns: - PGD_v += df['ls_PGD_v'].to_numpy() + if 'lsd_PGD_v' in df.columns: + PGD_v += df['lsd_PGD_v'].to_numpy() if 'fd_PGD_v' in df.columns: PGD_v += df['fd_PGD_v'].to_numpy() df['PGD_v'] = PGD_v @@ -891,21 +891,21 @@ def export_im(stations, im_list, im_data, eq_data, output_dir, filename, csv_fla }) df = pd.DataFrame(df) # Combine PGD from liquefaction, landslide and fault - if 'liq_PGD_h' in df.columns or 'ls_PGD_h'in df.columns or 'fd_PGD_h' in df.columns: + if 'liq_PGD_h' in df.columns or 'lsd_PGD_h'in df.columns or 'fd_PGD_h' in df.columns: PGD_h = np.zeros(df.shape[0]) if 'liq_PGD_h' in df.columns: PGD_h += df['liq_PGD_h'].to_numpy() - if 'ls_PGD_h' in df.columns: - PGD_h += df['ls_PGD_h'].to_numpy() + if 'lsd_PGD_h' in df.columns: + PGD_h += df['lsd_PGD_h'].to_numpy() if 'fd_PGD_h' in df.columns: PGD_h += df['fd_PGD_h'].to_numpy() df['PGD_h'] = PGD_h - if 'liq_PGD_v' in df.columns or 'ls_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: + if 'liq_PGD_v' in df.columns or 'lsd_PGD_v'in df.columns or 'fd_PGD_v' in df.columns: PGD_v = np.zeros(df.shape[0]) if 'liq_PGD_v' in df.columns: PGD_v += df['liq_PGD_v'].to_numpy() - if 'ls_PGD_v' in df.columns: - PGD_v += df['ls_PGD_v'].to_numpy() + if 'lsd_PGD_v' in df.columns: + PGD_v += df['lsd_PGD_v'].to_numpy() if 'fd_PGD_v' in df.columns: PGD_v += df['fd_PGD_v'].to_numpy() df['PGD_v'] = PGD_v diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 52ffec56b..2ba637fab 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -342,8 +342,15 @@ def hazard_job(hazard_info): ln_im_mr, mag_maf, im_list ) gf_im_list += settlement_info['Output'] - if "Liquefaction" in ground_failure_info.keys(): + if "Landslide" in ground_failure_info.keys(): import landslide + lsld_info = ground_failure_info['Landslide'] + lsld_model = getattr(landslide, lsld_info['Model'])(\ + lsld_info["Parameters"], stations) + ln_im_mr, mag_maf, im_list = lsld_model.run( + ln_im_mr, mag_maf, im_list + ) + gf_im_list += lsld_info['Output'] diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt new file mode 100644 index 000000000..d5da071f5 --- /dev/null +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/database/groundfailure/CMakeLists.txt @@ -0,0 +1,2 @@ +simcenter_add_file(NAME Wills_etal_2015_CA_Geologic_Properties.csv) + diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index 474540206..1ead4ebe3 100644 --- 
a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -134,16 +134,16 @@ def find_additional_output_req(liq_info, current_step): def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station): gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype = None) - gdf_units = gdf_units['UnitAbbr', 'geometry'] + gdf_units = gdf_units['PTYPE'] gdf_units = gdf_units.fillna('water') - default_geo_prop_fpath = os.path.join(os.path.abspath(__file__), 'database',\ + default_geo_prop_fpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'database',\ 'groundfailure', 'Wills_etal_2015_CA_Geologic_Properties.csv') default_geo_prop = pd.read_csv(default_geo_prop_fpath) - unique_geo_unit = np.unique(gdf_units['UnitAbbr']) - phi_mean = np.empty_like(gdf_units['UnitAbbr']) - coh_mean = np.empty_like(gdf_units['UnitAbbr']) + unique_geo_unit = np.unique(gdf_units) + phi_mean = np.empty_like(gdf_units) + coh_mean = np.empty_like(gdf_units) for each in unique_geo_unit: - rows_with_geo_unit = np.where(gdf_units['UnitAbbr'].values==each)[0] + rows_with_geo_unit = np.where(gdf_units.values==each)[0] rows_for_param = np.where(default_geo_prop['Unit Abbreviation'].values==each)[0][0] phi_mean[rows_with_geo_unit] = \ default_geo_prop['Friction Angle - Median (degrees)'][rows_for_param] @@ -331,36 +331,49 @@ def interpolate_spatial_parameters(self, parameters): if parameters["SlopeThickness"] == "Defined (\"SlopeThickness\") in Site File (.csv)": self.t_slope = np.array([site['SlopeThickness'] for site in self.stations]) elif parameters["SlopeThickness"] == "Use constant value (m)": - self.t_slope = np.array(parameters["SlopeThicknessValue"]) + self.t_slope = np.array([parameters["SlopeThicknessValue"]]*len(self.stations)) else: self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\ lon_station, lat_station) # gamma_soil if parameters["GammaSoil"] == "Defined (\"GammaSoil\") in Site File (.csv)": self.gamma_soil = np.array([site['GammaSoil'] for site in self.stations]) - elif parameters["GammaSoil"] == "Use constant value (m)": - self.gamma_soil = np.array(parameters["GammaSoilValue"]) - elif parameters["GammaSoil"] == "Infer from Geologic Map": - self.gamma_soil = infer_from_geologic_map(parameters["GammaSoilGeoMap"],\ - parameters['inputCRS'], lon_station, lat_station) + elif parameters["GammaSoil"] == "Use constant value (kN/m^3)": + self.gamma_soil = np.array(parameters["GammaSoilValue"]*len(self.stations)) else: self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\ lon_station, lat_station) - # coh_soil - if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)": - self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations]) - elif parameters["CohesionSoil"] == "Use constant value (m)": - self.coh_soil = np.array(parameters["CohesionSoilValue"]) - elif parameters["CohesionSoil"] == "Infer from Geologic Map": - self.coh_soil = infer_from_geologic_map(parameters["CohesionSoilGeoMap"],\ - parameters['inputCRS'], lon_station, lat_station) + # phi_soil + if parameters["PhiSoil"] == "Defined (\"PhiSoil\") in Site File (.csv)": + self.phi_soil = np.array([site['PhiSoil'] for site in self.stations]) + elif parameters["PhiSoil"] == "Use constant value (deg)": + self.phi_soil = np.array(parameters["PhiSoilValue"]*len(self.stations)) + elif parameters["PhiSoil"] == "Infer from Geologic Map": + 
if parameters["CohesionSoil"] == "Infer from Geologic Map":
+                self.phi_soil, self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\
+                    parameters['inputCRS'], lon_station, lat_station)
+            else:
+                self.phi_soil, _ = infer_from_geologic_map(parameters["GeologicMap"],\
+                    parameters['inputCRS'], lon_station, lat_station)
         else:
-            self.coh_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\
+            self.phi_soil = sampleRaster(parameters["PhiSoil"], parameters["inputCRS"],\
                 lon_station, lat_station)
+        # coh_soil
+        if self.coh_soil is None:
+            if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)":
+                self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations])
+            elif parameters["CohesionSoil"] == "Use constant value (kPa)":
+                self.coh_soil = np.array(parameters["CohesionSoilValue"]*len(self.stations))
+            elif parameters["CohesionSoil"] == "Infer from Geologic Map":
+                _, self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\
+                    parameters['inputCRS'], lon_station, lat_station)
+            else:
+                self.coh_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\
+                    lon_station, lat_station)
 
         print("Initiation finished")
 
-    def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys = []):
+    def run(self, ln_im_data, eq_data, im_list, output_keys=['lsd_PGD_h'], additional_output_keys = []):
         if ('PGA' in im_list):
             num_stations = len(self.stations)
             num_scenarios = len(eq_data)
@@ -380,20 +393,13 @@ def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys
                     im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key]
             ln_im_data[scenario_id] = im_data_scen
             im_list = im_list + output_keys
-            additional_output = dict()
-            for key in additional_output_keys:
-                item = getattr(self, key, None)
-                if item is None:
-                    warnings.warn(f"Additional output {key} is not avaliable in the landslide model 'BrayMacedo2019'.")
-                else:
-                    additional_output.update({key:item})
         else:
             sys.exit(f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed.")
         # print(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed."\
         #       , file=sys.stderr)
         # sys.stderr.write("test")
         # sys.exit(-1)
-        return ln_im_data, eq_data, im_list, additional_output
+        return ln_im_data, eq_data, im_list
 
     def model(
         self,
         pga, mag, # upstream PBEE RV
         slope, t_slope, gamma_soil, phi_soil, coh_soil, # geotechnical/geologic
         return_inter_params=False # to get intermediate params
     ):
         """Model"""
@@ -422,8 +428,9 @@ def model(
         nonzero_median_cdf = np.zeros(shape)
 
         # convert from deg to rad
-        slope_rad = slope*np.pi/180
-        phi_soil_rad = phi_soil*np.pi/180
+        slope_rad = (slope*np.pi/180).astype(np.float32)
+        phi_soil_rad = (phi_soil*np.pi/180).astype(np.float32)
+        coh_soil = coh_soil.astype(np.float32)
 
         # yield acceleration
         ky = np.tan(phi_soil_rad-slope_rad) + \
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
index ef9909e57..d508fe850 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
@@ -123,7 +123,6 @@ def find_additional_output_req(liq_info, current_step):
         triger_dist_water = liq_info['Triggering']['Parameters'].get('DistWater', None)
         if triger_dist_water is None:
             return additional_output_keys
-        lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None)
         if 'LateralSpreading' in liq_info.keys():
             lat_dist_water = 
liq_info['LateralSpreading']['Parameters'].get('DistWater', None) if (liq_info['LateralSpreading']['Model'] == 'Hazus2020')\ From b5d96c1e6d4b43715c4925537299ab7ea01e5826 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Wed, 31 Jul 2024 15:59:43 -0700 Subject: [PATCH 3/9] JZ: R2D landslide front-back ends compatibility --- .../regionalGroundMotion/CMakeLists.txt | 1 + .../regionalGroundMotion/CreateStation.py | 3 +- .../HazardSimulationEQ.py | 15 +++++---- .../regionalGroundMotion/landslide.py | 32 +++++++++---------- 4 files changed, 27 insertions(+), 24 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt b/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt index e82a2052c..3d4f51a06 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CMakeLists.txt @@ -13,4 +13,5 @@ simcenter_add_python_script(SCRIPT HazardOccurrence.py) simcenter_add_python_script(SCRIPT USGS_API.py) simcenter_add_python_script(SCRIPT ScenarioForecast.py) simcenter_add_python_script(SCRIPT liquefaction.py) +simcenter_add_python_script(SCRIPT landslide.py) simcenter_add_python_script(SCRIPT GMSimulators.py) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index b2e2fbb3a..8e53e3dd5 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -370,7 +370,8 @@ def create_stations(input_file, output_file, filterIDs, vs30Config, z1Config, z2 else: tmp.update({'vsInferred': (1 if vs30Config['Parameters']['vsInferred'] else 0) }) for key in ['liqSusc', 'gwDepth', 'distWater', 'distCoast', 'distRiver',\ - 'precipitation']: + 'precipitation', 'slope', 'slopeThickness', 'gammaSoil', 'phiSoil',\ + 'cohesionSoil']: if stn.get(key, None) is not None: tmp.update({key:stn.get(key)}) ground_failure_input_keys.add(key) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 2ba637fab..76719b201 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -344,13 +344,14 @@ def hazard_job(hazard_info): gf_im_list += settlement_info['Output'] if "Landslide" in ground_failure_info.keys(): import landslide - lsld_info = ground_failure_info['Landslide'] - lsld_model = getattr(landslide, lsld_info['Model'])(\ - lsld_info["Parameters"], stations) - ln_im_mr, mag_maf, im_list = lsld_model.run( - ln_im_mr, mag_maf, im_list - ) - gf_im_list += lsld_info['Output'] + if 'Landslide' in ground_failure_info['Landslide'].keys(): + lsld_info = ground_failure_info['Landslide']['Landslide'] + lsld_model = getattr(landslide, lsld_info['Model'])(\ + lsld_info["Parameters"], stations) + ln_im_mr, mag_maf, im_list = lsld_model.run( + ln_im_mr, mag_maf, im_list + ) + gf_im_list += lsld_info['Output'] diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index 1ead4ebe3..8e2fd39c7 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ 
b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -322,34 +322,34 @@ def interpolate_spatial_parameters(self, parameters): lat_station = [site['lat'] for site in self.stations] lon_station = [site['lon'] for site in self.stations] # slope - if parameters["Slope"] == "Defined (\"Slope\") in Site File (.csv)": - self.slope = np.array([site['Slope'] for site in self.stations]) + if parameters["Slope"] == "Defined (\"slope\") in Site File (.csv)": + self.slope = np.array([site['slope'] for site in self.stations]) else: self.slope = sampleRaster(parameters["Slope"], parameters["inputCRS"],\ lon_station, lat_station) # t_slope - if parameters["SlopeThickness"] == "Defined (\"SlopeThickness\") in Site File (.csv)": - self.t_slope = np.array([site['SlopeThickness'] for site in self.stations]) + if parameters["SlopeThickness"] == "Defined (\"slopeThickness\") in Site File (.csv)": + self.t_slope = np.array([site['slopeThickness'] for site in self.stations]) elif parameters["SlopeThickness"] == "Use constant value (m)": self.t_slope = np.array([parameters["SlopeThicknessValue"]]*len(self.stations)) else: self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\ lon_station, lat_station) # gamma_soil - if parameters["GammaSoil"] == "Defined (\"GammaSoil\") in Site File (.csv)": - self.gamma_soil = np.array([site['GammaSoil'] for site in self.stations]) + if parameters["GammaSoil"] == "Defined (\"gammaSoil\") in Site File (.csv)": + self.gamma_soil = np.array([site['gammaSoil'] for site in self.stations]) elif parameters["GammaSoil"] == "Use constant value (kN/m^3)": - self.gamma_soil = np.array(parameters["GammaSoilValue"]*len(self.stations)) + self.gamma_soil = np.array([parameters["GammaSoilValue"]]*len(self.stations)) else: self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\ lon_station, lat_station) # phi_soil - if parameters["PhiSoil"] == "Defined (\"PhiSoil\") in Site File (.csv)": - self.phi_soil = np.array([site['PhiSoil'] for site in self.stations]) + if parameters["PhiSoil"] == "Defined (\"phiSoil\") in Site File (.csv)": + self.phi_soil = np.array([site['phiSoil'] for site in self.stations]) elif parameters["PhiSoil"] == "Use constant value (deg)": - self.phi_soil = np.array(parameters["PhiSoilValue"]*len(self.stations)) - elif parameters["PhiSoil"] == "Infer from Geologic Map": - if parameters["CohesionSoil"] == "Infer from Geologic Map": + self.phi_soil = np.array([parameters["PhiSoilValue"]]*len(self.stations)) + elif parameters["PhiSoil"] == "Infer from Geologic Map (Bain et al. 2022)": + if parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 
2022)": self.phi_soil, self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ parameters['inputCRS'], lon_station, lat_station) else: @@ -360,11 +360,11 @@ def interpolate_spatial_parameters(self, parameters): lon_station, lat_station) # coh_soil if self.coh_soil is None: - if parameters["CohesionSoil"] == "Defined (\"CohesionSoil\") in Site File (.csv)": - self.coh_soil = np.array([site['CohesionSoil'] for site in self.stations]) + if parameters["CohesionSoil"] == "Defined (\"cohesionSoil\") in Site File (.csv)": + self.coh_soil = np.array([site['cohesionSoil'] for site in self.stations]) elif parameters["CohesionSoil"] == "Use constant value (kPa)": - self.coh_soil = np.array(parameters["CohesionSoilValue"]*len(self.stations)) - elif parameters["CohesionSoil"] == "Infer from Geologic Map": + self.coh_soil = np.array([parameters["CohesionSoilValue"]]*len(self.stations)) + elif parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 2022)": self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ parameters['inputCRS'], lon_station, lat_station) else: From e9bef0385e24450a32febfc0e4ec0ca3fd09123b Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 13 Aug 2024 18:08:37 -0700 Subject: [PATCH 4/9] Fix needed imports when merging ruff dafe fixes --- .../regionalGroundMotion/HazardSimulationEQ.py | 1 + .../regionalGroundMotion/ScenarioForecast.py | 1 + 2 files changed, 2 insertions(+) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index e1c3f4ab1..a242a8eb9 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -550,6 +550,7 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0914, PLR0915 if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype + from jpype import imports from jpype.types import * # noqa: F403 memory_total = psutil.virtual_memory().total / (1024.0**3) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 94fbca023..7664f6e62 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -99,6 +99,7 @@ if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype + from jpype import imports from jpype.types import * # noqa: F403 memory_total = psutil.virtual_memory().total / (1024.0**3) From a50555dcf9f72371e43410077d7a122ee748c0c9 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Wed, 14 Aug 2024 10:50:01 -0700 Subject: [PATCH 5/9] after codespell --- modules/performREC/pyrecodes/run_pyrecodes.py | 2 +- .../regionalGroundMotion/ScenarioForecast.py | 3 ++- .../regionalGroundMotion/landslide.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py index 87cbf2123..4b41c8251 100644 --- a/modules/performREC/pyrecodes/run_pyrecodes.py +++ b/modules/performREC/pyrecodes/run_pyrecodes.py @@ -183,7 +183,7 @@ def run_pyrecodes(rec_config, inputRWHALE, parallelType, 
mpiExec, numPROC): comm.Barrier() # if rank 0, gather result_agg and resilience_results, write to file - # note that the gathered results dosen't follow the order in realization_to_run + # note that the gathered results doesn't follow the order in realization_to_run # but this order is not needed when calculating mean and std if doParallel: # gather results_agg diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 7664f6e62..5f4725fa4 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -99,7 +99,8 @@ if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype - from jpype import imports + # from jpype import imports + import jpype.imports from jpype.types import * # noqa: F403 memory_total = psutil.virtual_memory().total / (1024.0**3) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index 8e2fd39c7..a901df9b9 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -217,7 +217,7 @@ def erfinv_coeff(order=20): def erfinv(x, order=20): """returns inverse erf(x)""" - # get coeffcients + # get coefficients c = erfinv_coeff(order) # initialize root_pi_over_2 = np.sqrt(np.pi)/2 @@ -237,7 +237,7 @@ def norm2_ppf(p, loc, scale): def erfinv_2d(x, order=20): """returns inverse erf(x)""" - # get coeffcients + # get coefficients c = erfinv_coeff(order) # initialize root_pi_over_2 = np.sqrt(np.pi)/2 From c81daeb744a67f13cb960279a22783b436024245 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Wed, 14 Aug 2024 11:17:23 -0700 Subject: [PATCH 6/9] after ruff check --add-noqa --- modules/Workflow/computeResponseSpectrum.py | 8 +- modules/Workflow/createGM4BIM.py | 44 +- modules/Workflow/whale/main.py | 32 +- modules/common/simcenter_common.py | 12 +- modules/createEVENT/CFDEvent/CFDEvent.py | 2 +- .../EmptyDomainCFD/EmptyDomainCFD.py | 2 +- .../EmptyDomainCFD/post_process_output.py | 14 +- .../GeoClawOpenFOAM/AddBuildingForces.py | 4 +- .../createEVENT/GeoClawOpenFOAM/GeoClaw.py | 2 +- .../GeoClawOpenFOAM/GeoClawBathy.py | 2 +- .../GeoClawOpenFOAM/GetOpenFOAMEvent.py | 8 +- modules/createEVENT/GeoClawOpenFOAM/flume.py | 6 +- .../createEVENT/GeoClawOpenFOAM/hydroUtils.py | 10 +- .../GeoClawOpenFOAM/of7Alpboundary.py | 6 +- .../GeoClawOpenFOAM/of7Building.py | 4 +- .../createEVENT/GeoClawOpenFOAM/of7Decomp.py | 4 +- .../GeoClawOpenFOAM/of7Geometry.py | 4 +- .../createEVENT/GeoClawOpenFOAM/of7Initial.py | 6 +- .../GeoClawOpenFOAM/of7Materials.py | 6 +- .../createEVENT/GeoClawOpenFOAM/of7Meshing.py | 10 +- .../createEVENT/GeoClawOpenFOAM/of7Others.py | 4 +- .../GeoClawOpenFOAM/of7Prboundary.py | 6 +- .../createEVENT/GeoClawOpenFOAM/of7Process.py | 8 +- .../GeoClawOpenFOAM/of7PtDboundary.py | 10 +- .../createEVENT/GeoClawOpenFOAM/of7Solve.py | 12 +- .../GeoClawOpenFOAM/of7Turbulence.py | 4 +- .../GeoClawOpenFOAM/of7Uboundary.py | 8 +- .../createEVENT/GeoClawOpenFOAM/openfoam7.py | 26 +- .../createEVENT/GeoClawOpenFOAM/osuFlume.py | 2 +- .../createEVENT/GeoClawOpenFOAM/userFlume.py | 2 +- .../IsolatedBuildingCFD.py | 2 +- 
.../createEVENT/Istanbul/IstanbulStations.py | 2 +- modules/createEVENT/M9/M9API.py | 2 +- modules/createEVENT/M9/M9Stations.py | 2 +- modules/createEVENT/MPM/MPM.py | 2 +- .../createEVENT/MPM/post_process_output.py | 14 +- .../SurroundedBuildingCFD.py | 2 +- .../post_process_output.py | 14 +- .../coupledDigitalTwin/CoupledDigitalTwin.py | 2 +- .../IntensityMeasureComputer.py | 2 +- .../siteResponse/RegionalSiteResponse.py | 10 +- .../stochasticWave/StochasticWave.py | 2 +- modules/createSAM/AutoSDA/beam_component.py | 2 +- modules/createSAM/AutoSDA/column_component.py | 2 +- modules/createSAM/AutoSDA/connection_part.py | 2 +- modules/createSAM/AutoSDA/help_functions.py | 32 +- modules/performDL/pelicun3/DL_visuals.py | 42 +- .../performHUA/pyincore_data/censusutil.py | 10 +- modules/performREC/pyrecodes/run_pyrecodes.py | 584 ++++++++++------- .../regionalGroundMotion/CreateStation.py | 30 +- .../regionalGroundMotion/FetchOpenQuake.py | 20 +- .../regionalGroundMotion/HazardOccurrence.py | 12 +- .../HazardSimulationEQ.py | 17 +- .../regionalGroundMotion/ScenarioForecast.py | 1 + .../gmpe/CorrelationModel.py | 22 +- .../gmpe/SignificantDurationModel.py | 6 +- .../regionalGroundMotion/gmpe/openSHAGMPE.py | 2 +- .../regionalGroundMotion/landslide.py | 592 +++++++++++------- .../regionalGroundMotion/liquefaction.py | 24 +- .../ComputeIntensityMeasure.py | 4 +- .../regionalWindField/CreateScenario.py | 32 +- .../regionalWindField/CreateStation.py | 2 +- .../regionalWindField/WindFieldSimulation.py | 4 +- modules/performUQ/SimCenterUQ/PLoM/PLoM.py | 12 +- .../SimCenterUQ/PLoM/PLoM_library.py | 20 +- modules/performUQ/SimCenterUQ/PLoM/general.py | 20 +- modules/performUQ/SimCenterUQ/runPLoM.py | 8 +- .../performUQ/UCSD_UQ/defaultLogLikeScript.py | 2 +- modules/performUQ/UCSD_UQ/mwg_sampler.py | 2 +- modules/performUQ/UCSD_UQ/runFEM.py | 2 +- modules/performUQ/UCSD_UQ/runTMCMC.py | 2 +- .../performUQ/common/ERAClasses/ERACond.py | 12 +- .../performUQ/common/ERAClasses/ERADist.py | 160 ++--- .../performUQ/common/ERAClasses/ERANataf.py | 32 +- .../performUQ/common/ERAClasses/ERARosen.py | 28 +- modules/performUQ/other/UQpyRunner.py | 2 +- .../systemPerformance/REWET/REWET/Damage.py | 24 +- .../REWET/REWET/EnhancedWNTR/epanet/io.py | 22 +- .../REWET/REWET/EnhancedWNTR/network/model.py | 2 +- .../REWET/REWET/EnhancedWNTR/sim/epanet.py | 14 +- .../REWET/REWET/EnhancedWNTR/sim/io.py | 12 +- .../REWET/REWET/Input/Policy_IO.py | 6 +- .../REWET/REWET/Input/Settings.py | 2 +- .../REWET/REWET/Output/GUI_Curve_API.py | 16 +- .../systemPerformance/REWET/REWET/initial.py | 2 +- .../REWET/REWET/restoration/base.py | 8 +- .../REWET/REWET/restoration/io.py | 6 +- .../REWET/REWET/restoration/model.py | 12 +- .../REWET/REWET/restoration/registry.py | 12 +- .../systemPerformance/REWET/REWET/timeline.py | 4 +- .../systemPerformance/REWET/preprocessorIO.py | 2 +- 91 files changed, 1253 insertions(+), 959 deletions(-) diff --git a/modules/Workflow/computeResponseSpectrum.py b/modules/Workflow/computeResponseSpectrum.py index 66c4296ec..24871ed74 100644 --- a/modules/Workflow/computeResponseSpectrum.py +++ b/modules/Workflow/computeResponseSpectrum.py @@ -23,7 +23,7 @@ def convert_accel_units(acceleration, from_, to_='cm/s/s'): # noqa: C901 acceleration = np.asarray(acceleration) if from_ == 'g': if to_ == 'g': - return acceleration + return acceleration # noqa: DOC201 if to_ in m_sec_square: return acceleration * g if to_ in cm_sec_square: @@ -70,7 +70,7 @@ def get_velocity_displacement( velocity = time_step * 
cumtrapz(acceleration, initial=0.0) if displacement is None: displacement = time_step * cumtrapz(velocity, initial=0.0) - return velocity, displacement + return velocity, displacement # noqa: DOC201 class NewmarkBeta: @@ -160,7 +160,7 @@ def run(self): 'PGV': np.max(np.fabs(self.velocity)), 'PGD': np.max(np.fabs(self.displacement)), } - return self.response_spectrum, time_series, accel, vel, disp + return self.response_spectrum, time_series, accel, vel, disp # noqa: DOC201 def _newmark_beta(self, omega, cval, kval): # noqa: ARG002 """Newmark-beta integral @@ -216,4 +216,4 @@ def _newmark_beta(self, omega, cval, kval): # noqa: ARG002 disp[j, :] = delta_u + disp[j - 1, :] a_t[j, :] = ground_acc[j] + accel[j, :] - return accel, vel, disp, a_t + return accel, vel, disp, a_t # noqa: DOC201 diff --git a/modules/Workflow/createGM4BIM.py b/modules/Workflow/createGM4BIM.py index 3bc5f2297..889fa3d1b 100644 --- a/modules/Workflow/createGM4BIM.py +++ b/modules/Workflow/createGM4BIM.py @@ -75,7 +75,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_time = output_units.get('time', 'sec') f_time = globals().get(unit_time, None) if f_time is None: - raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: DOC501, EM102, RUF100, TRY003 scale_factors = {} @@ -88,7 +88,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 # get the scale factor to standard units f_in = globals().get(input_unit, None) if f_in is None: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'Input unit for event files not recognized: {input_unit}' # noqa: EM102 ) @@ -98,7 +98,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 unit_type = base_unit_type if unit_type is None: - raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: DOC501, EM102, RUF100, TRY003 # the output unit depends on the unit type if unit_type == 'acceleration': @@ -111,7 +111,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 f_out = 1.0 / f_length else: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102 ) @@ -120,7 +120,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors + return scale_factors # noqa: DOC201 def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, D103, N802, N803, PLR0914, PLR0915 @@ -410,28 +410,28 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, m_pgd_y = 0.0 s_pgd_y = 0.0 # add to dictionary - dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID)) + dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID)) # noqa: RUF031 # pga - dict_im[('PGA', 0, 1, 'median')].append(m_pga_x) - dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x) - dict_im[('PGA', 0, 2, 'median')].append(m_pga_y) - dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y) + dict_im[('PGA', 0, 1, 'median')].append(m_pga_x) # noqa: RUF031 + dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x) # noqa: RUF031 + dict_im[('PGA', 0, 2, 'median')].append(m_pga_y) # noqa: RUF031 + dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y) # noqa: RUF031 # pgv - dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x) - 
dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x)
-            dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y)
-            dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y)
+            dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x)  # noqa: RUF031
+            dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x)  # noqa: RUF031
+            dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y)  # noqa: RUF031
+            dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y)  # noqa: RUF031
             # pgd
-            dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x)
-            dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x)
-            dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y)
-            dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y)
+            dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x)  # noqa: RUF031
+            dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x)  # noqa: RUF031
+            dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y)  # noqa: RUF031
+            dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y)  # noqa: RUF031
             for jj, Ti in enumerate(periods):  # noqa: N806
                 cur_sa = f'SA({Ti}s)'
-                dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj])
-                dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj])
-                dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj])
-                dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj])
+                dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj])  # noqa: RUF031
+                dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj])  # noqa: RUF031
+                dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj])  # noqa: RUF031
+                dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj])  # noqa: RUF031

     # aggregate
     for cur_key, cur_value in dict_im.items():
diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py
index 6f92dd86a..260fdba53 100644
--- a/modules/Workflow/whale/main.py
+++ b/modules/Workflow/whale/main.py
@@ -310,7 +310,7 @@ def create_command(command_list, enforced_python=None):
         for command_arg in command_list[1:]:
             command += f'"{command_arg}" '

-    return command
+    return command  # noqa: DOC201


 def run_command(command):
@@ -357,7 +357,7 @@ def run_command(command):

         py_script.main(arg_list)

-        return '', ''
+        return '', ''  # noqa: DOC201

     else:  # noqa: RET505
         # fmk with Shell=True not working on older windows machines, new approach needed for quoted command .. turn into a list
@@ -668,7 +668,7 @@ def get_command_list(self, app_path, force_posix=False):  # noqa: FBT002, C901

     # pp.pprint(arg_list)

-    return arg_list
+    return arg_list  # noqa: DOC201


 class Workflow:  # noqa: PLR0904
@@ -857,7 +857,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''):  # noqa: C901
         if app_type_obj == None:  # noqa: E711
             err = 'The application ' + app_type + ' is not found in the app registry'
-            raise WorkFlowInputError(err)  # noqa: DOC501
+            raise WorkFlowInputError(err)  # noqa: DOC501, RUF100

         # Finally check to see if the app registry contains the provided application
         if app_type_obj.get(app_in) == None:  # noqa: E711
             err = (
                 'The application '
                 + app_type
                 + ' is not found in the app registry: '
                 + app_in
             )
             print('Error', app_in)  # noqa: T201
-            raise WorkFlowInputError(err)  # noqa: DOC501
+            raise WorkFlowInputError(err)  # noqa: DOC501, RUF100

         appData = app_dict['ApplicationData']  # noqa: N806
         #
@@ -878,7 +878,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''):  # noqa: C901

         # Check if the app object was created successfully
         if app_object is None:
-            raise WorkFlowInputError(f'Application deep copy failed for {app_type}')  # noqa: DOC501, EM102, TRY003
+            raise WorkFlowInputError(f'Application deep copy failed for {app_type}')  # noqa: DOC501, EM102, RUF100, TRY003

         # only assign the app to the workflow if it has an executable
         if app_object.rel_path is None:
@@ -1081,7 +1081,7 @@ def _parse_inputs(self):  # noqa: C901
         # Events are special because they are in an array
         if 'Events' in requested_apps:
             if len(requested_apps['Events']) > 1:
-                raise WorkFlowInputError(  # noqa: DOC501, TRY003
+                raise WorkFlowInputError(  # noqa: DOC501, RUF100, TRY003
                     'Currently, WHALE only supports a single event.'  # noqa: EM101
                 )
             for event in requested_apps['Events'][
@@ -1104,7 +1104,7 @@ def _parse_inputs(self):  # noqa: C901
                     )

                     if app_object is None:
-                        raise WorkFlowInputError(  # noqa: DOC501
+                        raise WorkFlowInputError(  # noqa: DOC501, RUF100
                             'Application entry missing for {}'.format('Events')  # noqa: EM103
                         )

@@ -1114,12 +1114,12 @@ def _parse_inputs(self):  # noqa: C901
                         self.workflow_apps['Event'] = app_object
                     else:
-                        raise WorkFlowInputError(  # noqa: DOC501, TRY003
+                        raise WorkFlowInputError(  # noqa: DOC501, RUF100, TRY003
                             'Currently, only earthquake and wind events are supported. '  # noqa: EM102
                             f'EventClassification must be Earthquake, not {eventClassification}'
                         )
                 else:
-                    raise WorkFlowInputError('Need Event Classification')  # noqa: DOC501, EM101, TRY003
+                    raise WorkFlowInputError('Need Event Classification')  # noqa: DOC501, EM101, RUF100, TRY003

         # Figure out what types of assets are coming into the analysis
         assetObjs = requested_apps.get('Assets', None)  # noqa: N806
@@ -1130,7 +1130,7 @@ def _parse_inputs(self):  # noqa: C901

         # Check if asset list is not empty
         if len(assetObjs) == 0:
-            raise WorkFlowInputError('The provided asset object is empty')  # noqa: DOC501, EM101, TRY003
+            raise WorkFlowInputError('The provided asset object is empty')  # noqa: DOC501, EM101, RUF100, TRY003

         # Iterate through the asset objects
         for assetObj in assetObjs:  # noqa: N806
@@ -1316,7 +1316,7 @@ def create_asset_files(self):

         log_div()

-        return assetFilesList
+        return assetFilesList  # noqa: DOC201

     def augment_asset_files(self):  # noqa: C901
         """Short description
@@ -1504,7 +1504,7 @@ def augment_asset_files(self):  # noqa: C901
             )
         log_div()

-        return assetFilesList
+        return assetFilesList  # noqa: DOC201

     def perform_system_performance_assessment(self, asset_type):
         """For an asset type run the system level performance assessment application
@@ -1525,7 +1525,7 @@ def perform_system_performance_assessment(self, asset_type):
                 prepend_timestamp=False,
             )
             log_div()
-            return False
+            return False  # noqa: DOC201

         if performance_app.rel_path == None:  # noqa: E711
             log_msg(
@@ -1905,7 +1905,7 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'):  # noqa: C901, N8
                 prepend_timestamp=False,
             )
             log_div()
-            return dst
+            return dst  # noqa: DOC201

     def cleanup_simdir(self, asst_id):
         """Short description
@@ -2730,7 +2730,7 @@ def estimate_losses(  # noqa: C901
                     ],
                 )
                 if ('PID', '0') in df_res.columns:
-                    del df_res[('PID', '0')]
+                    del df_res[('PID', '0')]  # noqa: RUF031

                 # store the EDP statistics in the output DF
                 for col in np.transpose(col_info):
diff --git a/modules/common/simcenter_common.py b/modules/common/simcenter_common.py
index 03f3a1054..6e4b319c4 100644
--- a/modules/common/simcenter_common.py
+++ b/modules/common/simcenter_common.py
@@ -237,7 +237,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
     unit_time = output_units.get('time', 'sec')
     f_time = globals().get(unit_time, None)
     if f_time is None:
-        raise ValueError(f'Specified time unit not recognized: {unit_time}')  # noqa: DOC501, EM102, TRY003
+        raise ValueError(f'Specified time unit not recognized: {unit_time}')  # noqa: DOC501, EM102, RUF100, TRY003

     scale_factors = {}

@@ -253,7 +253,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
             f_in = globals().get(input_unit, None)
             if f_in is None:
-                raise ValueError(f'Input unit not recognized: {input_unit}')  # noqa: DOC501, EM102, TRY003
+                raise ValueError(f'Input unit not recognized: {input_unit}')  # noqa: DOC501, EM102, RUF100, TRY003

             unit_type = None
             for base_unit_type, unit_set in globals()['unit_types'].items():
@@ -261,7 +261,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
                     unit_type = base_unit_type

             if unit_type is None:
-                raise ValueError(f'Failed to identify unit type: {input_unit}')  # noqa: DOC501, EM102, TRY003
+                raise ValueError(f'Failed to identify unit type: {input_unit}')  # noqa: DOC501, EM102, RUF100, TRY003

             # the output unit depends on the unit type
             if unit_type == 'acceleration':
@@ -274,7 +274,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
                 f_out = 1.0 / f_length

             else:
-                raise ValueError(  # noqa: DOC501, TRY003
+                raise ValueError(  # noqa: DOC501, RUF100, TRY003
                     f'Unexpected unit type in workflow: {unit_type}'  # noqa: EM102
                 )

@@ -283,7 +283,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901

             scale_factors.update({input_name: f_scale})

-    return scale_factors
+    return scale_factors  # noqa: DOC201


 def get_unit_bases(input_units):
@@ -306,4 +306,4 @@ def get_unit_bases(input_units):
                 input_unit_bases = cur_unit_bases
                 break

-    return input_unit_bases
+    return input_unit_bases  # noqa: DOC201
diff --git a/modules/createEVENT/CFDEvent/CFDEvent.py b/modules/createEVENT/CFDEvent/CFDEvent.py
index f3a58f36a..01326808c 100644
--- a/modules/createEVENT/CFDEvent/CFDEvent.py
+++ b/modules/createEVENT/CFDEvent/CFDEvent.py
@@ -13,7 +13,7 @@ def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(patternsArray, force, direction, floor):  # noqa: ARG001, N802, N803
diff --git a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py
index 1db2a3ca1..449aa07f6 100644
--- a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py
+++ b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py
@@ -13,7 +13,7 @@ def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(patternsArray, force, direction, floor):  # noqa: ARG001, N802, N803
diff --git a/modules/createEVENT/EmptyDomainCFD/post_process_output.py b/modules/createEVENT/EmptyDomainCFD/post_process_output.py
index f11de48cf..160877c2e 100644
--- a/modules/createEVENT/EmptyDomainCFD/post_process_output.py
+++ b/modules/createEVENT/EmptyDomainCFD/post_process_output.py
@@ -88,7 +88,7 @@ def readPressureProbes(fileName):  # noqa: N802, N803
     time = np.asarray(time, dtype=np.float32)
     p = np.asarray(p, dtype=np.float32)

-    return probes, time, p
+    return probes, time, p  # noqa: DOC201


 def read_pressure_data(file_names):
@@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name):  # noqa: N802

     sField = np.asarray(sField, dtype=np.float32)  # noqa: N806

-    return sField  # noqa: RET504
+    return sField  # noqa: DOC201, RET504


 def read_openFoam_vector_field(file_name):  # noqa: N802
@@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_openFoam_tensor_field(file_name):  # noqa: N802
@@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_openFoam_symmetric_tensor_field(file_name):  # noqa: N802
@@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_velocity_data(path):
@@ -462,7 +462,7 @@ def read_velocity_probes(fileName):  # noqa: N803
     time = np.asarray(time, dtype=np.float32)
     U = np.asarray(U, dtype=np.float32)  # noqa: N806

-    return probes, time, U
+    return probes, time, U  # noqa: DOC201


 def calculate_length_scale(u, uav, dt, min_corr=0.0):
@@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0):

     L = uav * np.trapz(corr, dx=dt)  # noqa: NPY201, N806

-    return L  # noqa: RET504
+    return L  # noqa: DOC201, RET504


 def psd(x, dt, nseg):  # noqa: F811
diff --git a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py
index f932a5f86..46235bb09 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py
@@ -9,7 +9,7 @@ def validateCaseDirectoryStructure(caseDir):  # noqa: N802, N803
     It also checks that system directory contains the controlDict
     """  # noqa: D205, D400, D401, D404
     if not os.path.isdir(caseDir):  # noqa: PTH112
-        return False
+        return False  # noqa: DOC201

     caseDirList = os.listdir(caseDir)  # noqa: N806
     necessaryDirs = ['0', 'constant', 'system']  # noqa: N806
@@ -27,7 +27,7 @@ def findFunctionsDictionary(controlDictLines):  # noqa: N802, N803
     """This method will find functions dictionary in the controlDict"""  # noqa: D400, D401, D404
     for line in controlDictLines:
         if line.startswith('functions'):
-            return (True, controlDictLines.index(line) + 2)
+            return (True, controlDictLines.index(line) + 2)  # noqa: DOC201

     return [False, len(controlDictLines)]
diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py
index 491aff653..dc0544dec 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py
@@ -80,4 +80,4 @@ def creategeom(self, data, path):
         # Points of interest
         bottompts = self.getbathy(maxvalues, minvalues, data)  # noqa: F841

-        return 0
+        return 0  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py
index 2fed354a4..280b462bf 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py
@@ -61,4 +61,4 @@ def creategeom(self, data, path):  # noqa: ARG002, PLR6301
         # Create a utilities object
         hydroutil = hydroUtils()  # noqa: F841

-        return 0
+        return 0  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py
index c4098273e..24f05dccd 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py
@@ -16,7 +16,7 @@ def validateCaseDirectoryStructure(caseDir):  # noqa: N802, N803
     It also checks that system directory contains the controlDict
     """  # noqa: D205, D400, D401, D404
     if not os.path.isdir(caseDir):  # noqa: PTH112
-        return False
+        return False  # noqa: DOC201

     caseDirList = os.listdir(caseDir)  # noqa: N806
     necessaryDirs = ['0', 'constant', 'system', 'postProcessing']  # noqa: N806
@@ -36,7 +36,7 @@ def parseForceComponents(forceArray):  # noqa: N802, N803
     x = float(components[0])
     y = float(components[1])
     z = float(components[2])
-    return [x, y, z]
+    return [x, y, z]  # noqa: DOC201


 def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime):  # noqa: N80
             forces[i].Y.append(fpry + fvy + fpoy)
             forces[i].Z.append(fprz + fvz + fpoz)

-    return [deltaT, forces]
+    return [deltaT, forces]  # noqa: DOC201


 def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(  # noqa: N802
diff --git a/modules/createEVENT/GeoClawOpenFOAM/flume.py b/modules/createEVENT/GeoClawOpenFOAM/flume.py
index d7cd179d7..12a2cb8e7 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/flume.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/flume.py
@@ -110,7 +110,7 @@ def generateflume(self, breadth, path):
         )  # Write bottom STL file

         # Return extreme values
-        return extremeval
+        return extremeval  # noqa: DOC201

     #############################################################
     def flumedata(self, IpPTFile):  # noqa: N803
@@ -178,7 +178,7 @@ def flumedata(self, IpPTFile):  # noqa: N803
         self.npt = np.delete(self.npt, noindexes, axis=0)

         # Return extreme values
-        return extremeval
+        return extremeval  # noqa: DOC201

     ####################################################################
     def right(self):
@@ -431,4 +431,4 @@ def extremedata(self, extreme, breadth):  # noqa: PLR6301
         )
         tempfileID.close  # noqa: B018

-        return 0
+        return 0  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py
index 2722a554f..a9c4eae15 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py
@@ -90,7 +90,7 @@ def extract(self, obj, path, ind, arr):  # noqa: C901
             else:
                 arr.append(None)

-        return arr
+        return arr  # noqa: DOC201

     #############################################################
     def extract_element_from_json(self, obj, path):
@@ -106,7 +106,7 @@ def extract_element_from_json(self, obj, path):
         """  # noqa: D205, D401
         if isinstance(obj, dict):  # noqa: RET503
-            return self.extract(obj, path, 0, [])
+            return self.extract(obj, path, 0, [])  # noqa: DOC201
         elif isinstance(obj, list):  # noqa: RET505
             outer_arr = []
             for item in obj:
@@ -129,7 +129,7 @@ def general_header(self):  # noqa: PLR6301
 | | O |
 \\*---------------------------------------------------------------------------*/ \n\n"""  # noqa: W291

-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     ####################################################################
     def of7header(self, OFclass, location, filename):  # noqa: N803, PLR6301
@@ -156,7 +156,7 @@ class {OFclass};
 }}
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""  # noqa: W291

-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def hydrolog(self, projname, fipath):
@@ -210,4 +210,4 @@ def getlist(self, data):  # noqa: PLR6301
         data = data.replace(',', ' ')
         results = [float(n) for n in data.split()]

-        return results  # noqa: RET504
+        return results  # noqa: DOC201, RET504
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py
index de03da459..bad5dbab3 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py
@@ -89,7 +89,7 @@ def Alptext(self, data, patches):  # noqa: N802
         Alptext = Alptext + '}\n\n'  # noqa: N806, PLR6104

         # Return the text for velocity BC
-        return Alptext  # noqa: RET504
+        return Alptext  # noqa: DOC201, RET504

     #############################################################
     def Alpheader(self):  # noqa: N802, PLR6301
@@ -114,7 +114,7 @@ def Alpheader(self):  # noqa: N802, PLR6301
         header = header + 'internalField\tuniform\t0;\n\n'  # noqa: PLR6104

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def Alppatchtext(self, Alptype, patchname):  # noqa: ARG002, N802, N803, PLR6301
@@ -140,4 +140,4 @@ def Alppatchtext(self, Alptype, patchname):  # noqa: ARG002, N802, N803, PLR6301
         Alptext = Alptext + 'type\tzeroGradient;\n\t}\n'  # noqa: N806, PLR6104

         # Return the header for U file
-        return Alptext
+        return Alptext  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py
index 3f3956ba1..acefbf5bd 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py
@@ -100,7 +100,7 @@ def buildcheck(self, data, path):  # noqa: C901, PLR0911, PLR6301
             data, ['Events', 'BuildingSTLFile']
         )
         if stlfile == [None]:
-            return -1
+            return -1  # noqa: DOC201
         else:  # noqa: RET505
             stlfile = ', '.join(
                 hydroutil.extract_element_from_json(
@@ -218,7 +218,7 @@ def createbuilds(self, data, path):
         elif buildeftype == 'Parameters':
             self.buildpara(data, path)

-        return 0
+        return 0  # noqa: DOC201

     #############################################################
     def buildmanual(self, data, path):
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py
index 11af41f32..bf7396354 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py
@@ -75,7 +75,7 @@ def decomptext(self, data):

         decomptext = decomptext + 'method\tscotch;\n\n'  # noqa: PLR6104

-        return decomptext  # noqa: RET504
+        return decomptext  # noqa: DOC201, RET504

     #############################################################
     def decompheader(self):  # noqa: PLR6301
@@ -97,7 +97,7 @@ def decompheader(self):  # noqa: PLR6301
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""  # noqa: W291

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def scripts(self, data, path):  # noqa: ARG002, PLR6301
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py
index dc41e46c9..9f9670cb1 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py
@@ -82,7 +82,7 @@ def geomcheck(self, data, path):  # noqa: C901, PLR0911, PLR6301
             data, ['Events', 'NumBathymetryFiles']
         )
         if numbathy == [None]:
-            return -1
+            return -1  # noqa: DOC201
         else:  # noqa: RET505
             numbathy = ', '.join(
                 hydroutil.extract_element_from_json(
@@ -250,7 +250,7 @@ def createOFSTL(self, data, path):  # noqa: C901, N802, PLR6301
             # Create geometry (i.e. STL files) and extreme file
             ecode = finalgeom.creategeom(data, path)
             if ecode < 0:
-                return -1
+                return -1  # noqa: DOC201

         # Bathymetry only
         elif int(simtype) == 2:  # noqa: PLR2004
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py
index da6e38042..1b2431cac 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py
@@ -173,7 +173,7 @@ def alphatext(self, data, fipath):

         alphatext = alphatext + '\n);'  # noqa: PLR6104

-        return alphatext  # noqa: RET504
+        return alphatext  # noqa: DOC201, RET504

     #############################################################
     def alphaheader(self):  # noqa: PLR6301
@@ -195,7 +195,7 @@ def alphaheader(self):  # noqa: PLR6301
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""  # noqa: W291

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def alphacheck(self, data, fipath):  # noqa: PLR6301
@@ -220,7 +220,7 @@ def alphacheck(self, data, fipath):  # noqa: PLR6301
             fname = 'SWAlpha.txt'
             swalphafile = os.path.join(fipath, fname)  # noqa: PTH118
             if not os.path.exists(swalphafile):  # noqa: PTH110
-                return -1
+                return -1  # noqa: DOC201

         # For all types other than the shallow water
         else:
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py
index ea7a5190f..17f07fa86 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py
@@ -120,7 +120,7 @@ def mattext(self, data):

         mattext = mattext + 'sigma\t[1 0 -2 0 0 0 0]\t' + sigma + ';\n'

-        return mattext  # noqa: RET504
+        return mattext  # noqa: DOC201, RET504

     #############################################################
     def matheader(self):  # noqa: PLR6301
@@ -142,7 +142,7 @@ def matheader(self):  # noqa: PLR6301
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""  # noqa: W291

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def matcheck(self, data):  # noqa: PLR6301
@@ -162,7 +162,7 @@ def matcheck(self, data):  # noqa: PLR6301
             data, ['Events', 'WaterViscosity']
         )
         if nuwater == [None]:
-            return -1
+            return -1  # noqa: DOC201

         # Exponent
         nuwaterexp = hydroutil.extract_element_from_json(
             data, ['Events', 'WaterViscosityExp']
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py
index 8f852de88..d02cb6300 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py
@@ -72,7 +72,7 @@ def meshcheck(self, data, fipath):  # noqa: PLR6301

         # If hydro mesher - nothing to check
         if int(mesher[0]) == 0:
-            return 0
+            return 0  # noqa: DOC201

         # Other mesh software
         elif int(mesher[0]) == 1:  # noqa: RET505
@@ -126,7 +126,7 @@ def meshheader(self, fileobjec):  # noqa: PLR6301
         )

         # Return the header for meshing file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def bmeshtext(self, data):
@@ -284,7 +284,7 @@ def bmeshtext(self, data):
         # Add merge patch pairs
         bmeshtext = bmeshtext + 'mergePatchPairs\n(\n);\n'  # noqa: PLR6104

-        return bmeshtext  # noqa: RET504
+        return bmeshtext  # noqa: DOC201, RET504

     #############################################################
     def sfetext(self):
@@ -320,7 +320,7 @@ def sfetext(self):
         elif int(data_geoext[6]) == 3:  # noqa: PLR2004
             sfetext = sfetext + 'OtherBuilding.stl\n' + stlinfo + '\n\n'

-        return sfetext
+        return sfetext  # noqa: DOC201

     #############################################################
     def shmtext(self, data):
@@ -505,7 +505,7 @@ def shmtext(self, data):
         shmtext = shmtext + 'debug\t0;\n'  # noqa: PLR6104
         shmtext = shmtext + 'mergeTolerance\t1E-6;\n'  # noqa: PLR6104

-        return shmtext  # noqa: RET504
+        return shmtext  # noqa: DOC201, RET504

     #############################################################
     def scripts(self, data, path):  # noqa: C901, PLR6301
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py
index e8da1252a..206d429e9 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py
@@ -78,7 +78,7 @@ def othersheader(self, fileclas, fileloc, fileobjec):  # noqa: PLR6301
         )

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def gfiletext(self, data):
@@ -140,4 +140,4 @@ def gfiletext(self, data):
             + ');\n'
         )

-        return gfiletext  # noqa: RET504
+        return gfiletext  # noqa: DOC201, RET504
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py
index f44d4ba98..957991624 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py
@@ -93,7 +93,7 @@ def Prtext(self, data, patches):  # noqa: N802
         prtext = prtext + '}\n\n'  # noqa: PLR6104

         # Return the text for velocity BC
-        return prtext  # noqa: RET504
+        return prtext  # noqa: DOC201, RET504

     #############################################################
     def Prheader(self):  # noqa: N802, PLR6301
@@ -118,7 +118,7 @@ def Prheader(self):  # noqa: N802, PLR6301
         header = header + 'internalField\tuniform\t0;\n\n'  # noqa: PLR6104

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def Prpatchtext(self, data, Prtype, patchname):  # noqa: C901, N802, N803, PLR6301
@@ -208,4 +208,4 @@ def Prpatchtext(self, data, Prtype, patchname):  # noqa: C901, N802, N803, PLR63
         Prtext = Prtext + 'type\tempty;\n\t}\n'  # noqa: N806, PLR6104

         # Return the header for U file
-        return Prtext
+        return Prtext  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py
index 72682f23d..eaed69feb 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py
@@ -132,7 +132,7 @@ def pprocesstext(self, data, path):  # noqa: PLR6301
         sampletext = sampletext + ');\n\n'  # noqa: PLR6104
         sampletext = sampletext + 'fields\t' + fieldtext + ';\n'

-        return sampletext  # noqa: RET504
+        return sampletext  # noqa: DOC201, RET504

     #############################################################
     def pprocesscdict(self, data, path):  # noqa: C901, PLR6301
@@ -275,7 +275,7 @@ def pprocesscdict(self, data, path):  # noqa: C901, PLR6301
         cdicttext = cdicttext + '\t\tfields\t' + fieldtext + ';\n'
         cdicttext = cdicttext + '\t}\n}'  # noqa: PLR6104

-        return cdicttext  # noqa: RET504
+        return cdicttext  # noqa: DOC201, RET504

     #############################################################
     def scripts(self, data, path):  # noqa: ARG002, PLR6301
@@ -293,7 +293,7 @@ def scripts(self, data, path):  # noqa: ARG002, PLR6301
             data, ['Events', 'Postprocessing']
         )
         if pprocess == [None]:
-            return 0
+            return 0  # noqa: DOC201
         else:  # noqa: RET505
             pprocess = ', '.join(
                 hydroutil.extract_element_from_json(
@@ -350,7 +350,7 @@ def pprocesscheck(self, data, path):  # noqa: PLR6301
         )

         if pprocess == 'No':
-            return 0
+            return 0  # noqa: DOC201
         else:  # noqa: RET505
             pprocessV = ', '.join(  # noqa: N806
                 hydroutil.extract_element_from_json(data, ['Events', 'PPVelocity'])
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py
index 72a5a8615..9a72c2653 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py
@@ -115,7 +115,7 @@ def PtDcheck(self, data, patches):  # noqa: N802, PLR6301
             if (int(Utype) == 103) or (int(Utype) == 104):  # noqa: PLR2004
                 numMovWall += 1  # noqa: N806
                 if numMovWall > 0:
-                    return 1
+                    return 1  # noqa: DOC201

         if numMovWall == 0:
             return 0
@@ -169,7 +169,7 @@ def PtDtext(self, data, fipath, patches):  # noqa: N802
         ptdtext = ptdtext + '}\n\n'  # noqa: PLR6104

         # Return the text for pointDisplacement
-        return ptdtext  # noqa: RET504
+        return ptdtext  # noqa: DOC201, RET504

     #############################################################
     def PtDheader(self):  # noqa: N802, PLR6301
@@ -194,7 +194,7 @@ def PtDheader(self):  # noqa: N802, PLR6301
         header = header + 'internalField\tuniform (0 0 0);\n\n'  # noqa: PLR6104

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def PtDpatchtext(self, data, Utype, patchname, fipath):  # noqa: ARG002, N802, N803
@@ -243,7 +243,7 @@ def PtDpatchtext(self, data, Utype, patchname, fipath):  # noqa: ARG002, N802, N
             PtDtext = PtDtext + 'value\tuniform (0 0 0);\n'  # noqa: N806, PLR6104
             PtDtext = PtDtext + '\t}\n'  # noqa: N806, PLR6104

-        return PtDtext
+        return PtDtext  # noqa: DOC201

     #############################################################
     def getNormal(self, patchname):  # noqa: N802, PLR6301
@@ -267,4 +267,4 @@ def getNormal(self, patchname):  # noqa: N802, PLR6301
         elif (patchname == 'Building') or (patchname == 'OtherBuilding'):  # noqa: PLR1714
             normal = '1 0 0'

-        return normal
+        return normal  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py
index 02ca4fc12..6e62f2f43 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py
@@ -73,7 +73,7 @@ def solverheader(self, fileobjec):  # noqa: PLR6301
         )

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def fvSchemetext(self, data):  # noqa: ARG002, N802
@@ -163,7 +163,7 @@ def fvSchemetext(self, data):  # noqa: ARG002, N802
         fvSchemetext = fvSchemetext + 'alpha.water;\n'  # noqa: N806, PLR6104
         fvSchemetext = fvSchemetext + '}\n'  # noqa: N806, PLR6104

-        return fvSchemetext  # noqa: RET504
+        return fvSchemetext  # noqa: DOC201, RET504

     #############################################################
     def fvSolntext(self, data):  # noqa: N802
@@ -280,7 +280,7 @@ def fvSolntext(self, data):  # noqa: N802
         fvSolntext = fvSolntext + 'fields\n\t{\n\t}\n\t'  # noqa: N806, PLR6104
         fvSolntext = fvSolntext + 'equations\n\t{\n\t\t".*"\t1;\n\t}\n}'  # noqa: N806, PLR6104

-        return fvSolntext  # noqa: RET504
+        return fvSolntext  # noqa: DOC201, RET504

     #############################################################
     def cdicttext(self, data):
@@ -349,7 +349,7 @@ def cdicttext(self, data):
         cdicttext = cdicttext + 'maxAlphaCo \t 1.0;\n\n'  # noqa: PLR6104
         cdicttext = cdicttext + 'maxDeltaT \t 1;\n\n'  # noqa: PLR6104

-        return cdicttext  # noqa: RET504
+        return cdicttext  # noqa: DOC201, RET504

     #############################################################
     def cdictcheck(self, data):  # noqa: PLR6301
@@ -366,7 +366,7 @@ def cdictcheck(self, data):  # noqa: PLR6301
         # Start time
         startT = hydroutil.extract_element_from_json(data, ['Events', 'StartTime'])  # noqa: N806
         if startT == [None]:
-            return -1
+            return -1  # noqa: DOC201

         # End time
         endT = hydroutil.extract_element_from_json(data, ['Events', 'EndTime'])  # noqa: N806
@@ -489,4 +489,4 @@ def cdictFtext(self, data):  # noqa: N802
         cdicttext = cdicttext + 'direction\t(1 0 0);\n\t\t\t'  # noqa: PLR6104
         cdicttext = cdicttext + 'cumulative\tno;\n\t\t}\n\t}\n}'  # noqa: PLR6104

-        return cdicttext  # noqa: RET504
+        return cdicttext  # noqa: DOC201, RET504
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py
index d3e42227f..aba38ce8b 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py
@@ -84,7 +84,7 @@ def turbtext(self, data):
         turbtext = turbtext + '\tturbulence\ton;\n'  # noqa: PLR6104
         turbtext = turbtext + '\tprintCoeffs\ton;\n}\n'  # noqa: PLR6104

-        return turbtext
+        return turbtext  # noqa: DOC201

     #############################################################
     def turbheader(self):  # noqa: PLR6301
@@ -106,4 +106,4 @@ def turbheader(self):  # noqa: PLR6301
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""  # noqa: W291

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py
index d2f95e84d..eb16f74e5 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py
@@ -105,7 +105,7 @@ def Utext(self, data, fipath, patches):  # noqa: N802
         utext = utext + '}\n\n'  # noqa: PLR6104

         # Return the text for velocity BC
-        return utext  # noqa: RET504
+        return utext  # noqa: DOC201, RET504

     #############################################################
     def Uheader(self):  # noqa: N802, PLR6301
@@ -130,7 +130,7 @@ def Uheader(self):  # noqa: N802, PLR6301
         header = header + 'internalField\tuniform (0 0 0);\n\n'  # noqa: PLR6104

         # Return the header for U file
-        return header  # noqa: RET504
+        return header  # noqa: DOC201, RET504

     #############################################################
     def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):  # noqa: C901, N802, N803
@@ -345,7 +345,7 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):  # noqa: C901,
             Utext = Utext + 'type\tempty;\n\t}\n'  # noqa: N806, PLR6104

         # Return the header for U file
-        return Utext
+        return Utext  # noqa: DOC201

     #############################################################
     def Uchecks(self, data, fipath, patches):  # noqa: C901, N802, PLR6301
@@ -384,7 +384,7 @@ def Uchecks(self, data, fipath, patches):  # noqa: C901, N802, PLR6301
                 # Checking for multiple moving walls
                 numMovWall += 1  # noqa: N806
                 if numMovWall > 1:
-                    return -1
+                    return -1  # noqa: DOC201

                 # Check for existing moving wall files
                 dispfilename = hydroutil.extract_element_from_json(
diff --git a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py
index 8ec5da444..1bd139d66 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py
@@ -169,7 +169,7 @@ def createfolder(self, data, path, args):  # noqa: PLR6301
         scriptfile.close()

         # Return completion flag
-        return 0
+        return 0  # noqa: DOC201

     #############################################################
     def creategeometry(self, data, path):  # noqa: PLR6301
@@ -192,7 +192,7 @@ def creategeometry(self, data, path):  # noqa: PLR6301
         # Create the geometry related files
         Geometry = of7Geometry()  # noqa: N806
         if int(mesher[0]) == 1:
-            return 0
+            return 0  # noqa: DOC201
         elif int(mesher[0]) == 0 or int(mesher[0]) == 2:  # noqa: RET505, PLR2004
             geomcode = Geometry.geomcheck(data, path)
             if geomcode == -1:
@@ -245,7 +245,7 @@ def createmesh(self, data, path):  # noqa: PLR6301
         Meshing = of7Meshing()  # noqa: N806
         meshcode = Meshing.meshcheck(data, path)
         if meshcode == -1:
-            return -1
+            return -1  # noqa: DOC201
         elif int(mesher[0]) == 0:  # noqa: RET505
             # blockMesh
             bmeshtext = Meshing.bmeshtext(data)
@@ -295,7 +295,7 @@ def materials(self, data, path):  # noqa: PLR6301
         Materials = of7Materials()  # noqa: N806
         matcode = Materials.matcheck(data)
         if matcode == -1:
-            return -1
+            return -1  # noqa: DOC201
         else:  # noqa: RET505
             mattext = Materials.mattext(data)
             fname = 'transportProperties'
@@ -320,7 +320,7 @@ def initial(self, data, path):  # noqa: PLR6301
         Inicond = of7Initial()  # noqa: N806
         initcode = Inicond.alphacheck(data, path)
         if initcode == -1:
-            return -1
+            return -1  # noqa: DOC201
         else:  # noqa: RET505
             alphatext = Inicond.alphatext(data, path)
             fname = 'setFieldsDict'
@@ -355,7 +355,7 @@ def boundary(self, data, path):  # noqa: PLR6301
         # Check for boundary conditions here
         ecode = Uboundary.Uchecks(data, path, patches)
         if ecode == -1:
-            return -1
+            return -1  # noqa: DOC201
         else:  # noqa: RET505
             # Write the U-file if no errors
             # Path to the file
@@ -421,7 +421,7 @@ def turbulence(self, data, path):  # noqa: PLR6301
             turbfile.write(turbtext)
             turbfile.close()

-        return 0
+        return 0  # noqa: DOC201

     #############################################################
     def parallelize(self, data, path):  # noqa: PLR6301
@@ -445,7 +445,7 @@ def parallelize(self, data, path):  # noqa: PLR6301
         # Scripts
         Decomp.scripts(data, path)

-        return 0
+        return 0  # noqa: DOC201

     #############################################################
     def solve(self, data, path):  # noqa: PLR6301
@@ -478,7 +478,7 @@ def solve(self, data, path):  # noqa: PLR6301
         # controlDict
         ecode = Solve.cdictcheck(data)
         if ecode == -1:
-            return -1
+            return -1  # noqa: DOC201
         else:  # noqa: RET505
             cdicttext = Solve.cdicttext(data)
             fname = 'controlDict'
@@ -516,7 +516,7 @@ def others(self, data, path):  # noqa: PLR6301
             gfile.write(gfiletext)
             gfile.close()

-        return 0
+        return 0  # noqa: DOC201

     #############################################################
     def dakota(self, args):  # noqa: PLR6301
@@ -533,7 +533,7 @@ def dakota(self, args):  # noqa: PLR6301
         # Dakota Scripts
         dakota.dakotascripts(args)

-        return 0
+        return 0  # noqa: DOC201

     #############################################################
     def postprocessing(self, data, path):  # noqa: PLR6301
@@ -550,7 +550,7 @@ def postprocessing(self, data, path):  # noqa: PLR6301
         # controlDict
         ecode = pprocess.pprocesscheck(data, path)
         if ecode == -1:
-            return -1
+            return -1  # noqa: DOC201
         elif ecode == 0:  # noqa: RET505
             return 0
         else:
@@ -589,4 +589,4 @@ def cleaning(self, args, path):  # noqa: PLR6301
         # Dakota Scripts
         cleaner.cleaning(args, path)

-        return 0
+        return 0  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py
index a81430f26..ce0906e06 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py
@@ -104,4 +104,4 @@ def creategeom(self, data, path):  # noqa: ARG002, PLR6301
         # Write extreme values and building data to temporary file for later usage
         flumeobj.extremedata(extreme, breadth)

-        return 0
+        return 0  # noqa: DOC201
diff --git a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py
index 2ae6bbfbf..42e51dec0 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py
@@ -102,4 +102,4 @@ def creategeom(self, data, path):  # noqa: PLR6301
         # Write extreme values and building data to temporary file for later usage
         flumeobj.extremedata(extreme, breadth)

-        return 0
+        return 0  # noqa: DOC201
diff --git a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py
index 995c5c327..90b043020 100644
--- a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py
+++ b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py
@@ -13,7 +13,7 @@ def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(patternsArray, force, direction, floor):  # noqa: ARG001, N802, N803
diff --git a/modules/createEVENT/Istanbul/IstanbulStations.py b/modules/createEVENT/Istanbul/IstanbulStations.py
index a8d2cb2c6..8ad973292 100644
--- a/modules/createEVENT/Istanbul/IstanbulStations.py
+++ b/modules/createEVENT/Istanbul/IstanbulStations.py
@@ -190,7 +190,7 @@ def haversine(lat1, lon1, lat2, lon2):
     r = 6371  # Radius of the Earth in kilometers
     distance = r * c

-    return distance  # noqa: RET504
+    return distance  # noqa: DOC201, RET504


 if __name__ == '__main__':
diff --git a/modules/createEVENT/M9/M9API.py b/modules/createEVENT/M9/M9API.py
index 3e1abb4e5..cd63320e8 100644
--- a/modules/createEVENT/M9/M9API.py
+++ b/modules/createEVENT/M9/M9API.py
@@ -332,4 +332,4 @@ def haversine(lat1, lon1, lat2, lon2):
     r = 6371  # Radius of the Earth in kilometers
     distance = r * c

-    return distance  # noqa: RET504
+    return distance  # noqa: DOC201, RET504
diff --git a/modules/createEVENT/M9/M9Stations.py b/modules/createEVENT/M9/M9Stations.py
index aa393b17f..43d29fd49 100644
--- a/modules/createEVENT/M9/M9Stations.py
+++ b/modules/createEVENT/M9/M9Stations.py
@@ -229,4 +229,4 @@ def haversine(lat1, lon1, lat2, lon2):
     r = 6371  # Radius of the Earth in kilometers
     distance = r * c

-    return distance  # noqa: RET504
+    return distance  # noqa: DOC201, RET504
diff --git a/modules/createEVENT/MPM/MPM.py b/modules/createEVENT/MPM/MPM.py
index 47f0b8832..eda226128 100644
--- a/modules/createEVENT/MPM/MPM.py
+++ b/modules/createEVENT/MPM/MPM.py
@@ -13,7 +13,7 @@ def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(  # noqa: N802
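A note on the recurring DOC201/RET504 pairs in the hunks above: DOC201 flags a return value that is not described in the function's docstring, and RET504 flags a value that is assigned and then immediately returned. Instead of suppressing both, code like the haversine helpers patched above could satisfy the rules directly. The sketch below is illustrative only, not part of this patch, and assumes nothing beyond the standard library:

import math


def haversine(lat1, lon1, lat2, lon2):
    """Compute the great-circle distance between two points.

    Returns:
        float: Distance in kilometers, assuming an Earth radius of 6371 km.
    """
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    a = (
        math.sin(dphi / 2) ** 2
        + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
    )
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    # Returning the expression directly avoids RET504, and the documented
    # "Returns" section above satisfies DOC201, so no noqa comment is needed.
    return 6371 * c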
diff --git a/modules/createEVENT/MPM/post_process_output.py b/modules/createEVENT/MPM/post_process_output.py
index f11de48cf..160877c2e 100644
--- a/modules/createEVENT/MPM/post_process_output.py
+++ b/modules/createEVENT/MPM/post_process_output.py
@@ -88,7 +88,7 @@ def readPressureProbes(fileName):  # noqa: N802, N803
     time = np.asarray(time, dtype=np.float32)
     p = np.asarray(p, dtype=np.float32)

-    return probes, time, p
+    return probes, time, p  # noqa: DOC201


 def read_pressure_data(file_names):
@@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name):  # noqa: N802

     sField = np.asarray(sField, dtype=np.float32)  # noqa: N806

-    return sField  # noqa: RET504
+    return sField  # noqa: DOC201, RET504


 def read_openFoam_vector_field(file_name):  # noqa: N802
@@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_openFoam_tensor_field(file_name):  # noqa: N802
@@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_openFoam_symmetric_tensor_field(file_name):  # noqa: N802
@@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_velocity_data(path):
@@ -462,7 +462,7 @@ def read_velocity_probes(fileName):  # noqa: N803
     time = np.asarray(time, dtype=np.float32)
     U = np.asarray(U, dtype=np.float32)  # noqa: N806

-    return probes, time, U
+    return probes, time, U  # noqa: DOC201


 def calculate_length_scale(u, uav, dt, min_corr=0.0):
@@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0):

     L = uav * np.trapz(corr, dx=dt)  # noqa: NPY201, N806

-    return L  # noqa: RET504
+    return L  # noqa: DOC201, RET504


 def psd(x, dt, nseg):  # noqa: F811
diff --git a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py
index d48fbddb6..31e6a8d1a 100644
--- a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py
+++ b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py
@@ -13,7 +13,7 @@ def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(patternsArray, force, direction, floor):  # noqa: ARG001, N802, N803
diff --git a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py
index f11de48cf..160877c2e 100644
--- a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py
+++ b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py
@@ -88,7 +88,7 @@ def readPressureProbes(fileName):  # noqa: N802, N803
     time = np.asarray(time, dtype=np.float32)
     p = np.asarray(p, dtype=np.float32)

-    return probes, time, p
+    return probes, time, p  # noqa: DOC201


 def read_pressure_data(file_names):
@@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name):  # noqa: N802

     sField = np.asarray(sField, dtype=np.float32)  # noqa: N806

-    return sField  # noqa: RET504
+    return sField  # noqa: DOC201, RET504


 def read_openFoam_vector_field(file_name):  # noqa: N802
@@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_openFoam_tensor_field(file_name):  # noqa: N802
@@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_openFoam_symmetric_tensor_field(file_name):  # noqa: N802
@@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name):  # noqa: N802

     vField = np.asarray(vField, dtype=np.float32)  # noqa: N806

-    return vField  # noqa: RET504
+    return vField  # noqa: DOC201, RET504


 def read_velocity_data(path):
@@ -462,7 +462,7 @@ def read_velocity_probes(fileName):  # noqa: N803
     time = np.asarray(time, dtype=np.float32)
     U = np.asarray(U, dtype=np.float32)  # noqa: N806

-    return probes, time, U
+    return probes, time, U  # noqa: DOC201


 def calculate_length_scale(u, uav, dt, min_corr=0.0):
@@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0):

     L = uav * np.trapz(corr, dx=dt)  # noqa: NPY201, N806

-    return L  # noqa: RET504
+    return L  # noqa: DOC201, RET504


 def psd(x, dt, nseg):  # noqa: F811
diff --git a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py
index 256bf0eff..68282a2a0 100644
--- a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py
+++ b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py
@@ -13,7 +13,7 @@ def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(patternsArray, force, direction, floor):  # noqa: ARG001, N802, N803
diff --git a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py
index df764531c..ba2372a62 100644
--- a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py
+++ b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py
@@ -167,7 +167,7 @@ def convert_accel_units(self, acceleration, from_, to_='cm/sec/sec'):  # noqa: C
         acceleration = np.asarray(acceleration)
         if from_ == 'g':
             if to_ == 'g':
-                return acceleration
+                return acceleration  # noqa: DOC201
             if to_ in self.km_sec_square:
                 return acceleration * self.g / 1000.0
             if to_ in self.m_sec_square:
diff --git a/modules/createEVENT/siteResponse/RegionalSiteResponse.py b/modules/createEVENT/siteResponse/RegionalSiteResponse.py
index 5a01d37e0..d626663b8 100644
--- a/modules/createEVENT/siteResponse/RegionalSiteResponse.py
+++ b/modules/createEVENT/siteResponse/RegionalSiteResponse.py
@@ -86,7 +86,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
     unit_time = output_units.get('time', 'sec')
     f_time = globals().get(unit_time, None)
     if f_time is None:
-        raise ValueError(f'Specified time unit not recognized: {unit_time}')  # noqa: DOC501, EM102, TRY003
+        raise ValueError(f'Specified time unit not recognized: {unit_time}')  # noqa: DOC501, EM102, RUF100, TRY003

     scale_factors = {}

@@ -99,7 +99,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
         # get the scale factor to standard units
         f_in = globals().get(input_unit, None)
         if f_in is None:
-            raise ValueError(  # noqa: DOC501, TRY003
+            raise ValueError(  # noqa: DOC501, RUF100, TRY003
                 f'Input unit for event files not recognized: {input_unit}'  # noqa: EM102
            )

@@ -109,7 +109,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
                 unit_type = base_unit_type

         if unit_type is None:
-            raise ValueError(f'Failed to identify unit type: {input_unit}')  # noqa: DOC501, EM102, TRY003
+            raise ValueError(f'Failed to identify unit type: {input_unit}')  # noqa: DOC501, EM102, RUF100, TRY003

         # the output unit depends on the unit type
         if unit_type == 'acceleration':
@@ -122,7 +122,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
             f_out = 1.0 / f_length

         else:
-            raise ValueError(  # noqa: DOC501, TRY003
+            raise ValueError(  # noqa: DOC501, RUF100, TRY003
                 f'Unexpected unit type in workflow: {unit_type}'  # noqa: EM102
             )

@@ -131,7 +131,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901

         scale_factors.update({input_name: f_scale})

-    return scale_factors
+    return scale_factors  # noqa: DOC201


 def postProcess(evtName, input_units, f_scale_units):  # noqa: N802, N803, D103
diff --git a/modules/createEVENT/stochasticWave/StochasticWave.py b/modules/createEVENT/stochasticWave/StochasticWave.py
index 4c22a4d03..8e870168e 100644
--- a/modules/createEVENT/stochasticWave/StochasticWave.py
+++ b/modules/createEVENT/stochasticWave/StochasticWave.py
@@ -100,7 +100,7 @@ def directionToDof(direction):  # noqa: N802
     """Converts direction to degree of freedom"""  # noqa: D400, D401
     directioMap = {'X': 1, 'Y': 2, 'Z': 3}  # noqa: N806

-    return directioMap[direction]
+    return directioMap[direction]  # noqa: DOC201


 def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor):  # noqa: N802, N803
diff --git a/modules/createSAM/AutoSDA/beam_component.py b/modules/createSAM/AutoSDA/beam_component.py
index 041bb51cf..1b423f2c3 100644
--- a/modules/createSAM/AutoSDA/beam_component.py
+++ b/modules/createSAM/AutoSDA/beam_component.py
@@ -181,7 +181,7 @@ def check_flag(self):
         for key in self.is_feasible.keys():  # noqa: SIM118
             if self.is_feasible[key] == False:  # noqa: E712
                 self.flag = False
-        return self.flag
+        return self.flag  # noqa: DOC201

     def compute_demand_capacity_ratio(self):
         """This method is used to compute demand to capacity ratios.
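A note on the recurring `DOC501, RUF100` pairs in these hunks: DOC501 asks for raised exceptions to be documented in the docstring, and RUF100 is ruff's unused-`noqa` rule. Where DOC501 is not enabled in the active rule set, a bare `# noqa: DOC501` would itself be flagged as an unused suppression, so RUF100 is listed on the same comment to keep both checks quiet. A minimal sketch of the pattern follows; it is illustrative only, and `lookup_unit` is a hypothetical stand-in, not a function from this patch:

def lookup_unit(units: dict, name: str) -> float:
    """Return the scale factor registered for a unit name."""
    factor = units.get(name)
    if factor is None:
        # DOC501 wants this raise documented; where that rule is inactive,
        # RUF100 would flag the bare noqa as unused, so both codes share
        # one comment, exactly as in the hunks above.
        raise ValueError(f'Unit not recognized: {name}')  # noqa: DOC501, RUF100
    return factor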
diff --git a/modules/createSAM/AutoSDA/column_component.py b/modules/createSAM/AutoSDA/column_component.py
index cbd83b50e..fcdc29ee3 100644
--- a/modules/createSAM/AutoSDA/column_component.py
+++ b/modules/createSAM/AutoSDA/column_component.py
@@ -264,7 +264,7 @@ def check_flag(self):
         for key in self.is_feasible.keys():  # noqa: SIM118
             if self.is_feasible[key] == False:  # noqa: E712
                 self.flag = False
-        return self.flag
+        return self.flag  # noqa: DOC201

     def compute_demand_capacity_ratio(self):
         """This method is used to calculate the demand to capacity ratios for column components
diff --git a/modules/createSAM/AutoSDA/connection_part.py b/modules/createSAM/AutoSDA/connection_part.py
index 7459d1190..f987d68b9 100644
--- a/modules/createSAM/AutoSDA/connection_part.py
+++ b/modules/createSAM/AutoSDA/connection_part.py
@@ -740,4 +740,4 @@ def check_flag(self):
         for key in self.is_feasible.keys():  # noqa: SIM118
             if self.is_feasible[key] == False:  # noqa: E712
                 self.flag = False
-        return self.flag
+        return self.flag  # noqa: DOC201
diff --git a/modules/createSAM/AutoSDA/help_functions.py b/modules/createSAM/AutoSDA/help_functions.py
index 6fbdb23c6..e57b48b57 100644
--- a/modules/createSAM/AutoSDA/help_functions.py
+++ b/modules/createSAM/AutoSDA/help_functions.py
@@ -50,7 +50,7 @@ def determine_Fa_coefficient(site_class, Ss):  # noqa: C901, N802, N803
         Fa = None  # noqa: N806
         print('Site class is entered with an invalid value')  # noqa: T201

-    return Fa
+    return Fa  # noqa: DOC201


 def determine_Fv_coefficient(site_class, S1):  # noqa: C901, N802, N803
@@ -94,7 +94,7 @@ def determine_Fv_coefficient(site_class, S1):  # noqa: C901, N802, N803
         Fv = None  # noqa: N806
         print('Site class is entered with an invalid value')  # noqa: T201

-    return Fv
+    return Fv  # noqa: DOC201


 def calculate_DBE_acceleration(Ss, S1, Fa, Fv):  # noqa: N802, N803
@@ -111,7 +111,7 @@ def calculate_DBE_acceleration(Ss, S1, Fa, Fv):  # noqa: N802, N803
     SM1 = Fv * S1  # noqa: N806
     SDS = 2 / 3 * SMS  # noqa: N806
     SD1 = 2 / 3 * SM1  # noqa: N806
-    return SMS, SM1, SDS, SD1
+    return SMS, SM1, SDS, SD1  # noqa: DOC201


 def determine_Cu_coefficient(SD1):  # noqa: N802, N803
@@ -133,7 +133,7 @@ def determine_Cu_coefficient(SD1):  # noqa: N802, N803
     else:
         Cu = 1.4  # noqa: N806

-    return Cu
+    return Cu  # noqa: DOC201


 def determine_floor_height(
@@ -161,7 +161,7 @@ def determine_floor_height(
             level - 2
         )

-    return floor_height
+    return floor_height  # noqa: DOC201


 def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie):  # noqa: N802, N803
@@ -212,7 +212,7 @@ def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie):  # noqa: N802, N803
     else:
         pass

-    return Cs
+    return Cs  # noqa: DOC201


 def determine_k_coeficient(period):
@@ -227,7 +227,7 @@ def determine_k_coeficient(period):
     else:
         k = 1 + 0.5 * (period - 0.5)

-    return k
+    return k  # noqa: DOC201


 def calculate_seismic_force(base_shear, floor_weight, floor_height, k):
@@ -252,7 +252,7 @@ def calculate_seismic_force(base_shear, floor_weight, floor_height, k):
     for story in range(len(floor_weight) - 1, -1, -1):
         story_shear[story] = np.sum(seismic_force[story:])

-    return seismic_force, story_shear
+    return seismic_force, story_shear  # noqa: DOC201


 def find_section_candidate(target_depth, section_database):
@@ -267,7 +267,7 @@ def find_section_candidate(target_depth, section_database):
         if match:
             candidate_index.append(indx)
     candidates = section_database.loc[candidate_index, 'section size']
-    return candidates  # noqa: RET504
+    return candidates  # noqa: DOC201, RET504


 def search_member_size(target_name, target_quantity, candidate, section_database):
@@ -299,7 +299,7 @@ def search_member_size(target_name, target_quantity, candidate, section_database
     section_size = section_database.loc[
         candidate_index[min_index[0][0]], 'section size'
     ]
-    return section_size
+    return section_size  # noqa: DOC201


 def search_section_property(target_size, section_database):
@@ -316,7 +316,7 @@ def search_section_property(target_size, section_database):
         for indx in np.array(section_database['index']):
             if target_size == section_database.loc[indx, 'section size']:
                 section_info = section_database.loc[indx, :]
-                return section_info.to_dict()
+                return section_info.to_dict()  # noqa: DOC201
     except:  # noqa: E722
         sys.stderr.write(
             'Error: wrong size nominated!\nNo such size exists in section database!'
@@ -336,7 +336,7 @@ def decrease_member_size(candidate, current_size):
         # This means the smallest candidate still cannot make design drift close to drift limit,
         # which further means the smallest section candidate is too large.
         sys.stderr.write('The lower bound for depth initialization is too large!\n')
-    return candidate[candidate_pool_index + 1]
+    return candidate[candidate_pool_index + 1]  # noqa: DOC201


 def extract_depth(size):
@@ -346,7 +346,7 @@ def extract_depth(size):
     """  # noqa: D205, D400, D401, D404
     # Use Python regular expression to extract the char between 'W' and 'X', which then become depth
     output = re.findall(r'.*W(.*)X.*', size)
-    return int(output[0])
+    return int(output[0])  # noqa: DOC201


 def extract_weight(size):
@@ -357,7 +357,7 @@ def extract_weight(size):
     # Use Python regular expression to extract the char after 'W' to the end of the string,
     # which then becomes weight
     output = re.findall(r'.X(.*)', size)
-    return int(output[0])
+    return int(output[0])  # noqa: DOC201


 def constructability_helper(  # noqa: C901
@@ -541,7 +541,7 @@ def constructability_helper(  # noqa: C901
             variation_story.pop()
         # Update the ending index for next "identical story block"
         ending_index = variation_story[-1]

-    return section_size
+    return section_size  # noqa: DOC201
     #
     # Loop over all stories from top to bottom to consider the constructability
     # starting_story = total_story - 1
@@ -596,4 +596,4 @@ def increase_member_size(candidate, current_size):
     if candidate_pool_index - 1 < 0:  # Make sure the index does not exceed the bound
         # This means the largest candidate still fails to satisfy the requirement
         sys.stderr.write('The upper bound for depth initialization is too small!\n')
-    return candidate[candidate_pool_index - 1]
+    return candidate[candidate_pool_index - 1]  # noqa: DOC201
diff --git a/modules/performDL/pelicun3/DL_visuals.py b/modules/performDL/pelicun3/DL_visuals.py
index 57eaf8b5b..ee5e55fc2 100644
--- a/modules/performDL/pelicun3/DL_visuals.py
+++ b/modules/performDL/pelicun3/DL_visuals.py
@@ -116,26 +116,26 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):  # noqa: C901, D1
         5: cl.scales['5']['seq']['Reds'],
     }

-    if comp_data.loc[('Incomplete', '')] != 1:
+    if comp_data.loc[('Incomplete', '')] != 1:  # noqa: RUF031
         p_min, p_max = 0.01, 0.9
         d_min = np.inf
         d_max = -np.inf

         LS_count = 0  # noqa: N806
         for LS in limit_states:  # noqa: N806
-            if comp_data.loc[(LS, 'Family')] == 'normal':
+            if comp_data.loc[(LS, 'Family')] == 'normal':  # noqa: RUF031
                 d_min_i, d_max_i = norm.ppf(
                     [p_min, p_max],
-                    loc=comp_data.loc[(LS, 'Theta_0')],
-                    scale=comp_data.loc[(LS, 'Theta_1')]
-                    * comp_data.loc[(LS, 'Theta_0')],
+                    loc=comp_data.loc[(LS, 'Theta_0')],  # noqa: RUF031
+                    scale=comp_data.loc[(LS, 'Theta_1')]  # noqa: RUF031
+                    * comp_data.loc[(LS, 'Theta_0')],  # noqa: RUF031
                 )
-            elif comp_data.loc[(LS, 'Family')] == 'lognormal':
+            elif comp_data.loc[(LS, 'Family')] == 'lognormal':  # noqa: RUF031
                 d_min_i, d_max_i = np.exp(
                     norm.ppf(
                         [p_min, p_max],
-                        loc=np.log(comp_data.loc[(LS, 'Theta_0')]),
-                        scale=comp_data.loc[(LS, 'Theta_1')],
+                        loc=np.log(comp_data.loc[(LS, 'Theta_0')]),  # noqa: RUF031
+                        scale=comp_data.loc[(LS, 'Theta_1')],  # noqa: RUF031
                     )
                 )
             else:
@@ -149,18 +149,18 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):  # noqa: C901, D1
         demand_vals = np.linspace(d_min, d_max, num=100)

         for i_ls, LS in enumerate(limit_states):  # noqa: N806
-            if comp_data.loc[(LS, 'Family')] == 'normal':
+            if comp_data.loc[(LS, 'Family')] == 'normal':  # noqa: RUF031
                 cdf_vals = norm.cdf(
                     demand_vals,
-                    loc=comp_data.loc[(LS, 'Theta_0')],
-                    scale=comp_data.loc[(LS, 'Theta_1')]
-                    * comp_data.loc[(LS, 'Theta_0')],
+                    loc=comp_data.loc[(LS, 'Theta_0')],  # noqa: RUF031
+                    scale=comp_data.loc[(LS, 'Theta_1')]  # noqa: RUF031
+                    * comp_data.loc[(LS, 'Theta_0')],  # noqa: RUF031
                 )
-            elif comp_data.loc[(LS, 'Family')] == 'lognormal':
+            elif comp_data.loc[(LS, 'Family')] == 'lognormal':  # noqa: RUF031
                 cdf_vals = norm.cdf(
                     np.log(demand_vals),
-                    loc=np.log(comp_data.loc[(LS, 'Theta_0')]),
-                    scale=comp_data.loc[(LS, 'Theta_1')],
+                    loc=np.log(comp_data.loc[(LS, 'Theta_0')]),  # noqa: RUF031
+                    scale=comp_data.loc[(LS, 'Theta_1')],  # noqa: RUF031
                 )
             else:
                 continue
@@ -385,11 +385,11 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):  # noqa: C901, D1
             gridcolor='rgb(192,192,192)',
         )

-        demand_unit = comp_data.loc[('Demand', 'Unit')]
+        demand_unit = comp_data.loc[('Demand', 'Unit')]  # noqa: RUF031
         if demand_unit == 'unitless':
             demand_unit = '-'
         fig.update_xaxes(
-            title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]",
+            title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]",  # noqa: RUF031
             **shared_ax_props,
         )
@@ -465,7 +465,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):  # noqa: C901, D103,
         # perform plotting for each repair consequence type independently
         for c_type in repair_df.loc[comp_id].index:
             # load the component-specific part of the database
-            comp_data = repair_df.loc[(comp_id, c_type)]
+            comp_data = repair_df.loc[(comp_id, c_type)]  # noqa: RUF031

             # and the component-specific metadata - if it exists
             if repair_meta != None:  # noqa: E711
@@ -620,7 +620,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):  # noqa: C901, D103,
             ),
         }

-        if comp_data.loc[('Incomplete', '')] != 1:
+        if comp_data.loc[('Incomplete', '')] != 1:  # noqa: RUF031
             # set the parameters for displaying uncertainty
             p_min, p_max = 0.16, 0.84  # +- 1 std  # noqa: F841
@@ -923,13 +923,13 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):  # noqa: C901, D103,
             gridcolor='rgb(220,220,220)',
         )

-        quantity_unit = comp_data.loc[('Quantity', 'Unit')]
+        quantity_unit = comp_data.loc[('Quantity', 'Unit')]  # noqa: RUF031
         if quantity_unit in ['unitless', '1 EA', '1 ea']:  # noqa: PLR6201
             quantity_unit = '-'
         elif quantity_unit.split()[0] == '1':
             quantity_unit = quantity_unit.split()[1]

-        dv_unit = comp_data.loc[('DV', 'Unit')]
+        dv_unit = comp_data.loc[('DV', 'Unit')]  # noqa: RUF031
         if dv_unit == 'unitless':
             dv_unit = '-'
diff --git a/modules/performHUA/pyincore_data/censusutil.py b/modules/performHUA/pyincore_data/censusutil.py
index b34f8f1c9..e733b6f35 100644
--- a/modules/performHUA/pyincore_data/censusutil.py
+++ b/modules/performHUA/pyincore_data/censusutil.py
@@ -16,7 +16,7 @@
 import geopandas as gpd
 import pandas as pd
 import requests

-from pyincore_data import globals
+from pyincore_data import globals  # noqa: A004

 logger = globals.LOGGER

@@ -62,7 +62,7 @@ def generate_census_api_url(
         if county is None:
             error_msg = 'State and county value must be provided when geo_type is provided.'
             logger.error(error_msg)
-            raise Exception(error_msg)  # noqa: DOC501, TRY002
+            raise Exception(error_msg)  # noqa: DOC501, RUF100, TRY002

     # Set up url for Census API
     base_url = f'https://api.census.gov/data/{year}/{data_source}'
@@ -107,7 +107,7 @@ def request_census_api(data_url):
         api_json = request_json.json()
         api_df = pd.DataFrame(columns=api_json[0], data=api_json[1:])

-        return api_df  # noqa: RET504
+        return api_df  # noqa: DOC201, RET504

     @staticmethod
     def get_blockdata_for_demographics(  # noqa: C901
@@ -191,7 +191,7 @@ def get_blockdata_for_demographics(  # noqa: C901
         else:
             print('Only 2000, 2010, and 2020 decennial census supported')  # noqa: T201
-            return None
+            return None  # noqa: DOC201

         # Make directory to save output
         if not os.path.exists(output_dir):  # noqa: PTH110
@@ -860,7 +860,7 @@ def get_blockgroupdata_for_income(  # noqa: C901
         print('Done creating household income shapefile')  # noqa: T201

-        return cen_blockgroup[save_columns]
+        return cen_blockgroup[save_columns]  # noqa: DOC201

     @staticmethod
     def convert_dislocation_gpd_to_shapefile(in_gpd, programname, savefile):
diff --git a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py
index 4b41c8251..c98eeb1c5 100644
--- a/modules/performREC/pyrecodes/run_pyrecodes.py
+++ b/modules/performREC/pyrecodes/run_pyrecodes.py
@@ -1,182 +1,238 @@
-import json, os, shapely, argparse, sys, ujson, importlib
+import json, os, shapely, argparse, sys, ujson, importlib  # noqa: CPY001, INP001, I001, E401, D100
 import geopandas as gpd
 import numpy as np
 import pandas as pd
 from pathlib import Path
+
 # Delete below when pyrecodes can be installed as stand alone
-import sys
+import sys  # noqa: F811
+
 sys.path.insert(0, '/Users/jinyanzhao/Desktop/SimCenterBuild/r2d_pyrecodes/')
 from pyrecodes import main


-def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC):
-
+def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC):  # noqa: ARG001, C901, N803, D103
     # Initiate directory
-    rec_ouput_dir = os.path.join(inputRWHALE['runDir'],"Results", "Recovery")
-    if not os.path.exists(rec_ouput_dir):
-        os.mkdir(rec_ouput_dir)
+    rec_ouput_dir = os.path.join(inputRWHALE['runDir'], 'Results', 'Recovery')  # noqa: PTH118
+    if not os.path.exists(rec_ouput_dir):  # noqa: PTH110
+        os.mkdir(rec_ouput_dir)  # noqa: PTH102

     # Find the realizations to run
     damage_input = rec_config.pop('DamageInput')
-    realizations_to_run = select_realizations_to_run(\
-        damage_input,inputRWHALE)
-
+    realizations_to_run = select_realizations_to_run(damage_input, inputRWHALE)
+
     # Replace SimCenterDefault with correct path
-    cmp_lib = rec_config["ComponentLibrary"]
+    cmp_lib = rec_config['ComponentLibrary']
     if cmp_lib.startswith('SimCenterDefault'):
         cmp_lib_name = cmp_lib.split('/')[1]
-        cmp_lib_dir = os.path.dirname(os.path.realpath(__file__))
-        cmp_lib = os.path.join(cmp_lib_dir, cmp_lib_name)
-        rec_config["ComponentLibrary"] = cmp_lib
+        cmp_lib_dir = os.path.dirname(os.path.realpath(__file__))  # noqa: PTH120
+        cmp_lib = os.path.join(cmp_lib_dir, cmp_lib_name)  # noqa: PTH118
+        rec_config['ComponentLibrary'] = cmp_lib

     # loop through each realizations. Needs to be parallelized
     # Create the base of system configuration json
     system_configuration = create_system_configuration(rec_config)
     # Create the base of main json
-    main_json = dict()
-    main_json.update({"ComponentLibrary": {
-        "ComponentLibraryCreatorClass": "JSONComponentLibraryCreator",
-        "ComponentLibraryFile": rec_config["ComponentLibrary"]
-    }})
+    main_json = dict()  # noqa: C408
+    main_json.update(
+        {
+            'ComponentLibrary': {
+                'ComponentLibraryCreatorClass': 'JSONComponentLibraryCreator',
+                'ComponentLibraryFile': rec_config['ComponentLibrary'],
+            }
+        }
+    )

     # initialize a dict to accumulate recovery results stats
-    result_det_path = os.path.join(inputRWHALE['runDir'],"Results",
-                                   f"Results_det.json")
-    with open(result_det_path, 'r') as f:
+    result_det_path = os.path.join(  # noqa: PTH118
+        inputRWHALE['runDir'], 'Results', f'Results_det.json'  # noqa: F541
+    )
+    with open(result_det_path, 'r') as f:  # noqa: PTH123, PLW1514, UP015
         results_det = json.load(f)
-    result_agg = dict()
-    resilience_results = dict()
+    result_agg = dict()  # noqa: C408
+    resilience_results = dict()  # noqa: C408

     # Loop through realizations and run pyrecodes
-    numP = 1
-    procID = 0
-    doParallel = False
-    mpi_spec = importlib.util.find_spec("mpi4py")
+    numP = 1  # noqa: N806
+    procID = 0  # noqa: N806
+    doParallel = False  # noqa: N806
+    mpi_spec = importlib.util.find_spec('mpi4py')
     found = mpi_spec is not None
     if found and parallelType == 'parRUN':
-        import mpi4py
-        from mpi4py import MPI
+        import mpi4py  # noqa: PLC0415
+        from mpi4py import MPI  # noqa: PLC0415
+
         comm = MPI.COMM_WORLD
-        numP = comm.Get_size()
-        procID = comm.Get_rank()
-        if numP < 2:
-            doParallel = False
-            numP = 1
-            procID = 0
+        numP = comm.Get_size()  # noqa: N806
+        procID = comm.Get_rank()  # noqa: N806
+        if numP < 2:  # noqa: PLR2004
+            doParallel = False  # noqa: N806
+            numP = 1  # noqa: N806
+            procID = 0  # noqa: N806
         else:
-            doParallel = True
+            doParallel = True  # noqa: N806

     count = 0
-    needsInitiation = True
+    needsInitiation = True  # noqa: N806
     ind_in_rank = 0
-    for ind, rlz_ind in enumerate(realizations_to_run):
+    for ind, rlz_ind in enumerate(realizations_to_run):  # noqa: B007, PLR1702, FURB148
         # Create a realization directory
         if count % numP == procID:
-            rlz_dir = os.path.join(rec_ouput_dir,str(rlz_ind))
-            if not os.path.exists(rlz_dir):
-                os.mkdir(rlz_dir)
+            rlz_dir = os.path.join(rec_ouput_dir, str(rlz_ind))  # noqa: PTH118
+            if not os.path.exists(rlz_dir):  # noqa: PTH110
+                os.mkdir(rlz_dir)  # noqa: PTH102

             # Update the system_configuration json
-            damage_rlz_file = os.path.join(inputRWHALE['runDir'],"Results",\
-                                f"Results_{int(rlz_ind)}.json")
-            DamageInput = {"Type": "R2DDamageInput",
-                        "Parameters": {"DamageFile": damage_rlz_file}}
-            system_configuration.update({"DamageInput":DamageInput})
+            damage_rlz_file = os.path.join(  # noqa: PTH118
+                inputRWHALE['runDir'], 'Results', f'Results_{int(rlz_ind)}.json'
+            )
+            DamageInput = {  # noqa: N806
+                'Type': 'R2DDamageInput',
+                'Parameters': {'DamageFile': damage_rlz_file},
+            }
+            system_configuration.update({'DamageInput': DamageInput})

             # Write the system_configureation to a file
-            system_configuration_file = os.path.join(rlz_dir, \
-                "SystemConfiguration.json")
-            with open(system_configuration_file, 'w') as f:
+            system_configuration_file = os.path.join(  # noqa: PTH118
+                rlz_dir, 'SystemConfiguration.json'
+            )
+            with open(system_configuration_file, 'w') as f:  # noqa: PTH123, PLW1514
                 ujson.dump(system_configuration, f)
-
+
             # Update the main json
-            main_json.update({"System": {
-                "SystemCreatorClass":
"ConcreteSystemCreator", - "SystemClass": "BuiltEnvironmentSystem", - "SystemConfigurationFile": system_configuration_file - }}) + main_json.update( + { + 'System': { + 'SystemCreatorClass': 'ConcreteSystemCreator', + 'SystemClass': 'BuiltEnvironmentSystem', + 'SystemConfigurationFile': system_configuration_file, + } + } + ) # Write the main json to a file - main_file = os.path.join(rlz_dir, "main.json") - with open(main_file, 'w') as f: + main_file = os.path.join(rlz_dir, 'main.json') # noqa: PTH118 + with open(main_file, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(main_json, f) system = main.run(main_file) system.calculate_resilience() - # Append the recovery time to results_rlz + # Append the recovery time to results_rlz if needsInitiation: - needsInitiation = False - num_of_rlz_per_rank = int(np.floor(len(realizations_to_run)/numP)) - if procID < len(realizations_to_run)%numP: + needsInitiation = False # noqa: N806 + num_of_rlz_per_rank = int(np.floor(len(realizations_to_run) / numP)) + if procID < len(realizations_to_run) % numP: num_of_rlz_per_rank += 1 # Initialize resilience_results - resilience_results_buffer = dict() + resilience_results_buffer = dict() # noqa: C408 resilience_calculator_id = 0 - resilience_results.update({ - "time_steps": list(range(0, system.MAX_TIME_STEP+1)) - }) - resources_to_plot = system.resilience_calculators[resilience_calculator_id].system_supply.keys() - for resource_name in resources_to_plot: - resilience_results_buffer.update({ - resource_name: { - "Supply": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]), - "Demand": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]), - "Consumption": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]) + resilience_results.update( + {'time_steps': list(range(0, system.MAX_TIME_STEP + 1))} # noqa: PIE808 + ) + resources_to_plot = system.resilience_calculators[ + resilience_calculator_id + ].system_supply.keys() + for resource_name in resources_to_plot: + resilience_results_buffer.update( + { + resource_name: { + 'Supply': np.zeros( + [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1] + ), + 'Demand': np.zeros( + [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1] + ), + 'Consumption': np.zeros( + [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1] + ), + } } - }) + ) # Initialize result_agg - result_agg_buffer = dict() + result_agg_buffer = dict() # noqa: C408 for asset_type, item in results_det.items(): - asset_type_result = dict() + asset_type_result = dict() # noqa: C408 for asset_subtype, asset_subtype_item in item.items(): - asset_subtype_result = dict() - for aim_id, aim in asset_subtype_item.items(): - asset_subtype_result.update({aim_id:{ - "RecoveryDuration":np.zeros(num_of_rlz_per_rank) - }}) - asset_type_result.update({asset_subtype:asset_subtype_result}) - result_agg_buffer.update({asset_type:asset_type_result}) + asset_subtype_result = dict() # noqa: C408 + for aim_id, aim in asset_subtype_item.items(): # noqa: B007 + asset_subtype_result.update( + { + aim_id: { + 'RecoveryDuration': np.zeros( + num_of_rlz_per_rank + ) + } + } + ) + asset_type_result.update( + {asset_subtype: asset_subtype_result} + ) + result_agg_buffer.update({asset_type: asset_type_result}) del results_det - - resilience_result_rlz_i = dict() + + resilience_result_rlz_i = dict() # noqa: C408 for resource_name in resources_to_plot: - resilience_result_rlz_i.update({ - "time_steps": list(range(0, system.time_step+1)), + resilience_result_rlz_i.update( + { + 'time_steps': list(range(0, system.time_step + 1)), # noqa: PIE808 
resource_name: { - "Supply": system.resilience_calculators[resilience_calculator_id].system_supply[resource_name][:system.time_step+1], - "Demand": system.resilience_calculators[resilience_calculator_id].system_demand[resource_name][:system.time_step+1], - "Consumption": system.resilience_calculators[resilience_calculator_id].system_consumption[resource_name][:system.time_step+1] - } + 'Supply': system.resilience_calculators[ + resilience_calculator_id + ].system_supply[resource_name][: system.time_step + 1], + 'Demand': system.resilience_calculators[ + resilience_calculator_id + ].system_demand[resource_name][: system.time_step + 1], + 'Consumption': system.resilience_calculators[ + resilience_calculator_id + ].system_consumption[resource_name][ + : system.time_step + 1 + ], + }, } - ) - resilience_results_buffer[resource_name]['Supply'][ind_in_rank,:system.time_step+1] = \ - system.resilience_calculators[resilience_calculator_id].system_supply[resource_name][:system.time_step+1] - resilience_results_buffer[resource_name]['Demand'][ind_in_rank,:system.time_step+1] = \ - system.resilience_calculators[resilience_calculator_id].system_demand[resource_name][:system.time_step+1] - resilience_results_buffer[resource_name]['Consumption'][ind_in_rank,:system.time_step+1] = \ - system.resilience_calculators[resilience_calculator_id].system_consumption[resource_name][:system.time_step+1] - resilience_result_rlz_i_file = os.path.join(rlz_dir, "ResilienceResult.json") - with open(resilience_result_rlz_i_file, 'w') as f: + ) + resilience_results_buffer[resource_name]['Supply'][ + ind_in_rank, : system.time_step + 1 + ] = system.resilience_calculators[ + resilience_calculator_id + ].system_supply[resource_name][: system.time_step + 1] + resilience_results_buffer[resource_name]['Demand'][ + ind_in_rank, : system.time_step + 1 + ] = system.resilience_calculators[ + resilience_calculator_id + ].system_demand[resource_name][: system.time_step + 1] + resilience_results_buffer[resource_name]['Consumption'][ + ind_in_rank, : system.time_step + 1 + ] = system.resilience_calculators[ + resilience_calculator_id + ].system_consumption[resource_name][: system.time_step + 1] + resilience_result_rlz_i_file = os.path.join( # noqa: PTH118 + rlz_dir, 'ResilienceResult.json' + ) + with open(resilience_result_rlz_i_file, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(resilience_result_rlz_i, f) - result_file_name = os.path.join(inputRWHALE['runDir'],"Results", - f"Results_{rlz_ind}.json") - with open(result_file_name, 'r') as f: + result_file_name = os.path.join( # noqa: PTH118 + inputRWHALE['runDir'], 'Results', f'Results_{rlz_ind}.json' + ) + with open(result_file_name, 'r') as f: # noqa: PTH123, PLW1514, UP015 results = json.load(f) for comp in system.components: if getattr(comp, 'r2d_comp', False) is True: - recovery_duration = getattr(comp, 'recoverd_time_step',system.MAX_TIME_STEP) - \ - system.DISASTER_TIME_STEP + recovery_duration = ( + getattr(comp, 'recoverd_time_step', system.MAX_TIME_STEP) + - system.DISASTER_TIME_STEP + ) recovery_duration = max(0, recovery_duration) - results[comp.asset_type][comp.asset_subtype][comp.aim_id].update({ - "Recovery": {"Duration":recovery_duration} - }) - result_agg_buffer[comp.asset_type][comp.asset_subtype][comp.aim_id]\ - ['RecoveryDuration'][ind_in_rank] = recovery_duration - with open(result_file_name, 'w') as f: + results[comp.asset_type][comp.asset_subtype][comp.aim_id].update( + {'Recovery': {'Duration': recovery_duration}} + ) + 
result_agg_buffer[comp.asset_type][comp.asset_subtype][ + comp.aim_id + ]['RecoveryDuration'][ind_in_rank] = recovery_duration + with open(result_file_name, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(results, f) ind_in_rank += 1 - count = count + 1 + count = count + 1 # noqa: PLR6104 # wait for all to finish if doParallel: @@ -188,127 +244,196 @@ def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): if doParallel: # gather results_agg for asset_type, item in result_agg_buffer.items(): - asset_type_result = dict() + asset_type_result = dict() # noqa: C408 for asset_subtype, asset_subtype_item in item.items(): - asset_subtype_result = dict() - for aim_id, aim in asset_subtype_item.items(): - asset_subtype_result.update({aim_id:{ - "RecoveryDuration":comm.gather(result_agg_buffer[asset_type][asset_subtype], root=0) - }}) - asset_type_result.update({asset_subtype:asset_subtype_result}) - result_agg.update({asset_type:asset_type_result}) + asset_subtype_result = dict() # noqa: C408 + for aim_id, aim in asset_subtype_item.items(): # noqa: B007 + asset_subtype_result.update( + { + aim_id: { + 'RecoveryDuration': comm.gather( + result_agg_buffer[asset_type][asset_subtype], + root=0, + ) + } + } + ) + asset_type_result.update({asset_subtype: asset_subtype_result}) + result_agg.update({asset_type: asset_type_result}) # gather resilience_resutls for resource_name in resources_to_plot: if procID == 0: - resilience_results.update({ - resource_name: { - "Supply": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]), - "Demand": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]), - "Consumption": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]) + resilience_results.update( + { + resource_name: { + 'Supply': np.zeros( + [len(realizations_to_run), system.MAX_TIME_STEP + 1] + ), + 'Demand': np.zeros( + [len(realizations_to_run), system.MAX_TIME_STEP + 1] + ), + 'Consumption': np.zeros( + [len(realizations_to_run), system.MAX_TIME_STEP + 1] + ), + } } - }) - comm.gather(resilience_results_buffer[resource_name]["Supply"], - resilience_results[resource_name]["Supply"], root=0) - comm.gather(resilience_results_buffer[resource_name]["Demand"], - resilience_results[resource_name]["Demand"], root=0) - comm.gather(resilience_results_buffer[resource_name]["Consumption"], - resilience_results[resource_name]["Consumption"], root=0) + ) + comm.gather( + resilience_results_buffer[resource_name]['Supply'], + resilience_results[resource_name]['Supply'], + root=0, + ) + comm.gather( + resilience_results_buffer[resource_name]['Demand'], + resilience_results[resource_name]['Demand'], + root=0, + ) + comm.gather( + resilience_results_buffer[resource_name]['Consumption'], + resilience_results[resource_name]['Consumption'], + root=0, + ) else: - for resource_name in resources_to_plot: - resilience_results.update({ - resource_name: resilience_results_buffer[resource_name] - }) + for resource_name in resources_to_plot: + resilience_results.update( + {resource_name: resilience_results_buffer[resource_name]} + ) result_agg = result_agg_buffer - if procID==0: - # Calculate stats of the results and add to results_det.json - with open(result_det_path, 'r') as f: + if procID == 0: + # Calculate stats of the results and add to results_det.json + with open(result_det_path, 'r') as f: # noqa: PTH123, PLW1514, UP015 results_det = json.load(f) for asset_type, item in result_agg.items(): for asset_subtype, asset_subtype_item in item.items(): for aim_id, aim in asset_subtype_item.items(): 
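# A standalone sketch of the reduction this loop body performs (sample
# durations assumed; `entry` stands in for
# results_det[asset_type][asset_subtype][aim_id]):
#
#     import numpy as np
#     aim = {'RecoveryDuration': np.array([12.0, 40.0, 25.0, 0.0])}  # days, 4 realizations
#     entry = {}
#     entry.setdefault('R2Dres', {}).update({
#         'R2Dres_mean_RecoveryDuration': aim['RecoveryDuration'].mean(),
#         'R2Dres_std_RecoveryDuration': aim['RecoveryDuration'].std(),
#     })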
- if 'R2Dres' not in results_det[asset_type][asset_subtype][aim_id].keys(): - results_det[asset_type][asset_subtype][aim_id].update({'R2Dres':{}}) - results_det[asset_type][asset_subtype][aim_id]['R2Dres'].update({ - "R2Dres_mean_RecoveryDuration":aim['RecoveryDuration'].mean(), - "R2Dres_std_RecoveryDuration":aim['RecoveryDuration'].std() - }) - with open(result_det_path, 'w') as f: + if ( + 'R2Dres' # noqa: SIM118 + not in results_det[asset_type][asset_subtype][aim_id].keys() + ): + results_det[asset_type][asset_subtype][aim_id].update( + {'R2Dres': {}} + ) + results_det[asset_type][asset_subtype][aim_id]['R2Dres'].update( + { + 'R2Dres_mean_RecoveryDuration': aim[ + 'RecoveryDuration' + ].mean(), + 'R2Dres_std_RecoveryDuration': aim[ + 'RecoveryDuration' + ].std(), + } + ) + with open(result_det_path, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(results_det, f) - - recovery_result_path = os.path.join(rec_ouput_dir, "ResilienceResult.json") - for resource_name in resources_to_plot: - resilience_results[resource_name].update({ - 'R2Dres_mean_Supply':resilience_results[resource_name]['Supply'].mean(axis=0).tolist(), - 'R2Dres_std_Supply':resilience_results[resource_name]['Supply'].std(axis=0).tolist(), - 'R2Dres_mean_Demand':resilience_results[resource_name]['Demand'].mean(axis=0).tolist(), - 'R2Dres_std_Demand':resilience_results[resource_name]['Demand'].std(axis=0).tolist(), - 'R2Dres_mean_Consumption':resilience_results[resource_name]['Consumption'].mean(axis=0).tolist(), - 'R2Dres_std_Consumption':resilience_results[resource_name]['Consumption'].std(axis=0).tolist() - }) - resilience_results[resource_name].pop("Supply") - resilience_results[resource_name].pop("Demand") - resilience_results[resource_name].pop("Consumption") - - - with open(recovery_result_path, 'w') as f: + + recovery_result_path = os.path.join(rec_ouput_dir, 'ResilienceResult.json') # noqa: PTH118 + for resource_name in resources_to_plot: + resilience_results[resource_name].update( + { + 'R2Dres_mean_Supply': resilience_results[resource_name]['Supply'] + .mean(axis=0) + .tolist(), + 'R2Dres_std_Supply': resilience_results[resource_name]['Supply'] + .std(axis=0) + .tolist(), + 'R2Dres_mean_Demand': resilience_results[resource_name]['Demand'] + .mean(axis=0) + .tolist(), + 'R2Dres_std_Demand': resilience_results[resource_name]['Demand'] + .std(axis=0) + .tolist(), + 'R2Dres_mean_Consumption': resilience_results[resource_name][ + 'Consumption' + ] + .mean(axis=0) + .tolist(), + 'R2Dres_std_Consumption': resilience_results[resource_name][ + 'Consumption' + ] + .std(axis=0) + .tolist(), + } + ) + resilience_results[resource_name].pop('Supply') + resilience_results[resource_name].pop('Demand') + resilience_results[resource_name].pop('Consumption') + + with open(recovery_result_path, 'w') as f: # noqa: PTH123, PLW1514 ujson.dump(resilience_results, f) # Below are for development use - from pyrecodes import GeoVisualizer as gvis + from pyrecodes import GeoVisualizer as gvis # noqa: N813, PLC0415 + geo_visualizer = gvis.R2D_GeoVisualizer(system.components) geo_visualizer.plot_component_localities() - from pyrecodes import Plotter + from pyrecodes import Plotter # noqa: PLC0415 + plotter_object = Plotter.Plotter() x_axis_label = 'Time step [day]' - resources_to_plot = ['Shelter', 'FunctionalHousing', 'ElectricPower', 'PotableWater'] + resources_to_plot = [ + 'Shelter', + 'FunctionalHousing', + 'ElectricPower', + 'PotableWater', + ] resource_units = ['[beds/day]', '[beds/day]', '[MWh/day]', '[RC/day]'] # define which resilience 
calculator to use to plot the supply/demand/consumption of the resources # they are ordered as in the system configuration file resilience_calculator_id = 0 - for i, resource_name in enumerate(resources_to_plot): + for i, resource_name in enumerate(resources_to_plot): y_axis_label = f'{resource_name} {resource_units[i]} | {system.resilience_calculators[resilience_calculator_id].scope}' - axis_object = plotter_object.setup_lor_plot_fig(x_axis_label, y_axis_label) - time_range = system.time_step+1 - time_steps_before_event = 10 # - plotter_object.plot_single_resource(list(range(-time_steps_before_event, time_range)), - resilience_results[resource_name]['R2Dres_mean_Supply'][:time_range], - resilience_results[resource_name]['R2Dres_mean_Demand'][:time_range], - resilience_results[resource_name]['R2Dres_mean_Consumption'][:time_range], - axis_object, warmup=time_steps_before_event) - print() -def create_system_configuration(rec_config): + axis_object = plotter_object.setup_lor_plot_fig(x_axis_label, y_axis_label) + time_range = system.time_step + 1 + time_steps_before_event = 10 + plotter_object.plot_single_resource( + list(range(-time_steps_before_event, time_range)), + resilience_results[resource_name]['R2Dres_mean_Supply'][:time_range], + resilience_results[resource_name]['R2Dres_mean_Demand'][:time_range], + resilience_results[resource_name]['R2Dres_mean_Consumption'][ + :time_range + ], + axis_object, + warmup=time_steps_before_event, + ) + print() # noqa: T201 + + +def create_system_configuration(rec_config): # noqa: D103 content_config = rec_config.pop('Content') system_configuration = rec_config.copy() if content_config['Creator'] == 'FromJsonFile': - with open(content_config['FilePath'], 'r') as f: + with open(content_config['FilePath'], 'r') as f: # noqa: PTH123, PLW1514, UP015 content = json.load(f) - system_configuration.update({"Content":content}) + system_configuration.update({'Content': content}) elif content_config['Creator'] == 'LocalityGeoJSON': # think how users can input RecoveryResourceSupplier and Resources pass - + return system_configuration -def select_realizations_to_run(damage_input, inputRWHALE): - rlzs_num = min([item['ApplicationData']['Realizations'] \ - for _, item in inputRWHALE['Applications']['DL'].items()]) +def select_realizations_to_run(damage_input, inputRWHALE): # noqa: N803, D103 + rlzs_num = min( + [ # noqa: C419 + item['ApplicationData']['Realizations'] + for _, item in inputRWHALE['Applications']['DL'].items() + ] + ) rlzs_available = np.array(range(rlzs_num)) if damage_input['Type'] == 'R2DDamageRealization': rlz_filter = damage_input['Parameters']['Filter'] rlzs_requested = [] for rlzs in rlz_filter.split(','): - if "-" in rlzs: - rlzs_low, rlzs_high = rlzs.split("-") - rlzs_requested += list(range(int(rlzs_low), int(rlzs_high)+1)) + if '-' in rlzs: + rlzs_low, rlzs_high = rlzs.split('-') + rlzs_requested += list(range(int(rlzs_low), int(rlzs_high) + 1)) else: rlzs_requested.append(int(rlzs)) rlzs_requested = np.array(rlzs_requested) - rlzs_in_available = np.in1d(rlzs_requested, rlzs_available) + rlzs_in_available = np.in1d(rlzs_requested, rlzs_available) # noqa: NPY201 if rlzs_in_available.sum() != 0: - rlzs_to_run = rlzs_requested[ - np.where(rlzs_in_available)[0]] + rlzs_to_run = rlzs_requested[np.where(rlzs_in_available)[0]] else: rlzs_to_run = [] if damage_input['Type'] == 'R2DDamageSample': @@ -316,50 +441,65 @@ def select_realizations_to_run(damage_input, inputRWHALE): seed = damage_input['Parameters']['SampleSize'] if sample_size < 
rlzs_num:
             np.random.seed(seed)
-            rlzs_to_run = np.sort(np.random.choice(rlzs_available, sample_size,\
-                replace = False)).tolist()
+            rlzs_to_run = np.sort(
+                np.random.choice(rlzs_available, sample_size, replace=False)
+            ).tolist()
         else:
             rlzs_to_run = np.sort(rlzs_available).tolist()
     return rlzs_to_run


-if __name__ == '__main__':
-    #Defining the command line arguments
+if __name__ == '__main__':
+    # Defining the command line arguments

-    workflowArgParser = argparse.ArgumentParser(
-        "Run Pyrecodes from the NHERI SimCenter rWHALE workflow for a set of assets.",
-        allow_abbrev=False)
+    workflowArgParser = argparse.ArgumentParser(  # noqa: N816
+        'Run Pyrecodes from the NHERI SimCenter rWHALE workflow for a set of assets.',
+        allow_abbrev=False,
+    )

-    workflowArgParser.add_argument("-c", "--configJsonPath",
-        help="Configuration file for running perycode")
-    workflowArgParser.add_argument("-i", "--inputRWHALEPath",
-        help="Configuration file specifying the rwhale applications and data "
-        "used")
-    workflowArgParser.add_argument("-p", "--parallelType",
+    workflowArgParser.add_argument(
+        '-c', '--configJsonPath', help='Configuration file for running pyrecodes'
+    )
+    workflowArgParser.add_argument(
+        '-i',
+        '--inputRWHALEPath',
+        help='Configuration file specifying the rwhale applications and data '
+        'used',
+    )
+    workflowArgParser.add_argument(
+        '-p',
+        '--parallelType',
         default='seqRUN',
-        help="How parallel runs: options seqRUN, parSETUP, parRUN")
-    workflowArgParser.add_argument("-m", "--mpiexec",
+        help='How to run in parallel: options seqRUN, parSETUP, parRUN',
+    )
+    workflowArgParser.add_argument(
+        '-m',
+        '--mpiexec',
         default='mpiexec',
-        help="How mpi runs, e.g. ibrun, mpirun, mpiexec")
+        help='How MPI runs, e.g. 
ibrun, mpirun, mpiexec', + ) + workflowArgParser.add_argument( + '-n', + '--numP', default='8', - help="If parallel, how many jobs to start with mpiexec option") + help='If parallel, how many jobs to start with mpiexec option', + ) - #Parsing the command line arguments - wfArgs = workflowArgParser.parse_args() + # Parsing the command line arguments + wfArgs = workflowArgParser.parse_args() # noqa: N816 - #Calling the main workflow method and passing the parsed arguments - numPROC = int(wfArgs.numP) + # Calling the main workflow method and passing the parsed arguments + numPROC = int(wfArgs.numP) # noqa: N816 - with open(Path(wfArgs.configJsonPath).resolve(), 'r') as f: + with open(Path(wfArgs.configJsonPath).resolve(), 'r') as f: # noqa: PTH123, PLW1514, UP015 rec_config = json.load(f) - with open(Path(wfArgs.inputRWHALEPath).resolve(), 'r') as f: - inputRWHALE = json.load(f) - - run_pyrecodes(rec_config=rec_config,\ - inputRWHALE=inputRWHALE, - parallelType = wfArgs.parallelType, - mpiExec = wfArgs.mpiexec, - numPROC = numPROC) - - \ No newline at end of file + with open(Path(wfArgs.inputRWHALEPath).resolve(), 'r') as f: # noqa: PTH123, PLW1514, UP015 + inputRWHALE = json.load(f) # noqa: N816 + + run_pyrecodes( + rec_config=rec_config, + inputRWHALE=inputRWHALE, + parallelType=wfArgs.parallelType, + mpiExec=wfArgs.mpiexec, + numPROC=numPROC, + ) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index 0bd879000..bf724a34c 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -114,7 +114,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: RET504 + return run_tag # noqa: DOC201, RET504 # Max and Min IDs if len(filterIDs) > 0: stns_requested = [] @@ -547,7 +547,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 'slopeThickness', 'gammaSoil', 'phiSoil', - 'cohesionSoil' + 'cohesionSoil', ]: if stn.get(key, None) is not None: tmp.update({key: stn.get(key)}) @@ -609,7 +609,7 @@ def create_gridded_stations( gstn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 1 - return run_tag # noqa: RET504 + return run_tag # noqa: DOC201, RET504 if np.max(gstn_df.index.values) != 2: # noqa: PLR2004 run_tag = 1 return run_tag # noqa: RET504 @@ -662,7 +662,7 @@ def get_vs30_global(lat, lon): ) vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: RET504 + return vs30 # noqa: DOC201, RET504 def get_vs30_thompson(lat, lon): @@ -694,21 +694,21 @@ def get_vs30_thompson(lat, lon): vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: RET504 + return vs30 # noqa: DOC201, RET504 def get_z1(vs30): """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter)""" # noqa: D400 z1 = np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4))) # return - return z1 # noqa: RET504 + return z1 # noqa: DOC201, RET504 def get_z25(z1): """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013)""" # noqa: D400 z25 = 0.748 + 2.218 * z1 # return - return z25 # noqa: RET504 + return z25 # noqa: DOC201, RET504 def get_z25fromVs(vs): # noqa: N802 @@ -717,7 +717,7 @@ def 
get_z25fromVs(vs): # noqa: N802 """ # noqa: D205, D400 z25 = (7.089 - 1.144 * np.log(vs)) * 1000 # return - return z25 # noqa: RET504 + return z25 # noqa: DOC201, RET504 def get_zTR_global(lat, lon): # noqa: N802 @@ -743,7 +743,7 @@ def get_zTR_global(lat, lon): # noqa: N802 ) zTR = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # noqa: N806 # return - return zTR # noqa: RET504 + return zTR # noqa: DOC201, RET504 def export_site_prop(stn_file, output_dir, filename): @@ -811,7 +811,7 @@ def get_zTR_ncm(lat, lon): # noqa: N802 # get the top bedrock data zTR.append(abs(cur_res['response']['results'][0]['profiles'][0]['top'])) # return - return zTR + return zTR # noqa: DOC201 def get_vsp_ncm(lat, lon, depth): @@ -850,7 +850,7 @@ def get_vsp_ncm(lat, lon, depth): if len(vsp) == 1: vsp = vsp[0] # return - return vsp + return vsp # noqa: DOC201 def compute_vs30_from_vsp(depthp, vsp): @@ -868,7 +868,7 @@ def compute_vs30_from_vsp(depthp, vsp): # Computing the Vs30 vs30p = 30.0 / np.sum(delta_t) # return - return vs30p # noqa: RET504 + return vs30p # noqa: DOC201, RET504 def get_vs30_ncm(lat, lon): @@ -895,7 +895,7 @@ def get_vs30_ncm(lat, lon): ) vs30.append(760.0) # return - return vs30 + return vs30 # noqa: DOC201 def get_soil_model_ba(param=None): @@ -925,7 +925,7 @@ def get_soil_model_ba(param=None): else: res = None - return res + return res # noqa: DOC201 def get_soil_model_ei(param=None): @@ -940,7 +940,7 @@ def get_soil_model_ei(param=None): else: res = None - return res + return res # noqa: DOC201 def get_soil_model_user(df_stn, model_fun): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py index a09550e27..3f4f87688 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py @@ -630,7 +630,7 @@ def oq_run_classical_psha( # noqa: C901 export_realizations('realizations', dstore) except: # noqa: E722 print('FetchOpenQuake: Classical PSHA failed.') # noqa: T201 - return 1 + return 1 # noqa: DOC201 elif vtag == 11: # noqa: PLR2004 try: print(f'FetchOpenQuake: running Version {oq_version}.') # noqa: T201 @@ -680,7 +680,7 @@ def oq_run_classical_psha( # noqa: C901 try: params['hazard_calculation_id'] = str(calc_ids[hc_id]) except IndexError: - raise SystemExit( # noqa: B904, DOC501 + raise SystemExit( # noqa: B904, DOC501, RUF100 'There are %d old calculations, cannot ' 'retrieve the %s' % (len(calc_ids), hc_id) ) @@ -845,7 +845,7 @@ def oq_read_uhs_classical_psha(scen_info, event_info, dir_info): mag_maf.append([0.0, float(list_IMs[0].split('~')[0]), 0.0]) # return - return ln_psa_mr, mag_maf, im_list + return ln_psa_mr, mag_maf, im_list # noqa: DOC201 class OpenQuakeHazardCalc: # noqa: D101 @@ -991,7 +991,7 @@ def run_calc(self): # noqa: C901 oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False ): - return {} + return {} # noqa: DOC201 elif 'rupture_model' not in oq.inputs: logging.warning( 'There is no rupture_model, the calculator will just ' @@ -1275,7 +1275,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 dctx = computer.dctx.roundup(cur_gs.minimum_distance) if computer.distribution is None: if computer.correlation_model: - raise ValueError( # noqa: DOC501, TRY003, TRY301 + raise ValueError( # noqa: DOC501, RUF100, TRY003, TRY301 'truncation_level=0 requires ' # noqa: EM101 'no correlation model' ) @@ -1295,7 
+1295,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 # of interest. # In this case, we also assume no correlation model is used. if computer.correlation_model: - raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, TRY301 + raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, RUF100, TRY301 computer.correlation_model, cur_gs ) @@ -1371,7 +1371,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 dctx = computer.dctx.roundup(cur_gs.minimum_distance) if computer.truncation_level == 0: if computer.correlation_model: - raise ValueError( # noqa: DOC501, TRY003, TRY301 + raise ValueError( # noqa: DOC501, RUF100, TRY003, TRY301 'truncation_level=0 requires ' # noqa: EM101 'no correlation model' ) @@ -1391,7 +1391,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 # of interest. # In this case, we also assume no correlation model is used. if computer.correlation_model: - raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, TRY301 + raise CorrelationButNoInterIntraStdDevs( # noqa: DOC501, RUF100, TRY301 computer.correlation_model, cur_gs ) @@ -1550,7 +1550,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 } # return - return res # noqa: RET504 + return res # noqa: DOC201, RET504 def calculator_build_events_from_sources(self): # noqa: C901 """Prefilter the composite source model and store the source_info""" # noqa: D400 @@ -1666,7 +1666,7 @@ def __str__(self): # noqa: D105 def to_imt_unit_values(vals, imt): """Exponentiate the values unless the IMT is MMI""" # noqa: D400 if str(imt) == 'MMI': - return vals + return vals # noqa: DOC201 return np.exp(vals) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py index 54bf7a752..6518421cf 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py @@ -654,7 +654,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_ManzourDavidson2016._input_check: no return period is defined.' ) - return False + return False # noqa: DOC201 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -730,8 +730,8 @@ def _opt_initialization(self): itertools.product(range(self.num_sites), range(self.num_return_periods)) ) self.prob += pulp.lpSum( - self.return_periods[j] * self.e_plus[(i, j)] - + self.return_periods[j] * self.e_minus[(i, j)] + self.return_periods[j] * self.e_plus[(i, j)] # noqa: RUF031 + + self.return_periods[j] * self.e_minus[(i, j)] # noqa: RUF031 for (i, j) in comb_sites_rps ) @@ -757,7 +757,7 @@ def _opt_initialization(self): <= self.num_scenarios ) - return True + return True # noqa: DOC201 def solve_opt(self): """target_function: compute the target function to be minimized @@ -853,7 +853,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_Wangetal2023._input_check: no return period is defined.' 
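# For context, the OccurrenceModel_ManzourDavidson2016 objective annotated in
# _opt_initialization above can be reproduced standalone with the same pulp
# calls (toy problem sizes and return periods assumed):
#
#     import itertools
#     import pulp
#     num_sites, num_rps = 2, 3
#     return_periods = [100, 500, 2500]
#     prob = pulp.LpProblem('hazard_matching', pulp.LpMinimize)
#     e_plus = pulp.LpVariable.dicts('ep', (range(num_sites), range(num_rps)), lowBound=0)
#     e_minus = pulp.LpVariable.dicts('em', (range(num_sites), range(num_rps)), lowBound=0)
#     prob += pulp.lpSum(
#         return_periods[j] * e_plus[i][j] + return_periods[j] * e_minus[i][j]
#         for (i, j) in itertools.product(range(num_sites), range(num_rps))
#     )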
) - return False + return False # noqa: DOC201 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -916,7 +916,7 @@ def _opt_initialization(self): self.X_weighted = np.dot(self.W, self.X) self.y_weighted = np.dot(self.W, self.y) - return True + return True # noqa: DOC201 def solve_opt(self): """LASSO regression""" # noqa: D400 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index a242a8eb9..779fdb70d 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -470,17 +470,18 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0914, PLR0915 ln_im_mr, mag_maf, im_list ) gf_im_list += settlement_info['Output'] - if "Landslide" in ground_failure_info.keys(): - import landslide - if 'Landslide' in ground_failure_info['Landslide'].keys(): + if 'Landslide' in ground_failure_info.keys(): # noqa: SIM118 + import landslide # noqa: PLC0415 + + if 'Landslide' in ground_failure_info['Landslide'].keys(): # noqa: SIM118 lsld_info = ground_failure_info['Landslide']['Landslide'] - lsld_model = getattr(landslide, lsld_info['Model'])(\ - lsld_info["Parameters"], stations) + lsld_model = getattr(landslide, lsld_info['Model'])( + lsld_info['Parameters'], stations + ) ln_im_mr, mag_maf, im_list = lsld_model.run( - ln_im_mr, mag_maf, im_list - ) + ln_im_mr, mag_maf, im_list + ) gf_im_list += lsld_info['Output'] - if event_info['SaveIM'] and ln_im_mr: print('HazardSimulation: saving simulated intensity measures.') # noqa: T201 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py index 5f4725fa4..95112fa86 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py @@ -99,6 +99,7 @@ if importlib.util.find_spec('jpype') is None: subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603 import jpype + # from jpype import imports import jpype.imports from jpype.types import * # noqa: F403 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index 5c2ea75c3..357973d27 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -65,7 +65,7 @@ def baker_jayaram_correlation_2008(im1, im2, flag_orth=False): # noqa: FBT002, elif im1.startswith('PGA'): T1 = 0.0 # noqa: N806 else: - return 0.0 + return 0.0 # noqa: DOC201 if im2.startswith('SA'): T2 = float(im2[3:-1]) # noqa: N806 elif im2.startswith('PGA'): @@ -126,7 +126,7 @@ def bradley_correlation_2011(IM, T=None, flag_Ds=True): # noqa: FBT002, C901, N # PGA if IM == 'PGA': # noqa: RET503 if flag_Ds: - return -0.442 + return -0.442 # noqa: DOC201 else: # noqa: RET505 return -0.305 elif IM == 'PGV': @@ -252,7 +252,7 @@ def jayaram_baker_correlation_2009(im, h, flag_clustering=False): # noqa: FBT00 else: b = 40.7 - 15.0 * T rho = np.exp(-3.0 * h / b) - return rho # noqa: RET504 + return rho # noqa: DOC201, RET504 def 
load_loth_baker_correlation_2013(datapath): @@ -270,7 +270,7 @@ def load_loth_baker_correlation_2013(datapath): B2 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B2.csv', header=0) # noqa: N806 B1 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B1.csv', header=0) # noqa: N806 B3 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B3.csv', header=0) # noqa: N806 - return B1, B2, B3 + return B1, B2, B3 # noqa: DOC201 def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N803 @@ -303,7 +303,7 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N80 Ch = b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0) # noqa: N806 # Correlation coefficient rho = Ch - return rho # noqa: RET504 + return rho # noqa: DOC201, RET504 def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 @@ -373,7 +373,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 .swapaxes(1, 2) ) # return - return residuals # noqa: RET504 + return residuals # noqa: DOC201, RET504 def load_markhvida_ceferino_baker_correlation_2017(datapath): @@ -404,7 +404,7 @@ def load_markhvida_ceferino_baker_correlation_2017(datapath): index_col=None, header=0, ) - return MCB_model, MCB_pca, MCB_var + return MCB_model, MCB_pca, MCB_var # noqa: DOC201 def markhvida_ceferino_baker_correlation_2017( # noqa: C901 @@ -521,7 +521,7 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 if tmp_periods > model_Tmax: residuals = np.concatenate((residuals, Tmax_residuals), axis=1) # return - return residuals + return residuals # noqa: DOC201 def load_du_ning_correlation_2021(datapath): @@ -548,7 +548,7 @@ def load_du_ning_correlation_2021(datapath): DN_var = pd.read_csv( # noqa: N806 datapath + 'du_ning_correlation_2021_var_scale.csv', index_col=None, header=0 ) - return DN_model, DN_pca, DN_var + return DN_model, DN_pca, DN_var # noqa: DOC201 def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): @@ -657,7 +657,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): ) # return - return residuals + return residuals # noqa: DOC201 def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 @@ -686,7 +686,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 print( # noqa: T201 f'CorrelationModel.baker_bradley_correlation_2017: warning - return 0.0 for unknown {im1}' ) - return 0.0 + return 0.0 # noqa: DOC201 im_list.append(tmp_tag) period_list.append(None) if im2.startswith('SA'): diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py index 57f02c96b..2a017cbf4 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py @@ -65,7 +65,7 @@ def abrahamson_silva_ds_1999( print( # noqa: T201 "SignificantDurationModel.abrahamson_silva_ds_1999: duration_type='DS575H','DS575V','DS595H','DS595V'?" ) - return None, None + return None, None # noqa: DOC201 # modeling coefficients beta = [3.2, 3.2, 3.2, 3.2] b1 = [5.204, 4.610, 5.204, 4.610] @@ -140,7 +140,7 @@ def bommer_stafford_alarcon_ds_2009( print( # noqa: T201 "SignificantDurationModel.bommer_stafford_alarcon_ds_2009: duration_type='DS575H','DS595H'?" 
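# Quick numeric illustration of the two correlation models annotated above
# (the Loth & Baker coefficients below are assumed stand-ins for the B1-B3
# table lookups; b uses the period-dependent branch shown above at T = 0.5 s):
#
#     import numpy as np
#     h = np.array([0.0, 5.0, 20.0, 70.0])  # site separation [km]
#     b = 40.7 - 15.0 * 0.5
#     rho_jb = np.exp(-3.0 * h / b)  # Jayaram & Baker (2009)
#     b1, b2, b3 = 0.3, 0.5, 0.2     # assumed coefficients
#     rho_lb = b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0)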
) - return None, None, None, None + return None, None, None, None # noqa: DOC201 # modeling coefficients c0 = [-5.6298, -2.2393] @@ -205,7 +205,7 @@ def afshari_stewart_ds_2016( # noqa: C901 print( # noqa: T201 "SignificantDurationModel.afshari_stewart_ds_2016: mechanism='unknown','normal','reverse','strike-slip'?" ) - return None, None, None, None + return None, None, None, None # noqa: DOC201 # region map reg_map = {'california': 0, 'japan': 1, 'other': 2} reg_tag = reg_map.get(region.lower(), None) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py index 61ce5c1ed..b4cf82da8 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py @@ -240,7 +240,7 @@ def calc(self, Mw, rJB, rRup, rX, dip, zTop, vs30, vsInf, z1p0, style): # noqa: stdDev = np.sqrt(tauSq + phiSq) # noqa: N806 - return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) + return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) # noqa: DOC201 # https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/imr/attenRelImpl/ngaw2/NGAW2_Wrapper.java#L220 def getFaultFromRake(self, rake): # noqa: D102, N802, PLR6301 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index a901df9b9..cd6352725 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -1,7 +1,7 @@ -import numpy as np +import numpy as np # noqa: CPY001, INP001, I001, D100 import rasterio as rio from scipy.interpolate import interp2d -import sys, warnings, shapely, pandas, os +import sys, warnings, shapely, pandas, os # noqa: ICN001, E401 from pyproj import Transformer from pyproj import CRS from enum import Enum @@ -9,11 +9,13 @@ from scipy.spatial import ConvexHull import pandas as pd -## Helper functions -def sampleRaster(raster_file_path, raster_crs, x, y, interp_scheme = 'nearest',\ - dtype = None): - """performs 2D interpolation at (x,y) pairs. Accepted interp_scheme = 'nearest', 'linear', 'cubic', and 'quintic'""" - print(f"Sampling from the Raster File: {os.path.basename(raster_file_path)}...") + +## Helper functions # noqa: E266 +def sampleRaster( # noqa: N802 + raster_file_path, raster_crs, x, y, interp_scheme='nearest', dtype=None +): + """performs 2D interpolation at (x,y) pairs. Accepted interp_scheme = 'nearest', 'linear', 'cubic', and 'quintic'""" # noqa: D400, D401, D403 + print(f'Sampling from the Raster File: {os.path.basename(raster_file_path)}...') # noqa: T201, PTH119 invalid_value = np.nan xy_crs = CRS.from_user_input(4326) raster_crs = CRS.from_user_input(raster_crs) @@ -21,82 +23,108 @@ def sampleRaster(raster_file_path, raster_crs, x, y, interp_scheme = 'nearest',\ try: raster_data = raster_file.read() if raster_data.shape[0] > 1: - warnings.warn(f"More than one band in the file {raster_file_path}, the first band is used.") - except: - sys.exit(f"Can not read data from {raster_file_path}") + warnings.warn( # noqa: B028 + f'More than one band in the file {raster_file_path}, the first band is used.' 
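# The interpolated branch below builds grid ticks from the raster bounds and
# flips the band so row order follows ascending y. A standalone sketch on a
# synthetic 4x5 grid (RegularGridInterpolator is a stand-in for the interp2d
# call used here, which recent SciPy releases have removed):
#
#     import numpy as np
#     from scipy.interpolate import RegularGridInterpolator
#     data = np.arange(20, dtype=float).reshape(4, 5)  # band 1; row 0 is the top
#     left, right, bottom, top = 0.0, 5.0, 0.0, 4.0    # assumed raster bounds
#     x_tick = np.linspace(left, right, data.shape[1], endpoint=False)
#     y_tick = np.linspace(bottom, top, data.shape[0], endpoint=False)
#     interp = RegularGridInterpolator((y_tick, x_tick), np.flipud(data),
#                                      bounds_error=False, fill_value=np.nan)
#     sample = interp([(2.0, 1.0), (3.0, 4.0)])        # queries are (y, x) pairs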
+ ) + except: # noqa: E722 + sys.exit(f'Can not read data from {raster_file_path}') if xy_crs != raster_crs: # make transformer for reprojection - transformer_xy_to_data = Transformer.from_crs(xy_crs, raster_crs,\ - always_xy=True) + transformer_xy_to_data = Transformer.from_crs( + xy_crs, raster_crs, always_xy=True + ) # reproject and store x_proj, y_proj = transformer_xy_to_data.transform(x, y) x = x_proj y = y_proj n_sample = len(x) if interp_scheme == 'nearest': - sample = np.array([val[0] for val in raster_file.sample(list(zip(x,y)))]) + sample = np.array( + [val[0] for val in raster_file.sample(list(zip(x, y)))] + ) else: # create x and y ticks for grid - x_tick = np.linspace(raster_file.bounds.left, \ - raster_file.bounds.right, raster_file.width, endpoint=False) - y_tick = np.linspace(raster_file.bounds.bottom,\ - raster_file.bounds.top, raster_file.height, endpoint=False) + x_tick = np.linspace( + raster_file.bounds.left, + raster_file.bounds.right, + raster_file.width, + endpoint=False, + ) + y_tick = np.linspace( + raster_file.bounds.bottom, + raster_file.bounds.top, + raster_file.height, + endpoint=False, + ) # create interp2d function interp_function = interp2d( - x_tick, y_tick, np.flipud(raster_file.read(1)), - kind=interp_scheme, fill_value=invalid_value) + x_tick, + y_tick, + np.flipud(raster_file.read(1)), + kind=interp_scheme, + fill_value=invalid_value, + ) # get samples sample = np.transpose( - [interp_function(x[i],y[i]) for i in range(n_sample)] + [interp_function(x[i], y[i]) for i in range(n_sample)] )[0] # convert to target datatype if dtype is not None: sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) - sample[abs(sample)>1e10] = invalid_value - return sample + sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 + return sample # noqa: DOC201 -## Helper functions -def sampleVector(vector_file_path, vector_crs, x, y, dtype = None): - """performs spatial join of vector_file with xy'""" - print(f"Sampling from the Vector File: {os.path.basename(vector_file_path)}...") - invalid_value = np.nan + +## Helper functions # noqa: E266 +def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG001, N802 + """performs spatial join of vector_file with xy'""" # noqa: D400, D401, D403 + print(f'Sampling from the Vector File: {os.path.basename(vector_file_path)}...') # noqa: T201, PTH119 + invalid_value = np.nan # noqa: F841 xy_crs = CRS.from_user_input(4326) vector_gdf = gpd.read_file(vector_file_path) if vector_gdf.crs != vector_crs: - sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models") + sys.exit( + f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models" + ) if xy_crs != vector_crs: # make transformer for reprojection - transformer_xy_to_data = Transformer.from_crs(xy_crs, vector_crs,\ - always_xy=True) + transformer_xy_to_data = Transformer.from_crs( + xy_crs, vector_crs, always_xy=True + ) # reproject and store x_proj, y_proj = transformer_xy_to_data.transform(x, y) x = x_proj y = y_proj - # Create a convex hull containing all sites + # Create a convex hull containing all sites sites = np.array([x, y]).transpose() try: hull = ConvexHull(sites) vertices = hull.vertices vertices = sites[np.append(vertices, vertices[0])] centroid = np.mean(vertices, axis=0) - vertices = vertices + 0.05 * (vertices - centroid) - RoI 
= shapely.geometry.Polygon(vertices) - except: + vertices = vertices + 0.05 * (vertices - centroid) # noqa: PLR6104 + RoI = shapely.geometry.Polygon(vertices) # noqa: N806 + except: # noqa: E722 centroid = shapely.geometry.Point(np.mean(x), np.mean(y)) points = [shapely.geometry.Point(x[i], y[i]) for i in range(len(x))] if len(points) == 1: - distances = [0.1] # Degree + distances = [0.1] # Degree else: distances = [point.distance(centroid) for point in points] - max_distance = max(distances)*1.2 + max_distance = max(distances) * 1.2 angles = np.linspace(0, 2 * np.pi, 36) - circle_points = [(centroid.x + max_distance * np.cos(angle), \ - centroid.y + max_distance * np.sin(angle)) for angle in angles] - RoI = shapely.geometry.Polygon(circle_points) - data = dict() + circle_points = [ + ( + centroid.x + max_distance * np.cos(angle), + centroid.y + max_distance * np.sin(angle), + ) + for angle in angles + ] + RoI = shapely.geometry.Polygon(circle_points) # noqa: N806 + data = dict() # noqa: C408 for col in vector_gdf.columns: - data.update({col:[]}) + data.update({col: []}) for row_index in vector_gdf.index: new_geom = RoI.intersection(vector_gdf.loc[row_index, 'geometry']) if new_geom.is_empty: @@ -107,158 +135,188 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype = None): data[col].append(vector_gdf.loc[row_index, col]) data['geometry'].append(new_geom) del vector_gdf - gdf_roi = gpd.GeoDataFrame(data, geometry="geometry", crs=4326) - geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] + gdf_roi = gpd.GeoDataFrame(data, geometry='geometry', crs=4326) + geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] # noqa: FURB140 gdf_sites = gpd.GeoDataFrame(geometry=geometry, crs=4326).reset_index() - merged = gpd.GeoDataFrame.sjoin(gdf_roi, gdf_sites, how = 'inner', predicate = 'contains') + merged = gpd.GeoDataFrame.sjoin( + gdf_roi, gdf_sites, how='inner', predicate='contains' + ) merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) - gdf_sites = pandas.merge(gdf_sites, merged, on = 'index', how = 'left') - gdf_sites.drop(columns=['geometry', 'index'], inplace=True) - return gdf_sites + gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') + gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 + return gdf_sites # noqa: DOC201 -def find_additional_output_req(liq_info, current_step): + +def find_additional_output_req(liq_info, current_step): # noqa: D103 additional_output_keys = [] if current_step == 'Triggering': - trigging_parameters = liq_info['Triggering']\ - ['Parameters'].keys() - triger_dist_water = liq_info['Triggering']['Parameters'].get('DistWater', None) + trigging_parameters = liq_info['Triggering']['Parameters'].keys() # noqa: F841 + triger_dist_water = liq_info['Triggering']['Parameters'].get( + 'DistWater', None + ) if triger_dist_water is None: return additional_output_keys - lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None) - if 'LateralSpreading' in liq_info.keys(): - lat_dist_water = liq_info['LateralSpreading']['Parameters'].get('DistWater', None) - if (liq_info['LateralSpreading']['Model'] == 'Hazus2020')\ - and (lat_dist_water==triger_dist_water): + lat_dist_water = liq_info['LateralSpreading']['Parameters'].get( + 'DistWater', None + ) + if 'LateralSpreading' in liq_info.keys(): # noqa: SIM118 + lat_dist_water = liq_info['LateralSpreading']['Parameters'].get( + 'DistWater', None + ) + if (liq_info['LateralSpreading']['Model'] == 
'Hazus2020') and ( + lat_dist_water == triger_dist_water + ): additional_output_keys.append('dist_to_water') return additional_output_keys -def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station): - gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype = None) + +def infer_from_geologic_map(map_path, map_crs, lon_station, lat_station): # noqa: D103 + gdf_units = sampleVector(map_path, map_crs, lon_station, lat_station, dtype=None) gdf_units = gdf_units['PTYPE'] gdf_units = gdf_units.fillna('water') - default_geo_prop_fpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'database',\ - 'groundfailure', 'Wills_etal_2015_CA_Geologic_Properties.csv') + default_geo_prop_fpath = os.path.join( # noqa: PTH118 + os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120 + 'database', + 'groundfailure', + 'Wills_etal_2015_CA_Geologic_Properties.csv', + ) default_geo_prop = pd.read_csv(default_geo_prop_fpath) unique_geo_unit = np.unique(gdf_units) phi_mean = np.empty_like(gdf_units) coh_mean = np.empty_like(gdf_units) for each in unique_geo_unit: - rows_with_geo_unit = np.where(gdf_units.values==each)[0] - rows_for_param = np.where(default_geo_prop['Unit Abbreviation'].values==each)[0][0] - phi_mean[rows_with_geo_unit] = \ - default_geo_prop['Friction Angle - Median (degrees)'][rows_for_param] - coh_mean[rows_with_geo_unit] = \ - default_geo_prop['Cohesion - Median (kPa)'][rows_for_param] + rows_with_geo_unit = np.where(gdf_units.values == each)[0] # noqa: PD011 + rows_for_param = np.where( + default_geo_prop['Unit Abbreviation'].values == each # noqa: PD011 + )[0][0] + phi_mean[rows_with_geo_unit] = default_geo_prop[ + 'Friction Angle - Median (degrees)' + ][rows_for_param] + coh_mean[rows_with_geo_unit] = default_geo_prop['Cohesion - Median (kPa)'][ + rows_for_param + ] return phi_mean, coh_mean + def erf2(x): - """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" + """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" # noqa: D400, D401, D403 # constants - a1 = 0.254829592 + a1 = 0.254829592 a2 = -0.284496736 - a3 = 1.421413741 + a3 = 1.421413741 a4 = -1.453152027 - a5 = 1.061405429 - p = 0.3275911 + a5 = 1.061405429 + p = 0.3275911 # Save the sign of x signs = np.sign(x) x = np.abs(x) # A & S 7.1.26 - t = 1.0/(1.0 + p*x) - y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2) - return signs*y + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) + return signs * y # noqa: DOC201 + def norm2_cdf(x, loc, scale): """ modified implementation of norm.cdf function from numba_stats, using self-implemented erf function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = (x - loc)/scale - return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) + """ # noqa: D205, D400, D401 + inter = (x - loc) / scale + return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) # noqa: DOC201 + def erf2_2d(x): - """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" + """modified from https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf""" # noqa: D400, D401, D403 # constants - a1 = 0.254829592 + a1 = 0.254829592 a2 = -0.284496736 - a3 = 1.421413741 + a3 = 1.421413741 a4 = -1.453152027 - a5 = 1.061405429 - p = 0.3275911 + a5 = 1.061405429 + p = 0.3275911 # Save the sign of x signs = np.sign(x) x = np.abs(x) # A & S 7.1.26 - t = 1.0/(1.0 + p*x) - y 
= 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x**2) - return signs*y + t = 1.0 / (1.0 + p * x) + y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) + return signs * y # noqa: DOC201 + def norm2_cdf_2d(x, loc, scale): """ modified implementation of norm.cdf function from numba_stats, using self-implemented erf function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = (x - loc)/scale - return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) + """ # noqa: D205, D400, D401 + inter = (x - loc) / scale + return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) # noqa: DOC201 + -def nb_round(x, decimals): +def nb_round(x, decimals): # noqa: D103 out = np.empty_like(x) - return np.round_(x, decimals, out) + return np.round(x, decimals, out) + -def erfinv_coeff(order=20): +def erfinv_coeff(order=20): # noqa: D103 # initialize - c = np.empty(order+1) + c = np.empty(order + 1) # starting value c[0] = 1 - for i in range(1,order+1): - c[i] = sum([c[j]*c[i-1-j]/(j+1)/(2*j+1) for j in range(i)]) + for i in range(1, order + 1): + c[i] = sum([c[j] * c[i - 1 - j] / (j + 1) / (2 * j + 1) for j in range(i)]) # noqa: C419 # return return c + def erfinv(x, order=20): - """returns inverse erf(x)""" + """returns inverse erf(x)""" # noqa: D400, D401, D403 # get coefficients c = erfinv_coeff(order) # initialize - root_pi_over_2 = np.sqrt(np.pi)/2 + root_pi_over_2 = np.sqrt(np.pi) / 2 y = np.zeros(x.shape) for i in range(order): - y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1) + y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y + return y # noqa: DOC201 + def norm2_ppf(p, loc, scale): """ modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = np.sqrt(2) * erfinv(2*p-1,order=20) - return scale * inter + loc + """ # noqa: D205, D400, D401 + inter = np.sqrt(2) * erfinv(2 * p - 1, order=20) + return scale * inter + loc # noqa: DOC201 + def erfinv_2d(x, order=20): - """returns inverse erf(x)""" + """returns inverse erf(x)""" # noqa: D400, D401, D403 # get coefficients c = erfinv_coeff(order) # initialize - root_pi_over_2 = np.sqrt(np.pi)/2 + root_pi_over_2 = np.sqrt(np.pi) / 2 y = np.zeros(x.shape) for i in range(order): - y += c[i]/(2*i+1)*(root_pi_over_2*x)**(2*i+1) + y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y + return y # noqa: DOC201 + def norm2_ppf_2d(p, loc, scale): """ modified implementation of norm.ppf function from numba_stats, using self-implemented erfinv function https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py - """ - inter = np.sqrt(2) * erfinv_2d(2*p-1,order=20) - return scale * inter + loc + """ # noqa: D205, D400, D401 + inter = np.sqrt(2) * erfinv_2d(2 * p - 1, order=20) + return scale * inter + loc # noqa: DOC201 +
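The helper block above reimplements the standard normal CDF/PPF on top of the Abramowitz & Stegun 7.1.26 rational approximation of erf (maximum absolute error about 1.5e-7) and a truncated Maclaurin series for erfinv, so the sampler does not need scipy or numba_stats at run time. A minimal sanity-check sketch, not part of the patch, assuming the module is importable as landslide (as HazardSimulationEQ.py does) and scipy is available in the development environment:

    import numpy as np
    from scipy.stats import norm

    from landslide import norm2_cdf, norm2_ppf

    x = np.linspace(-3.0, 3.0, 101)
    # CDF error is bounded by the erf approximation error (~1.5e-7)
    print(np.max(np.abs(norm2_cdf(x, 0.0, 1.0) - norm.cdf(x))))

    # the order-20 erfinv series is accurate in the central range of p and
    # degrades toward the tails, so compare away from 0 and 1
    p = np.linspace(0.1, 0.9, 17)
    print(np.max(np.abs(norm2_ppf(p, 0.0, 1.0) - norm.ppf(p))))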
-class Landslide: +class Landslide: # noqa: D101 def __init__(self) -> None: pass + # ----------------------------------------------------------- class BrayMacedo2019(Landslide): """ @@ -280,7 +338,7 @@ class BrayMacedo2019(Landslide): [g] peak ground acceleration mag: float, np.ndarray or list moment magnitude - + Geotechnical/geologic: slope: float, np.ndarray or list [deg] slope angle @@ -292,7 +350,7 @@ class BrayMacedo2019(Landslide): [deg] friction angle of soil coh_soil: float, np.ndarray or list [kPa] cohesion of soil - + Fixed: Returns @@ -301,190 +301,280 @@ class BrayMacedo2019(Landslide): [m] permanent ground deformation sigma_pgdef : float, np.ndarray aleatory variability for ln(pgdef) - + References ---------- .. [1] Bray, J.D., and Macedo, J., 2019, Procedure for Estimating Shear-Induced Seismic Slope Displacement for Shallow Crustal Earthquakes, Journal of Geotechnical and Geoenvironmental Engineering, vol. 145, pp. 12, 04019106. - - """ + + """ # noqa: D205, D400 + def __init__(self, parameters, stations) -> None: self.stations = stations self.parameters = parameters - self.slope = None #(km) - self.t_slope = None #(km) - self.gamma_soil = None #(km) - self.phi_soil = None #(m) - self.coh_soil = None # (mm) + self.slope = None # (deg) + self.t_slope = None # (m) + self.gamma_soil = None # (kN/m^3) + self.phi_soil = None # (deg) + self.coh_soil = None # (kPa) self.interpolate_spatial_parameters(parameters) - - def interpolate_spatial_parameters(self, parameters): + + def interpolate_spatial_parameters(self, parameters): # noqa: C901, D102 # site coordinate in CRS 4326 lat_station = [site['lat'] for site in self.stations] lon_station = [site['lon'] for site in self.stations] - # slope - if parameters["Slope"] == "Defined (\"slope\") in Site File (.csv)": + # slope + if parameters['Slope'] == 'Defined ("slope") in Site File (.csv)': self.slope = np.array([site['slope'] for site in self.stations]) else: - self.slope = sampleRaster(parameters["Slope"], parameters["inputCRS"],\ - lon_station, lat_station) + self.slope = sampleRaster( + parameters['Slope'], parameters['inputCRS'], lon_station, lat_station + ) # t_slope - if parameters["SlopeThickness"] == "Defined (\"slopeThickness\") in Site File (.csv)": - self.t_slope = np.array([site['slopeThickness'] for site in self.stations]) - elif parameters["SlopeThickness"] == "Use constant value (m)": - self.t_slope = np.array([parameters["SlopeThicknessValue"]]*len(self.stations)) + if ( + parameters['SlopeThickness'] + == 'Defined ("slopeThickness") in Site File (.csv)' + ): + self.t_slope = np.array( + [site['slopeThickness'] for site in self.stations] + ) + elif parameters['SlopeThickness'] == 'Use constant value (m)': + self.t_slope = np.array( + [parameters['SlopeThicknessValue']] * len(self.stations) + ) else: - self.t_slope = sampleRaster(parameters["SlopeThickness"], parameters["inputCRS"],\ - lon_station, lat_station) + self.t_slope = sampleRaster( + parameters['SlopeThickness'], + parameters['inputCRS'], + lon_station, + lat_station, + ) # gamma_soil - if parameters["GammaSoil"] == "Defined (\"gammaSoil\") in Site File (.csv)": + if parameters['GammaSoil'] == 'Defined ("gammaSoil") in Site File (.csv)': self.gamma_soil = np.array([site['gammaSoil'] for site in self.stations]) - elif parameters["GammaSoil"] == "Use constant value (kN/m^3)": - self.gamma_soil = np.array([parameters["GammaSoilValue"]]*len(self.stations)) + elif parameters['GammaSoil'] == 'Use constant value (kN/m^3)': + self.gamma_soil = np.array( + [parameters['GammaSoilValue']] * len(self.stations) + ) else: - self.gamma_soil = sampleRaster(parameters["GammaSoil"], parameters["inputCRS"],\ - lon_station, lat_station) + self.gamma_soil = sampleRaster( + parameters['GammaSoil'], + parameters['inputCRS'], + lon_station, + lat_station, + )
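For reference, each geotechnical input above is dispatched purely on the string value of its key: a 'Defined ... in Site File (.csv)' keyword reads a per-site column, a 'Use constant value ...' keyword broadcasts the companion *Value entry, and any other string is treated as a raster path to sample; friction angle and cohesion additionally accept the geologic-map option. A hypothetical parameters dict, with placeholder paths that are not part of this patch, might look like:

    parameters = {
        'inputCRS': 'EPSG:4326',
        # mode 1: per-site values from the "slope" column of the site file
        'Slope': 'Defined ("slope") in Site File (.csv)',
        # mode 2: one constant for all stations, read from the *Value key
        'SlopeThickness': 'Use constant value (m)',
        'SlopeThicknessValue': 2.5,
        # mode 3: any other string is treated as a raster file to sample
        'GammaSoil': '/path/to/unit_weight_raster.tif',
        # phi/cohesion can also be inferred from the geologic map via the
        # Wills et al. (2015) property table bundled with this patch series
        'PhiSoil': 'Infer from Geologic Map (Bain et al. 2022)',
        'CohesionSoil': 'Infer from Geologic Map (Bain et al. 2022)',
        'GeologicMap': '/path/to/geologic_map.shp',
    }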
# phi_soil - if parameters["PhiSoil"] == "Defined (\"phiSoil\") in Site File (.csv)": + if parameters['PhiSoil'] == 'Defined ("phiSoil") in Site File (.csv)': self.phi_soil = np.array([site['phiSoil'] for site in self.stations]) - elif parameters["PhiSoil"] == "Use constant value (deg)": - self.phi_soil = np.array([parameters["PhiSoilValue"]]*len(self.stations)) - elif parameters["PhiSoil"] == "Infer from Geologic Map (Bain et al. 2022)": - if parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 2022)": - self.phi_soil, self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ - parameters['inputCRS'], lon_station, lat_station) + elif parameters['PhiSoil'] == 'Use constant value (deg)': + self.phi_soil = np.array( + [parameters['PhiSoilValue']] * len(self.stations) + ) + elif parameters['PhiSoil'] == 'Infer from Geologic Map (Bain et al. 2022)': + if ( + parameters['CohesionSoil'] + == 'Infer from Geologic Map (Bain et al. 2022)' + ): + self.phi_soil, self.coh_soil = infer_from_geologic_map( + parameters['GeologicMap'], + parameters['inputCRS'], + lon_station, + lat_station, + ) else: - self.phi_soil, _ = infer_from_geologic_map(parameters["GeologicMap"],\ - parameters['inputCRS'], lon_station, lat_station) + self.phi_soil, _ = infer_from_geologic_map( + parameters['GeologicMap'], + parameters['inputCRS'], + lon_station, + lat_station, + ) else: - self.phi_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ - lon_station, lat_station) + self.phi_soil = sampleRaster( + parameters['PhiSoil'], + parameters['inputCRS'], + lon_station, + lat_station, + ) # coh_soil if self.coh_soil is None: - if parameters["CohesionSoil"] == "Defined (\"cohesionSoil\") in Site File (.csv)": - self.coh_soil = np.array([site['cohesionSoil'] for site in self.stations]) - elif parameters["CohesionSoil"] == "Use constant value (kPa)": - self.coh_soil = np.array([parameters["CohesionSoilValue"]]*len(self.stations)) - elif parameters["CohesionSoil"] == "Infer from Geologic Map (Bain et al. 2022)": - self.coh_soil = infer_from_geologic_map(parameters["GeologicMap"],\ - parameters['inputCRS'], lon_station, lat_station) + if ( + parameters['CohesionSoil'] + == 'Defined ("cohesionSoil") in Site File (.csv)' + ): + self.coh_soil = np.array( + [site['cohesionSoil'] for site in self.stations] + ) + elif parameters['CohesionSoil'] == 'Use constant value (kPa)': + self.coh_soil = np.array( + [parameters['CohesionSoilValue']] * len(self.stations) + ) + elif ( + parameters['CohesionSoil'] + == 'Infer from Geologic Map (Bain et al. 
2022)' + ): + self.coh_soil = infer_from_geologic_map( + parameters['GeologicMap'], + parameters['inputCRS'], + lon_station, + lat_station, + ) else: - self.coh_soil = sampleRaster(parameters["CohesionSoil"], parameters["inputCRS"],\ - lon_station, lat_station) + self.coh_soil = sampleRaster( + parameters['CohesionSoil'], + parameters['inputCRS'], + lon_station, + lat_station, + ) - print("Initiation finished") + print('Initiation finished') # noqa: T201 - def run(self, ln_im_data, eq_data, im_list, output_keys=['lsd_PGD_h'], additional_output_keys = []): - if ('PGA' in im_list): + def run( # noqa: D102 + self, + ln_im_data, + eq_data, + im_list, + output_keys=['lsd_PGD_h'], # noqa: B006 + additional_output_keys=[], # noqa: B006, ARG002 + ): + if 'PGA' in im_list: num_stations = len(self.stations) num_scenarios = len(eq_data) - PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] + PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] # noqa: N806, RUF015 for scenario_id in range(num_scenarios): num_rlzs = ln_im_data[scenario_id].shape[2] - im_data_scen = np.zeros([num_stations,\ - len(im_list)+len(output_keys), num_rlzs]) - im_data_scen[:,0:len(im_list),:] = ln_im_data[scenario_id] + im_data_scen = np.zeros( + [num_stations, len(im_list) + len(output_keys), num_rlzs] + ) + im_data_scen[:, 0 : len(im_list), :] = ln_im_data[scenario_id] for rlz_id in range(num_rlzs): - pga = np.exp(ln_im_data[scenario_id][:,PGA_col_id,rlz_id]) + pga = np.exp(ln_im_data[scenario_id][:, PGA_col_id, rlz_id]) mag = float(eq_data[scenario_id][0]) - model_output = self.model(pga, mag, self.slope, self.t_slope, - self.gamma_soil, self.phi_soil, - self.coh_soil) + model_output = self.model( + pga, + mag, + self.slope, + self.t_slope, + self.gamma_soil, + self.phi_soil, + self.coh_soil, + ) for i, key in enumerate(output_keys): - im_data_scen[:,len(im_list)+i,rlz_id] = model_output[key] + im_data_scen[:, len(im_list) + i, rlz_id] = model_output[key] ln_im_data[scenario_id] = im_data_scen - im_list = im_list + output_keys + im_list = im_list + output_keys # noqa: PLR6104 else: - sys.exit(f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed.") + sys.exit( + f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be computed." 
# noqa: F541 + ) # print(f"At least one of 'PGA' and 'PGV' is missing in the selected intensity measures and the liquefaction trigging model 'ZhuEtal2017' can not be computed."\ # , file=sys.stderr) # sys.stderr.write("test") # sys.exit(-1) - return ln_im_data, eq_data, im_list, - - def model( + return ( + ln_im_data, + eq_data, + im_list, + ) + + def model( # noqa: PLR6301 self, - pga, mag, # upstream PBEE RV - slope, t_slope, gamma_soil, phi_soil, coh_soil, # geotechnical/geologic - return_inter_params=False # to get intermediate params + pga, + mag, # upstream PBEE RV + slope, + t_slope, + gamma_soil, + phi_soil, + coh_soil, # geotechnical/geologic + return_inter_params=False, # to get intermediate params # noqa: FBT002, ARG002 ): - """Model""" - - # get dimensions + """Model""" # noqa: D202, D400 + + # get dimensions ndim = pga.ndim if ndim == 1: n_site = len(pga) n_sample = 1 - shape = (n_site) + shape = n_site else: shape = pga.shape n_site = shape[0] n_sample = shape[1] - + # initialize pgdef = np.zeros(shape) ky = np.zeros(shape) prob_d_eq_0 = np.zeros(shape) ln_pgdef_trunc = np.zeros(shape) nonzero_median_cdf = np.zeros(shape) - + # convert from deg to rad - slope_rad = (slope*np.pi/180).astype(np.float32) - phi_soil_rad = (phi_soil*np.pi/180).astype(np.float32) + slope_rad = (slope * np.pi / 180).astype(np.float32) + phi_soil_rad = (phi_soil * np.pi / 180).astype(np.float32) coh_soil = coh_soil.astype(np.float32) - + # yield acceleration - ky = np.tan(phi_soil_rad-slope_rad) + \ - coh_soil/( - gamma_soil * t_slope * np.cos(slope_rad)**2 * \ - (1+np.tan(phi_soil_rad)*np.tan(slope_rad))) - ky = np.maximum(ky,0.01) # to avoid ky = 0 + ky = np.tan(phi_soil_rad - slope_rad) + coh_soil / ( + gamma_soil + * t_slope + * np.cos(slope_rad) ** 2 + * (1 + np.tan(phi_soil_rad) * np.tan(slope_rad)) + ) + ky = np.maximum(ky, 0.01) # to avoid ky = 0 # aleatory sigma_val = 0.72 # deformation, eq 3b - ln_pgdef_trunc = \ - -4.684 + \ - -2.482*np.log(ky) + \ - -0.244*(np.log(ky))**2 + \ - 0.344*np.log(ky)*np.log(pga) + \ - 2.649*np.log(pga) + \ - -0.090*(np.log(pga))**2 + \ - 0.603*mag # cm + ln_pgdef_trunc = ( + -4.684 + + -2.482 * np.log(ky) + + -0.244 * (np.log(ky)) ** 2 + + 0.344 * np.log(ky) * np.log(pga) + + 2.649 * np.log(pga) + + -0.090 * (np.log(pga)) ** 2 + + 0.603 * mag + ) # cm nonzero_ln_pgdef = ln_pgdef_trunc.copy() - + # probability of zero displacement, eq. 
2 with Ts=0 if ndim == 1: prob_d_eq_0 = 1 - norm2_cdf( - -2.480 + \ - -2.970*np.log(ky) + \ - -0.120*(np.log(ky))**2 + \ - 2.780*np.log(pga), - 0, 1) + -2.480 + + -2.970 * np.log(ky) + + -0.120 * (np.log(ky)) ** 2 + + 2.780 * np.log(pga), + 0, + 1, + ) else: prob_d_eq_0 = 1 - norm2_cdf_2d( - -2.480 + \ - -2.970*np.log(ky) + \ - -0.120*(np.log(ky))**2 + \ - 2.780*np.log(pga), - 0, 1) + -2.480 + + -2.970 * np.log(ky) + + -0.120 * (np.log(ky)) ** 2 + + 2.780 * np.log(pga), + 0, + 1, + ) prob_d_eq_0 = nb_round(prob_d_eq_0, decimals=15) - + # apply non-zero displacement correction/condition, eq 11 - nonzero_median_cdf = 1 - .5/(1-prob_d_eq_0) - + nonzero_median_cdf = 1 - 0.5 / (1 - prob_d_eq_0) + # loop through number of samples if ndim == 1: - nonzero_ln_pgdef[nonzero_median_cdf>0] = ln_pgdef_trunc[nonzero_median_cdf>0] + \ - sigma_val*norm2_ppf(nonzero_median_cdf[nonzero_median_cdf>0], 0.0, 1.0) + nonzero_ln_pgdef[nonzero_median_cdf > 0] = ln_pgdef_trunc[ + nonzero_median_cdf > 0 + ] + sigma_val * norm2_ppf( + nonzero_median_cdf[nonzero_median_cdf > 0], 0.0, 1.0 + ) else: for i in range(n_sample): - cond = nonzero_median_cdf[:,i]>0 - nonzero_ln_pgdef[cond,i] = ln_pgdef_trunc[cond,i] + \ - sigma_val*norm2_ppf(nonzero_median_cdf[cond,i], 0.0, 1.0) + cond = nonzero_median_cdf[:, i] > 0 + nonzero_ln_pgdef[cond, i] = ln_pgdef_trunc[ + cond, i + ] + sigma_val * norm2_ppf(nonzero_median_cdf[cond, i], 0.0, 1.0) + # rest of actions - pgdef = np.exp(nonzero_ln_pgdef)/100 # also convert from cm to m - pgdef = np.maximum(pgdef,1e-5) # limit to - output = {'lsd_PGD_h':pgdef} - return output \ No newline at end of file + pgdef = np.exp(nonzero_ln_pgdef) / 100 # also convert from cm to m + pgdef = np.maximum(pgdef, 1e-5) # floor at 1e-5 m to avoid zero displacement downstream + output = {'lsd_PGD_h': pgdef} + return output # noqa: RET504, DOC201
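As a worked check of the deterministic core of model() under hypothetical inputs (none of these numbers come from the patch): a 30 degree slope with a 3 m slide mass, unit weight 19 kN/m^3, friction angle 20 degrees, and cohesion 10 kPa gives a yield acceleration of roughly 0.017 g, and at PGA = 0.4 g and M = 7 the median of eq. 3b (with Ts = 0) comes out near 0.8 m:

    import numpy as np

    slope, t_slope, gamma, phi, coh = 30.0, 3.0, 19.0, 20.0, 10.0  # deg, m, kN/m^3, deg, kPa
    pga, mag = 0.4, 7.0  # g, Mw

    s, f = np.radians(slope), np.radians(phi)
    # infinite-slope yield acceleration, same expression as in model()
    ky = np.tan(f - s) + coh / (
        gamma * t_slope * np.cos(s) ** 2 * (1 + np.tan(f) * np.tan(s))
    )
    ky = max(ky, 0.01)  # ~0.017 g here

    # median ln(displacement) in cm, eq. 3b of Bray & Macedo (2019), Ts = 0
    ln_d = (
        -4.684
        - 2.482 * np.log(ky)
        - 0.244 * np.log(ky) ** 2
        + 0.344 * np.log(ky) * np.log(pga)
        + 2.649 * np.log(pga)
        - 0.090 * np.log(pga) ** 2
        + 0.603 * mag
    )
    print(np.exp(ln_d) / 100)  # ~0.8 m median sliding displacement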
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py index 7f0fe1dfc..5eda3b1cc 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py @@ -82,7 +82,7 @@ def sampleRaster( # noqa: N802 sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 - return sample + return sample # noqa: DOC201 # Helper functions @@ -96,9 +96,13 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 try: user_crs_input = CRS.from_user_input(vector_crs).to_epsg() if vector_gdf.crs.to_epsg() != user_crs_input: - sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models") - except: - print("The input CRS ({xy_crs}) defined for liquefaction triggering models is invalid. The CRS of vector files are used") + sys.exit( + f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models" + ) + except: # noqa: E722 + print( # noqa: T201 + f'The input CRS ({xy_crs}) defined for liquefaction triggering models is invalid. The CRS of vector files are used' + ) # if vector_gdf.crs != vector_crs: # sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models") @@ -159,7 +163,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 - return gdf_sites + return gdf_sites # noqa: DOC201 def find_additional_output_req(liq_info, current_step): # noqa: D103 @@ -451,7 +455,7 @@ def model(self, pgv, pga, mag): # liq_susc[prob_liq==zero_prob_liq] = 'none' - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 # ----------------------------------------------------------- @@ -654,7 +658,7 @@ def model( pga_mag = pga / (10**2.24 / mag**2.56) prob_liq[pga_mag < 0.1] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 # ----------------------------------------------------------- @@ -817,7 +821,7 @@ def model(self, pgv, pga, mag): # for precip > 1700 mm, set prob to "0" prob_liq[self.precip > 1700] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 # Lateral Spreading: @@ -983,7 +987,7 @@ def model( # output['ratio'] = ratio # return - return output # noqa: RET504 + return output # noqa: DOC201, RET504 # Settlement: @@ -1056,7 +1060,7 @@ def model( pass # return - return output + return output # noqa: DOC201 def run(self, ln_im_data, eq_data, im_list): # noqa: D102 output_keys = ['liq_PGD_v'] diff --git a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py index f521ec680..806cad47e 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py +++ b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py @@ -448,7 +448,7 @@ def interp_wind_by_height(pws_ip, height_simu, height_ref): ) # return - return pws_op + return pws_op # noqa: DOC201 def gust_factor_ESDU(gd_c, gd_t): # noqa: N802 @@ -475,7 +475,7 @@ def gust_factor_ESDU(gd_c, gd_t): # noqa: N802 gd_c, gd, gf, left=gf[0], right=gf[-1] ) # return - return gf_t # noqa: RET504 + return gf_t # noqa: DOC201, RET504 def export_pws(stations, pws, output_dir, filename='EventGrid.csv'): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py index 8abcf54dd..3635b4fb6 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py @@ -156,8 +156,8 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print('CreateScenario: error - no storm name or year is provided.') # noqa: T201 # Searching the storm try: - df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] - df_chs = df_chs[df_chs[('SEASON', 'Year')] == storm_year] + df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] # noqa: RUF031 + df_chs = df_chs[df_chs[('SEASON', 
'Year')] == storm_year] # noqa: RUF031 except: # noqa: E722 print('CreateScenario: error - the storm is not found.') # noqa: T201 if len(df_chs.values) == 0: @@ -166,10 +166,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq # Collecting storm properties track_lat = [] track_lon = [] - for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011 + for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011 + for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON @@ -177,10 +177,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print( # noqa: T201 'CreateScenario: warning - the USA_LAT and USA_LON are not available, switching to LAT and LON.' ) - for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011 + for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011 + for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 if len(track_lat) == 0: @@ -197,7 +197,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq terrain_data = [] # Storm characteristics at the landfall dist2land = [] - for x in df_chs[('DIST2LAND', 'km')]: + for x in df_chs[('DIST2LAND', 'km')]: # noqa: RUF031 if x != ' ': dist2land.append(x) # noqa: PERF401 if len(track_lat) == 0: @@ -237,14 +237,14 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq track_simu = track_lat # Reading data try: - landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) - landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) + landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 + landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 except: # noqa: E722 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON - landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) - landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) + landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 + landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 try: - landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) + landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) # noqa: RUF031 except: # noqa: E722 print('CreateScenario: error - no landing angle is found.') # noqa: T201 if landfall_ang > 180.0: # noqa: PLR2004 @@ -254,7 +254,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq - np.min( [ float(x) - for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011 + for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011, RUF031 .iloc[tmploc - 5 :] .values.tolist() if x != ' ' @@ -262,11 +262,11 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq ) ) landfall_spd = ( - float(df_chs[('STORM_SPEED', 'kts')].iloc[tmploc]) * 0.51444 + float(df_chs[('STORM_SPEED', 
'kts')].iloc[tmploc]) * 0.51444 # noqa: RUF031 ) # convert knots/s to km/s try: landfall_rad = ( - float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 + float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found @@ -274,7 +274,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq try: # If the default option (USA_RMW) is not available, switching to REUNION_RMW landfall_rad = ( - float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 + float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py index a03a64c08..caae970e4 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py @@ -68,7 +68,7 @@ def create_stations(input_file, output_file, min_id, max_id): stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: RET504 + return run_tag # noqa: DOC201, RET504 # Max and Min IDs stn_ids_min = np.min(stn_df.index.values) stn_ids_max = np.max(stn_df.index.values) diff --git a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py index d74a470a2..57c128fc1 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py @@ -168,7 +168,7 @@ def __interp_z0(self, lat, lon): if not z0: z0 = 0.01 # return - return z0 + return z0 # noqa: DOC201 def add_reference_terrain(self, terrain_info): """add_reference_terrainL specifying reference z0 values for a set of polygons @@ -595,4 +595,4 @@ def compute_wind_field(self): # noqa: PLR0914 def get_station_data(self): """get_station_data: returning station data""" # noqa: D400 # return station dictionary - return self.station + return self.station # noqa: DOC201 diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py index ed6a60e11..78faad354 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py +++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py @@ -374,7 +374,7 @@ def _load_h5_plom(self, filename): if cur_var in self.dbserver.get_item_adds() and ATTR_MAP[cur_var]: # noqa: F405 # read in cur_data = store[cur_var] - cur_dshape = tuple( + cur_dshape = tuple( # noqa: C409 [x[0] for x in store['/DS_' + cur_var[1:]].values.tolist()] # noqa: PD011 ) if cur_dshape == (1,): @@ -416,7 +416,7 @@ def _load_h5_data_X(self, filename): # noqa: N802 item_name='X0', col_name=list(self.X0.columns), item=self.X0 ) - return self.X0.to_numpy() + return self.X0.to_numpy() # noqa: DOC201 except: # noqa: E722 return None @@ -491,7 +491,7 @@ def load_h5(self, filename): ) if '/X0' in self.dbserver.get_name_list(): self.X0 = self.dbserver.get_item('X0', table_like=True) - return self.X0.to_numpy() + return self.X0.to_numpy() # noqa: DOC201 else: # noqa: RET505 self.logfile.write_msg( msg='PLoM.load_h5: the original X0 data not found in the loaded data.', @@ -598,7 +598,7 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST): # noqa: C901, 
N802, F405 msg_type='WARNING', msg_level=0, ) - return False + return False # noqa: DOC201 map_order = [FULL_TASK_LIST.index(x) for x in self.cur_task_list] # noqa: F405 if map_order != sorted(map_order): self.logfile.write_msg( @@ -961,7 +961,7 @@ def DataNormalization(self, X): # noqa: N802, N803, PLR6301 X_scaled, alpha, x_min = plom.scaling(X) # noqa: N806 x_mean = plom.mean(X_scaled) - return X_scaled, alpha, x_min, x_mean + return X_scaled, alpha, x_min, x_mean # noqa: DOC201 def RunPCA(self, X_origin, epsilon_pca): # noqa: N802, N803, D102 # ...PCA... @@ -995,7 +995,7 @@ def RunKDE(self, X, epsilon_kde): # noqa: N802, N803, PLR6301 (s_v, c_v, hat_s_v) = plom.parameters_kde(X) K, b = plom.K(X, epsilon_kde) # noqa: N806 - return s_v, c_v, hat_s_v, K, b + return s_v, c_v, hat_s_v, K, b # noqa: DOC201 def DiffMaps(self, H, K, b, tol=0.1): # noqa: N802, N803, D102 # ..diff maps basis... diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py index ac55ef66f..8988315f9 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py +++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py @@ -102,7 +102,7 @@ def kernel(x, y, epsilon): """ # noqa: D205, D400 dist = np.linalg.norm(x - y) ** 2 k = np.exp(-dist / (4 * epsilon)) - return k # noqa: RET504 + return k # noqa: DOC201, RET504 def K(eta, epsilon): # noqa: N802 @@ -124,7 +124,7 @@ def K(eta, epsilon): # noqa: N802 K[i, j] = 1 row_sum = row_sum + 1 # noqa: PLR6104 b[i, i] = row_sum - return K, b + return K, b # noqa: DOC201 def g(K, b): # noqa: N803 @@ -142,7 +142,7 @@ def g(K, b): # noqa: N803 norm = np.diagonal(np.transpose(g).dot(b).dot(g)) sqrt_norm = np.sqrt(1 / norm) g = np.multiply(g, sqrt_norm) - return g, eigenvalues + return g, eigenvalues # noqa: DOC201 def m(eigenvalues, tol=0.1): @@ -153,7 +153,7 @@ def m(eigenvalues, tol=0.1): m = 0 while i < len(eigenvalues) and m == 0: if eigenvalues[i] <= eigenvalues[1] * tol: - return i + 1 + return i + 1 # noqa: DOC201 i = i + 1 # noqa: PLR6104 if m == 0: return max(round(len(eigenvalues) / 10), 3) @@ -170,7 +170,7 @@ def mean(x): x_mean = np.zeros((dim, 1)) for i in range(dim): x_mean[i] = np.mean(x[i, :]) - return x_mean + return x_mean # noqa: DOC201 def covariance(x): @@ -187,7 +187,7 @@ def covariance(x): C = C + (np.resize(x[:, i], x_mean.shape) - x_mean).dot( # noqa: N806, PLR6104 np.transpose(np.resize(x[:, i], x_mean.shape) - x_mean) ) - return C / (N - 1) + return C / (N - 1) # noqa: DOC201 def PCA(x, tol): # noqa: N802 @@ -226,7 +226,7 @@ def PCA(x, tol): # noqa: N802 1 / (mu) ) # no need to do the sqrt because we use the singularvalues eta = mu_sqrt_inv.dot(np.transpose(phi)).dot(x - x_mean) - return ( + return ( # noqa: DOC201 eta, mu, phi, @@ -243,7 +243,7 @@ def parameters_kde(eta): s_v = (4 / (N * (2 + nu))) ** (1 / (nu + 4)) # (4/(N*(2+nu)))**(1/(nu+4)) hat_s_v = s_v / sqrt(s_v**2 + ((N - 1) / N)) c_v = 1 / (sqrt(2 * pi) * hat_s_v) ** nu - return s_v, c_v, hat_s_v + return s_v, c_v, hat_s_v # noqa: DOC201 def kde(y, eta, s_v=None, c_v=None, hat_s_v=None): @@ -254,7 +254,7 @@ def kde(y, eta, s_v=None, c_v=None, hat_s_v=None): N = eta.shape[1] # noqa: N806 if s_v == None or c_v == None or hat_s_v == None: # noqa: E711 s_v, c_v, hat_s_v = parameters_kde(eta) - return c_v * rhoctypes( + return c_v * rhoctypes( # noqa: DOC201 np.resize(y, (y.shape[0] * y.shape[1], 1)), np.resize(np.transpose(eta), (nu * N, 1)), nu, @@ -288,7 +288,7 @@ def PCA2(C_h_hat_eta, beta, tol): # noqa: N802, N803 lambda_c = 
lambda_c[0:nu_c] psi = psi[:, 0:nu_c] b_c = np.transpose(psi).dot(beta) - return b_c, psi + return b_c, psi # noqa: DOC201 def h_c(eta, g_c, phi, mu, psi, x_mean): # noqa: D103 diff --git a/modules/performUQ/SimCenterUQ/PLoM/general.py b/modules/performUQ/SimCenterUQ/PLoM/general.py index e59f44039..b0d76ac97 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/general.py +++ b/modules/performUQ/SimCenterUQ/PLoM/general.py @@ -149,13 +149,13 @@ def _create_export_dir(self): dir_export = os.path.join(self.db_dir, 'DataOut') # noqa: PTH118 try: os.makedirs(dir_export, exist_ok=True) # noqa: PTH103 - return dir_export # noqa: TRY300 + return dir_export # noqa: DOC201, TRY300 except: # noqa: E722 return None def get_item_adds(self): """Returning the full list of data items""" # noqa: D400, D401 - return self._item_adds + return self._item_adds # noqa: DOC201 def add_item( self, @@ -190,7 +190,7 @@ def add_item( store.close() # noqa: RET503 else: # Not supported data_type - return False + return False # noqa: DOC201 def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: FBT002 """Getting a specific data item""" # noqa: D400, D401 @@ -199,7 +199,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: store = pd.HDFStore(self.db_path, 'r') try: item = store.get(item_name) - item_shape = tuple( + item_shape = tuple( # noqa: C409 [ x[0] for x in self.get_item_shape( # noqa: PD011 @@ -214,7 +214,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: finally: store.close() - return item + return item # noqa: DOC201 elif data_type == 'ConstraintsFile': store = pd.HDFStore(self.db_path, 'r') try: @@ -247,7 +247,7 @@ def get_item_shape(self, item_name=None): item_shape = None store.close() - return item_shape + return item_shape # noqa: DOC201 def get_name_list(self): """Returning the keys of the database""" # noqa: D400, D401 @@ -257,7 +257,7 @@ def get_name_list(self): except: # noqa: E722 name_list = [] store.close() - return name_list + return name_list # noqa: DOC201 def export(self, data_name=None, filename=None, file_format='csv'): """Exporting the specific data item @@ -266,7 +266,7 @@ def export(self, data_name=None, filename=None, file_format='csv'): """ # noqa: D205, D400, D401 d = self.get_item(item_name=data_name[1:], table_like=True) if d is None: - return 1 + return 1 # noqa: DOC201 if filename is None: filename = os.path.join( # noqa: PTH118 self.dir_export, str(data_name).replace('/', '') + '.' 
+ file_format @@ -311,7 +311,7 @@ def refresh_status(self): # previous task not completed -> this task also needs to rerun self.status = False - return self.status + return self.status # noqa: DOC201 # self-check if Counter(self.avail_var_list) == Counter(self.full_var_list) and len( @@ -355,7 +355,7 @@ def refresh_status(self): if not cur_task.status: self.status = False - return self.status + return self.status # noqa: DOC201 while cur_task.next_task: cur_task = cur_task.next_task if not cur_task.status: diff --git a/modules/performUQ/SimCenterUQ/runPLoM.py b/modules/performUQ/SimCenterUQ/runPLoM.py index 9bc4130fe..368a55fe3 100644 --- a/modules/performUQ/SimCenterUQ/runPLoM.py +++ b/modules/performUQ/SimCenterUQ/runPLoM.py @@ -428,7 +428,7 @@ def _create_variables(self, training_data): # check if training data source from simulation if training_data == 'Sampling and Simulation': - return x_dim, y_dim, rv_name, g_name + return x_dim, y_dim, rv_name, g_name # noqa: DOC201 # read X and Y variable names for rv in job_config['randomVariables']: @@ -562,7 +562,7 @@ def _parse_plom_parameters(self, surrogateInfo): # noqa: C901, N803 run_flag = 1 # return - return run_flag + return run_flag # noqa: DOC201 def _set_up_parallel(self): """_set_up_parallel: set up modules and variables for parallel jobs @@ -592,7 +592,7 @@ def _set_up_parallel(self): run_flag = 1 # return - return run_flag + return run_flag # noqa: DOC201 def _load_variables(self, do_sampling, do_simulation): # noqa: C901 """_load_variables: load variables @@ -666,7 +666,7 @@ def _load_variables(self, do_sampling, do_simulation): # noqa: C901 # run_flag = 1 # return - return run_flag + return run_flag # noqa: DOC201 # KZ, 07/24: loading user-defined hyper-parameter files def _load_hyperparameter(self): diff --git a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py index 2a455bcdb..76a95dcef 100644 --- a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py +++ b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py @@ -142,4 +142,4 @@ def log_likelihood( loglike += ll else: loglike += -np.inf - return loglike + return loglike # noqa: DOC201 diff --git a/modules/performUQ/UCSD_UQ/mwg_sampler.py b/modules/performUQ/UCSD_UQ/mwg_sampler.py index 8e4cfdcd0..d5073bbe6 100644 --- a/modules/performUQ/UCSD_UQ/mwg_sampler.py +++ b/modules/performUQ/UCSD_UQ/mwg_sampler.py @@ -328,7 +328,7 @@ def tune(scale, acc_rate): >0.95 x 10 """ # noqa: D205, D400 if acc_rate < 0.01: # noqa: PLR2004 - return scale * 0.01 + return scale * 0.01 # noqa: DOC201 elif acc_rate < 0.05: # noqa: RET505, PLR2004 return scale * 0.1 elif acc_rate < 0.2: # noqa: PLR2004 diff --git a/modules/performUQ/UCSD_UQ/runFEM.py b/modules/performUQ/UCSD_UQ/runFEM.py index d2c92799e..126e77560 100644 --- a/modules/performUQ/UCSD_UQ/runFEM.py +++ b/modules/performUQ/UCSD_UQ/runFEM.py @@ -101,4 +101,4 @@ def runFEM( # noqa: N802 preds = np.atleast_2d([-np.inf] * sum(edpLengthsList)).reshape((1, -1)) ll = -np.inf - return (ll, preds) + return (ll, preds) # noqa: DOC201 diff --git a/modules/performUQ/UCSD_UQ/runTMCMC.py b/modules/performUQ/UCSD_UQ/runTMCMC.py index c689e5fc4..7a9bed11f 100644 --- a/modules/performUQ/UCSD_UQ/runTMCMC.py +++ b/modules/performUQ/UCSD_UQ/runTMCMC.py @@ -561,4 +561,4 @@ def run_TMCMC( # noqa: N802, PLR0913, PLR0917 f'\n\tShutdown mpi4py executor pool for runType: {run_type}' ) - return mytrace, total_log_evidence + return mytrace, total_log_evidence # noqa: DOC201 diff --git 
a/modules/performUQ/common/ERAClasses/ERACond.py b/modules/performUQ/common/ERAClasses/ERACond.py index 2f47d13e4..db22ed72d 100644 --- a/modules/performUQ/common/ERAClasses/ERACond.py +++ b/modules/performUQ/common/ERAClasses/ERACond.py @@ -146,7 +146,7 @@ class description. if type(param) == types.LambdaType: # noqa: E721 self.Param = param else: - raise RuntimeError('The input param must be a lambda function.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The input param must be a lambda function.') # noqa: DOC501, EM101, RUF100, TRY003 self.modParam = param @@ -388,7 +388,7 @@ def equation(param): for i in range(len(Par)): Par[i] = np.squeeze(Par[i]) - return Par + return Par # noqa: DOC201 # %% def condCDF(self, x, cond): # noqa: C901, N802 @@ -442,7 +442,7 @@ def condCDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': CDF = stats.weibull_min.cdf(x, c=par[1], scale=par[0]) # noqa: N806 - return CDF + return CDF # noqa: DOC201 # %% def condiCDF(self, y, cond): # noqa: C901, N802 @@ -496,7 +496,7 @@ def condiCDF(self, y, cond): # noqa: C901, N802 elif self.Name == 'weibull': iCDF = stats.weibull_min.ppf(y, c=par[1], scale=par[0]) # noqa: N806 - return iCDF + return iCDF # noqa: DOC201 # %% def condPDF(self, x, cond): # noqa: C901, N802 @@ -550,7 +550,7 @@ def condPDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': PDF = stats.weibull_min.pdf(x, c=par[1], scale=par[0]) # noqa: N806 - return PDF + return PDF # noqa: DOC201 # %% def condRandom(self, cond): # noqa: C901, N802 @@ -602,4 +602,4 @@ def condRandom(self, cond): # noqa: C901, N802 elif self.Name == 'weibull': Random = stats.weibull_min.rvs(c=par[1], scale=par[0]) # noqa: N806 - return Random + return Random # noqa: DOC201 diff --git a/modules/performUQ/common/ERAClasses/ERADist.py b/modules/performUQ/common/ERAClasses/ERADist.py index 96d78b23d..b824c9c6f 100644 --- a/modules/performUQ/common/ERAClasses/ERADist.py +++ b/modules/performUQ/common/ERAClasses/ERADist.py @@ -174,7 +174,7 @@ class description. self.Par = {'n': int(val[0]), 'p': val[1]} self.Dist = stats.binom(n=self.Par['n'], p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Binomial distribution is not defined for your parameters.' # noqa: EM101 ) @@ -183,7 +183,7 @@ class description. self.Par = {'k': np.around(val[0], 0)} self.Dist = stats.chi2(df=self.Par['k']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Chi-Squared distribution is not defined for your parameters.' # noqa: EM101 ) @@ -192,7 +192,7 @@ class description. self.Par = {'lambda': val[0]} self.Dist = stats.expon(scale=1 / self.Par['lambda']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Exponential distribution is not defined for your parameters.' # noqa: EM101 ) @@ -205,7 +205,7 @@ class description. loc=self.Par['a_n'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Frechet distribution is not defined for your parameters.' # noqa: EM101 ) @@ -216,7 +216,7 @@ class description. a=self.Par['k'], scale=1 / self.Par['lambda'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Gamma distribution is not defined for your parameters.' # noqa: EM101 ) @@ -226,7 +226,7 @@ class description. 
self.Par = {'p': val} self.Dist = stats.geom(p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Geometric distribution is not defined for your parameters.' # noqa: EM101 ) @@ -239,7 +239,7 @@ class description. loc=self.Par['epsilon'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Generalized Extreme Value gistribution is not defined for your parameters.' # noqa: EM101 ) @@ -252,7 +252,7 @@ class description. loc=-self.Par['epsilon'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Generalized Extreme Value distribution is not defined for your parameters.' # noqa: EM101 ) @@ -263,7 +263,7 @@ class description. scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Gumbel distribution is not defined for your parameters.' # noqa: EM101 ) @@ -274,7 +274,7 @@ class description. scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Gumbel distribution is not defined for your parameters.' # noqa: EM101 ) @@ -285,7 +285,7 @@ class description. s=self.Par['sig_lnx'], scale=np.exp(self.Par['mu_lnx']) ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Lognormal distribution is not defined for your parameters.' # noqa: EM101 ) @@ -299,7 +299,7 @@ class description. self.Par = {'k': val[0], 'p': val[1]} self.Dist = stats.nbinom(n=self.Par['k'], p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Negative Binomial distribution is not defined for your parameters.' # noqa: EM101 ) @@ -310,7 +310,7 @@ class description. loc=self.Par['mu'], scale=self.Par['sigma'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Normal distribution is not defined for your parameters.' # noqa: EM101 ) @@ -323,7 +323,7 @@ class description. loc=self.Par['x_m'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Pareto distribution is not defined for your parameters.' # noqa: EM101 ) @@ -334,7 +334,7 @@ class description. self.Par = {'lambda': val[0]} self.Dist = stats.poisson(mu=self.Par['lambda']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Poisson distribution is not defined for your parameters.' # noqa: EM101 ) @@ -343,7 +343,7 @@ class description. self.Par = {'v': val[0], 't': val[1]} self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Poisson distribution is not defined for your parameters.' # noqa: EM101 ) @@ -353,7 +353,7 @@ class description. self.Par = {'alpha': alpha} self.Dist = stats.rayleigh(scale=self.Par['alpha']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Rayleigh distribution is not defined for your parameters.' # noqa: EM101 ) @@ -365,11 +365,11 @@ class description. 
elif name.lower() == 'truncatednormal': if val[2] >= val[3]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The upper bound a must be larger than the lower bound b.' # noqa: EM101 ) if val[1] < 0: - raise RuntimeError('sigma must be larger than 0.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('sigma must be larger than 0.') # noqa: DOC501, EM101, RUF100, TRY003 self.Par = { 'mu_n': val[0], 'sig_n': val[1], @@ -390,7 +390,7 @@ class description. scale=self.Par['upper'] - self.Par['lower'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Uniform distribution is not defined for your parameters.' # noqa: EM101 ) @@ -401,12 +401,12 @@ class description. c=self.Par['k'], scale=self.Par['a_n'] ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The Weibull distribution is not defined for your parameters.' # noqa: EM101 ) else: - raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501 + raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501, RUF100 # ---------------------------------------------------------------------------- # if the distribution is defined by its moments @@ -414,11 +414,11 @@ class description. val = np.array(val, ndmin=1, dtype=float) if val.size > 1 and val[1] < 0: - raise RuntimeError('The standard deviation must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The standard deviation must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 if name.lower() == 'beta': if val[3] <= val[2]: - raise RuntimeError('Please select an other support [a,b].') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select an other support [a,b].') # noqa: DOC501, EM101, RUF100, TRY003 r = ( ((val[3] - val[0]) * (val[0] - val[2]) / val[1] ** 2 - 1) * (val[0] - val[2]) @@ -427,7 +427,7 @@ class description. s = r * (val[3] - val[0]) / (val[0] - val[2]) # Evaluate if distribution can be defined on the parameters if r <= 0 and s <= 0: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 self.Par = {'r': r, 's': s, 'a': val[2], 'b': val[3]} self.Dist = stats.beta( a=self.Par['r'], @@ -444,30 +444,30 @@ class description. 
if n % 1 <= 10 ** (-4): n = int(n) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 if p >= 0 and p <= 1 and n > 0: self.Par = {'n': n, 'p': p} self.Dist = stats.binom(n=self.Par['n'], p=self.Par['p']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'chisquare': if val[0] > 0 and val[0] < np.inf and val[0] % 1 <= 10 ** (-4): self.Par = {'k': np.around(val[0], 0)} self.Dist = stats.chi2(df=self.Par['k']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'exponential': try: lam = 1 / val[0] except ZeroDivisionError: - raise RuntimeError('The first moment cannot be zero!') # noqa: B904, DOC501, EM101, TRY003 + raise RuntimeError('The first moment cannot be zero!') # noqa: B904, DOC501, EM101, RUF100, TRY003 if lam >= 0: self.Par = {'lambda': lam} self.Dist = stats.expon(scale=1 / self.Par['lambda']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'frechet': par0 = 2.0001 @@ -487,7 +487,7 @@ def equation(par): k = sol[0][0] a_n = val[0] / special.gamma(1 - 1 / k) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'fsolve could not converge to a solution, therefore' # noqa: EM101 'the parameters of the Frechet distribution could not be determined.' 
) @@ -499,7 +499,7 @@ def equation(par): loc=self.Par['a_n'], ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'gamma': # Solve system of equations for the parameters @@ -512,7 +512,7 @@ def equation(par): a=self.Par['k'], scale=1 / self.Par['lambda'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'geometric': # Solve Equation for the parameter based on the first moment @@ -521,7 +521,7 @@ def equation(par): self.Par = {'p': p} self.Dist = stats.geom(p=self.Par['p']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'gev': beta = val[2] @@ -530,7 +530,7 @@ def equation(par): alpha = val[1] * np.sqrt(6) / np.pi # scale parameter epsilon = val[2] - np.euler_gamma * alpha # location parameter elif beta >= 0.5: # noqa: PLR2004 - raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, RUF100, TRY003 else: alpha = ( abs(beta) @@ -555,7 +555,7 @@ def equation(par): alpha = val[1] * np.sqrt(6) / np.pi # scale parameter epsilon = val[2] + np.euler_gamma * alpha # location parameter elif beta >= 0.5: # noqa: PLR2004 - raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('MOM can only be used for beta < 0.5 .') # noqa: DOC501, EM101, RUF100, TRY003 else: alpha = ( abs(beta) @@ -583,7 +583,7 @@ def equation(par): scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'gumbelmin': # solve two equations for the parameters of the distribution @@ -595,11 +595,11 @@ def equation(par): scale=self.Par['a_n'], loc=self.Par['b_n'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'lognormal': if val[0] <= 0: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Please select other moments, the first moment must be greater than zero.' 
# noqa: EM101 ) # solve two equations for the parameters of the distribution @@ -621,9 +621,9 @@ def equation(par): self.Par = {'k': k, 'p': p} self.Dist = stats.nbinom(n=self.Par['k'], p=self.Par['p']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif (name.lower() == 'normal') or (name.lower() == 'gaussian'): self.Par = {'mu': val[0], 'sigma': val[1]} @@ -640,7 +640,7 @@ def equation(par): loc=self.Par['x_m'], ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'poisson': n = len(val) @@ -649,17 +649,17 @@ def equation(par): self.Par = {'lambda': val[0]} self.Dist = stats.poisson(mu=self.Par['lambda']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 if n == 2: # noqa: PLR2004 if val[0] > 0 and val[1] > 0: v = val[0] / val[1] if val[1] <= 0: - raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, RUF100, TRY003 self.Par = {'v': v, 't': val[1]} self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'rayleigh': alpha = val[0] / np.sqrt(np.pi / 2) @@ -667,7 +667,7 @@ def equation(par): self.Par = {'alpha': alpha} self.Dist = stats.rayleigh(scale=self.Par['alpha']) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 elif (name.lower() == 'standardnormal') or ( name.lower() == 'standardgaussian' @@ -677,11 +677,11 @@ def equation(par): elif name.lower() == 'truncatednormal': if val[2] >= val[3]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The upper bound a must be larger than the lower bound b.' # noqa: EM101 ) if val[0] <= val[2] or val[0] >= val[3]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The mean of the distribution must be within the interval [a,b].' # noqa: EM101 ) @@ -727,7 +727,7 @@ def equation(par): b=b_mod, ) else: - raise RuntimeError('fsolve did not converge.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('fsolve did not converge.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'uniform': # compute parameters @@ -756,7 +756,7 @@ def equation(par): k = sol[0][0] a_n = val[0] / special.gamma(1 + 1 / k) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'fsolve could not converge to a solution, therefore' # noqa: EM101 'the parameters of the Weibull distribution could not be determined.' 
) @@ -766,17 +766,17 @@ def equation(par): c=self.Par['k'], scale=self.Par['a_n'] ) else: - raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select other moments.') # noqa: DOC501, EM101, RUF100, TRY003 else: - raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501 + raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501, RUF100 # ---------------------------------------------------------------------------- # if the distribution is to be fitted to a data vector elif opt.upper() == 'DATA': if name.lower() == 'beta': if val[2] <= val[1]: - raise RuntimeError('Please select a different support [a,b].') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Please select a different support [a,b].') # noqa: DOC501, EM101, RUF100, TRY003 if min(val[0]) >= val[1] and max(val[0]) <= val[2]: pars = stats.beta.fit( val[0], floc=val[1], fscale=val[2] - val[1] @@ -789,7 +789,7 @@ def equation(par): scale=self.Par['b'] - self.Par['a'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be in the support range [a,b].' # noqa: EM101 ) @@ -798,12 +798,12 @@ def equation(par): if val[1] % 1 <= 10 ** (-4) and val[1] > 0: val[1] = int(val[1]) else: - raise RuntimeError('n must be a positive integer.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('n must be a positive integer.') # noqa: DOC501, EM101, RUF100, TRY003 X = np.array(val[0]) # noqa: N806 if all((X) % 1 <= 10 ** (-4)) and all(X >= 0) and all(val[1] >= X): X = np.around(X, 0) # noqa: N806 else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be integers in the range [0,n].' # noqa: EM101 ) val[0] = np.mean(val[0]) / val[1] @@ -816,7 +816,7 @@ def equation(par): self.Par = {'k': np.around(pars[0], 0)} self.Dist = stats.chi2(df=self.Par['k']) else: - raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'exponential': if min(val) >= 0: @@ -824,11 +824,11 @@ def equation(par): self.Par = {'lambda': 1 / pars[1]} self.Dist = stats.expon(scale=1 / self.Par['lambda']) else: - raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'frechet': if min(val) < 0: - raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given samples must be non-negative.') # noqa: DOC501, EM101, RUF100, TRY003 def equation(par): return -np.sum( @@ -852,7 +852,7 @@ def equation(par): loc=self.Par['a_n'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Maximum likelihood estimation did not converge.' # noqa: EM101 ) @@ -868,7 +868,7 @@ def equation(par): self.Par = {'p': 1 / np.mean(val)} self.Dist = stats.geom(p=self.Par['p']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be integers larger than 0.' 
# noqa: EM101 ) @@ -916,7 +916,7 @@ def equation(par): p = np.mean(val) / (np.mean(val) + np.var(val)) k = np.mean(val) * p if k == 0: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'No suitable parameters can be estimated from the given data.' # noqa: EM101 ) k = round( @@ -954,11 +954,11 @@ def equation(par): loc=self.Par['x_m'], ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Maximum likelihood estimation did not converge.' # noqa: EM101 ) else: - raise RuntimeError('The given data must be positive.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('The given data must be positive.') # noqa: DOC501, EM101, RUF100, TRY003 elif name.lower() == 'poisson': n = len(val) @@ -966,20 +966,20 @@ def equation(par): X = val[0] # noqa: N806 t = val[1] if t <= 0: - raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('t must be positive.') # noqa: DOC501, EM101, RUF100, TRY003 if all(X >= 0) and all(X % 1 == 0): v = np.mean(X) / t self.Par = {'v': v, 't': t} self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be non-negative integers.' # noqa: EM101 ) elif all(val >= 0) and all(val % 1 == 0): self.Par = {'lambda': np.mean(val)} self.Dist = stats.poisson(mu=self.Par['lambda']) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be non-negative integers.' # noqa: EM101 ) @@ -991,11 +991,11 @@ def equation(par): elif name.lower() == 'truncatednormal': X = val[0] # noqa: N806 if val[1] >= val[2]: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The upper bound a must be larger than the lower bound b.' # noqa: EM101 ) if not (all(val[1] <= X) and all(val[2] >= X)): - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given samples must be in the range [a,b].' # noqa: EM101 ) @@ -1029,7 +1029,7 @@ def equation(par): b=b_mod, ) else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Maximum likelihood estimation did not converge.' 
# noqa: EM101 ) @@ -1046,16 +1046,16 @@ def equation(par): self.Dist = stats.weibull_min(c=self.Par['k'], scale=self.Par['a_n']) else: - raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501 + raise RuntimeError("Distribution type '" + name + "' not available.") # noqa: DOC501, RUF100 else: - raise RuntimeError('Unknown option :' + opt) # noqa: DOC501 + raise RuntimeError('Unknown option :' + opt) # noqa: DOC501, RUF100 # %% def mean(self): """Returns the mean of the distribution.""" # noqa: D401 if self.Name == 'gevmin': - return -self.Dist.mean() + return -self.Dist.mean() # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 return self.Dist.mean() + self.Par['k'] @@ -1066,13 +1066,13 @@ def mean(self): # %% def std(self): """Returns the standard deviation of the distribution.""" # noqa: D401 - return self.Dist.std() + return self.Dist.std() # noqa: DOC201 # %% def pdf(self, x): """Returns the PDF value.""" # noqa: D401 if self.Name == 'binomial' or self.Name == 'geometric': # noqa: PLR1714 - return self.Dist.pmf(x) + return self.Dist.pmf(x) # noqa: DOC201 elif self.Name == 'gevmin': # noqa: RET505 return self.Dist.pdf(-x) @@ -1090,7 +1090,7 @@ def pdf(self, x): def cdf(self, x): """Returns the CDF value.""" # noqa: D401 if self.Name == 'gevmin': - return 1 - self.Dist.cdf(-x) # <-- this is not a proper cdf ! + return 1 - self.Dist.cdf(-x) # <-- this is not a proper cdf ! # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 return self.Dist.cdf(x - self.Par['k']) @@ -1104,7 +1104,7 @@ def random(self, size=None): object. """ # noqa: D205, D401 if self.Name == 'gevmin': - return self.Dist.rvs(size=size) * (-1) + return self.Dist.rvs(size=size) * (-1) # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 samples = self.Dist.rvs(size=size) + self.Par['k'] @@ -1118,7 +1118,7 @@ def random(self, size=None): def icdf(self, y): """Returns the value of the inverse CDF.""" # noqa: D401 if self.Name == 'gevmin': - return -self.Dist.ppf(1 - y) + return -self.Dist.ppf(1 - y) # noqa: DOC201 elif self.Name == 'negativebinomial': # noqa: RET505 return self.Dist.ppf(y) + self.Par['k'] @@ -1171,7 +1171,7 @@ def gevfit_alt(y): 'The shape parameter of the GEV is not in the range where PWM asymptotic results are valid.' ) - return par + return par # noqa: DOC201 # ------------------------------------------------------------------------------ @@ -1217,4 +1217,4 @@ def gevpwm(y): 'fsolve could not converge to a solution for the PWM estimate.' ) - return par + return par # noqa: DOC201 diff --git a/modules/performUQ/common/ERAClasses/ERANataf.py b/modules/performUQ/common/ERAClasses/ERANataf.py index 16f163331..bd5b23051 100644 --- a/modules/performUQ/common/ERAClasses/ERANataf.py +++ b/modules/performUQ/common/ERAClasses/ERANataf.py @@ -104,17 +104,17 @@ class description. try: np.linalg.cholesky(self.Rho_X) except np.linalg.LinAlgError: - raise RuntimeError( # noqa: B904, DOC501, TRY003 + raise RuntimeError( # noqa: B904, DOC501, RUF100, TRY003 'The given correlation matrix is not positive definite' # noqa: EM101 '--> Nataf transformation is not applicable.' ) if not np.all(self.Rho_X - self.Rho_X.T == 0): - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The given correlation matrix is not symmetric ' # noqa: EM101 '--> Nataf transformation is not applicable.' 
) if not np.all(np.diag(self.Rho_X) == 1): - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Not all diagonal entries of the given correlation matrix are equal to one ' # noqa: EM101 '--> Nataf transformation is not applicable.' ) @@ -246,7 +246,7 @@ def fun(rho0): self.Rho_Z[i, j] = sol[0] self.Rho_Z[j, i] = self.Rho_Z[i, j] else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'brentq and fsolve coul' # noqa: EM101 'd not converge to a ' 'solution of the Nataf ' @@ -255,7 +255,7 @@ def fun(rho0): try: self.A = np.linalg.cholesky(self.Rho_Z) except np.linalg.LinAlgError: - raise RuntimeError( # noqa: B904, DOC501, TRY003 + raise RuntimeError( # noqa: B904, DOC501, RUF100, TRY003 'Transformed correlation matrix is not positive' # noqa: EM101 ' definite --> Nataf transformation is not ' 'applicable.' @@ -298,12 +298,12 @@ def X2U(self, X, Jacobian=False): # noqa: FBT002, N802, N803 # check of the dimensions of input X if X.ndim > 2: # noqa: PLR2004 - raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, RUF100, TRY003 if np.shape(X)[1] == 1 and n_dim != 1: # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -318,7 +318,7 @@ def X2U(self, X, Jacobian=False): # noqa: FBT002, N802, N803 for i in range(n_dim): diag[i, i] = self.Marginals[i].pdf(X[0, i]) / stats.norm.pdf(Z[i, 0]) Jac = np.linalg.solve(self.A, diag) # noqa: N806 - return np.squeeze(U), Jac + return np.squeeze(U), Jac # noqa: DOC201 else: # noqa: RET505 return np.squeeze(U) @@ -342,7 +342,7 @@ def U2X(self, U, Jacobian=False): # noqa: FBT002, N802, N803 # in case that only one point U is given, he can be defined either as row or column vector U = U.T # noqa: N806 if np.shape(U)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'U must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' 
) @@ -359,7 +359,7 @@ def U2X(self, U, Jacobian=False): # noqa: FBT002, N802, N803 for i in range(n_dim): diag[i, i] = stats.norm.pdf(Z[i, 0]) / self.Marginals[i].pdf(X[0, i]) Jac = np.dot(diag, self.A) # noqa: N806 - return np.squeeze(X), Jac + return np.squeeze(X), Jac # noqa: DOC201 else: # noqa: RET505 return np.squeeze(X) @@ -376,7 +376,7 @@ def random(self, n=1): for i in range(n_dim): jr[:, i] = self.Marginals[i].icdf(stats.norm.cdf(Z[i, :])) - return np.squeeze(jr) + return np.squeeze(jr) # noqa: DOC201 # %% def pdf(self, X): # noqa: C901, N803 @@ -402,12 +402,12 @@ def pdf(self, X): # noqa: C901, N803 # check of the dimensions of input X if X.ndim > 2: # noqa: PLR2004 - raise RuntimeError('X must have not more than two dimensions.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('X must have not more than two dimensions.') # noqa: DOC501, EM101, RUF100, TRY003 if np.shape(X)[1] == 1 and n_dim != 1: # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -437,7 +437,7 @@ def pdf(self, X): # noqa: C901, N803 jointpdf[i] = 0 if np.size(jointpdf) == 1: - return jointpdf[0] + return jointpdf[0] # noqa: DOC201 else: # noqa: RET505 return jointpdf @@ -461,7 +461,7 @@ def cdf(self, X): # noqa: N803 # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -474,7 +474,7 @@ def cdf(self, X): # noqa: N803 U, mean=mu, cov=np.matrix(self.Rho_Z) ) - return jointcdf # noqa: RET504 + return jointcdf # noqa: DOC201, RET504 # %% @staticmethod diff --git a/modules/performUQ/common/ERAClasses/ERARosen.py b/modules/performUQ/common/ERAClasses/ERARosen.py index 5625df024..52d71e468 100644 --- a/modules/performUQ/common/ERAClasses/ERARosen.py +++ b/modules/performUQ/common/ERAClasses/ERARosen.py @@ -105,7 +105,7 @@ class description. if isinstance(dist[i], ERACond): n_parents[i] = dist[i].Param.__code__.co_argcount elif not isinstance(dist[i], ERADist): - raise RuntimeError( # noqa: DOC501, TRY003, TRY004 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003, TRY004 'The objects in dist must be either ERADist or ERACond objects.' # noqa: EM101 ) @@ -118,7 +118,7 @@ class description. for i in range(n_dist + 1): # noqa: B007 adj_prod = np.matmul(adj_prod, adj_mat) if sum(np.diag(adj_prod)) != 0: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The graph defining the dependence between the different ' # noqa: EM101 'distributions must be directed and acyclical.' ) @@ -141,7 +141,7 @@ class description. self.Order = [layers[0], np.concatenate(layers[1:])] self.Layers = layers else: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'The defined joint distribution consists only of independent distributions.' # noqa: EM101 'This type of joint distribution is not supported by ERARosen.' 
) @@ -174,12 +174,12 @@ def X2U(self, X, error=True): # noqa: FBT002, N802, N803 # check of the dimensions of input X if X.ndim > 2: # noqa: PLR2004 - raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('X must have not more than two dimensions. ') # noqa: DOC501, EM101, RUF100, TRY003 if np.shape(X)[1] == 1 and n_dim != 1: # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -200,11 +200,11 @@ def X2U(self, X, error=True): # noqa: FBT002, N802, N803 if error: if not all(np.logical_not(lin_ind)): - raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, RUF100, TRY003 else: U[lin_ind, :] = np.nan - return np.squeeze(U) + return np.squeeze(U) # noqa: DOC201 # %% def U2X(self, U, error=True): # noqa: FBT002, N802, N803 @@ -226,7 +226,7 @@ def U2X(self, U, error=True): # noqa: FBT002, N802, N803 # in case that only one point X is given, he can be defined either as row or column vector U = U.T # noqa: N806 if np.shape(U)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'U must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' ) @@ -246,11 +246,11 @@ def U2X(self, U, error=True): # noqa: FBT002, N802, N803 if error: if not np.all(np.logical_not(lin_ind)): - raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, RUF100, TRY003 else: X[lin_ind, :] = np.nan - return np.squeeze(X) + return np.squeeze(X) # noqa: DOC201 # %% def pdf(self, X, error=True): # noqa: FBT002, N803 @@ -271,7 +271,7 @@ def pdf(self, X, error=True): # noqa: FBT002, N803 # in case that only one point X is given, he can be defined either as row or column vector X = X.T # noqa: N806 if np.shape(X)[1] != n_dim: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'X must be an array of size [n,d], where d is the' # noqa: EM101 ' number of dimensions of the joint distribution.' 
) @@ -290,10 +290,10 @@ def pdf(self, X, error=True): # noqa: FBT002, N803 if error: if not np.all(np.logical_not(nan_ind)): - raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Invalid joint distribution was created.') # noqa: DOC501, EM101, RUF100, TRY003 if np.size(jointpdf) == 1: - return jointpdf[0] + return jointpdf[0] # noqa: DOC201 else: # noqa: RET505 return jointpdf @@ -314,7 +314,7 @@ def random(self, n=1): except ValueError: # noqa: PERF203 raise RuntimeError('Invalid joint distribution was created.') # noqa: B904, DOC501, EM101, TRY003 - return np.squeeze(X) + return np.squeeze(X) # noqa: DOC201 # %% def plotGraph(self, opt=False): # noqa: FBT002, C901, N802 diff --git a/modules/performUQ/other/UQpyRunner.py b/modules/performUQ/other/UQpyRunner.py index 251765fdb..3ce90e804 100644 --- a/modules/performUQ/other/UQpyRunner.py +++ b/modules/performUQ/other/UQpyRunner.py @@ -139,7 +139,7 @@ def runUQ( # noqa: C901, N802, PLR6301 distributionObjects, nsamples=numberOfSamples, random_state=seed ) else: - raise OSError( # noqa: DOC501 + raise OSError( # noqa: DOC501, RUF100 "ERROR: You'll need to update UQpyRunner.py to run your specified" # noqa: ISC003 + ' sampling method!' ) diff --git a/modules/systemPerformance/REWET/REWET/Damage.py b/modules/systemPerformance/REWET/REWET/Damage.py index a7141352c..49f2fb715 100644 --- a/modules/systemPerformance/REWET/REWET/Damage.py +++ b/modules/systemPerformance/REWET/REWET/Damage.py @@ -75,7 +75,7 @@ def readDamageFromPickleFile( # noqa: N802 Returns ------- - """ # noqa: D205, D400, D401, D404, D414 + """ # noqa: D205, D400, D401, D404, D414, DOC202 with open(pickle_file_name, 'rb') as pckf: # noqa: PTH123 w = pickle.load(pckf) # noqa: S301 @@ -276,7 +276,7 @@ def readDamageFromTextFile(self, path): # noqa: N802 elif sline[0].lower() == 'break': if line_length < 3: # noqa: PLR2004 - raise OSError('Line cannot have more than three arguments') # noqa: DOC501, EM101, TRY003 + raise OSError('Line cannot have more than three arguments') # noqa: DOC501, EM101, RUF100, TRY003 # print('Probelm 2') temp_break = {} temp_break['pipe_id'] = sline[1] @@ -313,7 +313,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ------- None. 
- """ # noqa: D400 + """ # noqa: D400, DOC202 if self.node_damage.empty: print('no node damage at all') # noqa: T201 return @@ -383,7 +383,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 pipe_length = val['node_Pipe_Length'] * 1000 if node_name not in WaterNetwork.node_name_list: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Node name of damages not in node name list: ' + node_name ) @@ -420,7 +420,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 number_of_damages = val['Number_of_damages'] pipe_length = val['node_Pipe_Length'] * 1000 if node_name not in WaterNetwork.node_name_list: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Node name of damages not in node name list: ' + node_name ) maximum_node_demand = 10 @@ -484,7 +484,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 over_designed_diameter, } else: - raise ValueError('Unknown nodal damage method') # noqa: DOC501, EM101, TRY003 + raise ValueError('Unknown nodal damage method') # noqa: DOC501, EM101, RUF100, TRY003 # return WaterNetwork @@ -807,7 +807,7 @@ def applyPipeDamages(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ratio = cur_damage['damage_loc'] / last_ratio if ratio >= 1: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'IN LEAK: ratio is bigger than or equal to 1 for pipe:' + repr(pipe_id) + ' ' @@ -863,7 +863,7 @@ def applyPipeDamages(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ratio = cur_damage['damage_loc'] / last_ratio if ratio >= 1: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'IN BREAK: ratio is bigger than or equal to 1 for pipe:' + repr(pipe_id) + ' ' @@ -940,7 +940,7 @@ def applyPipeDamages(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ) # self._registry.addPipeDamageToDamageRestorationData(pipe_id, 'break', damage_time) else: - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'undefined damage type: ' + repr(cur_damage['type']) + ". Accpetale type of famages are either 'creack' or 'break'." @@ -1043,7 +1043,7 @@ def read_earthquake(self, earthquake_file_name): ------- None. - """ # noqa: D205, DOC502 + """ # noqa: D205, DOC202, DOC502 if type(earthquake_file_name) != str: # noqa: E721 raise ValueError('string is wanted for earthqiake fie name') # noqa: EM101, TRY003 @@ -1080,7 +1080,7 @@ def sortEarthquakeListTimely(self): # noqa: N802 ------- None. - """ # noqa: D400, D401, D404 + """ # noqa: D400, D401, D404, DOC202 self._earthquake.sort_index() self.is_timely_sorted = True @@ -1099,7 +1099,7 @@ def predictDamage(self, wn, iClear=False): # noqa: FBT002, N802, N803 ------- None. 
- """ # noqa: D401, D404 + """ # noqa: D401, D404, DOC202 if iClear: self.pipe_leak = pd.Series() self.pipe_break = pd.Series() diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py index f7aad0616..b5469199c 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py @@ -7,7 +7,7 @@ InpFile BinFile s -""" # noqa: CPY001 +""" # noqa: A005, CPY001 import datetime import difflib @@ -231,7 +231,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -248,7 +248,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -262,13 +262,13 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 return time_sec else: # noqa: RET505 - raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, RUF100, TRY003 def _sec_to_string(sec): @@ -371,7 +371,7 @@ def read(self, inp_files, wn=None): # noqa: C901 continue elif section is None: logger.debug('Found confusing line: %s', repr(line)) - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 '%(fname)s:%(lnum)d: Non-comment outside of valid section!' % edata ) @@ -3985,7 +3985,7 @@ def contains_section(self, sec): """ # noqa: D205 try: self.get_section(sec) - return True # noqa: TRY300 + return True # noqa: DOC201, TRY300 except NoSectionError: return False @@ -4147,7 +4147,7 @@ def _read_control_line(line, wn, flow_units, control_name): # noqa: C901 elif current[6] == 'BELOW': oper = np.less else: - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 'The following control is not recognized: ' + line ) # OKAY - we are adding in the elevation. 
This is A PROBLEM @@ -4169,14 +4169,14 @@ def _read_control_line(line, wn, flow_units, control_name): # noqa: C901 node, 'level', oper, threshold, action_obj, control_name ) else: - raise RuntimeError('The following control is not recognized: ' + line) # noqa: DOC501 + raise RuntimeError('The following control is not recognized: ' + line) # noqa: DOC501, RUF100 # control_name = '' # for i in range(len(current)-1): # control_name = control_name + '/' + current[i] # control_name = control_name + '/' + str(round(threshold, 2)) elif 'CLOCKTIME' not in current: # at time if 'TIME' not in current: - raise ValueError(f'Unrecognized line in inp file: {line}') # noqa: DOC501, EM102, TRY003 + raise ValueError(f'Unrecognized line in inp file: {line}') # noqa: DOC501, EM102, RUF100, TRY003 if ':' in current[5]: run_at_time = int(_str_time_to_sec(current[5])) @@ -4345,4 +4345,4 @@ def _diff_inp_files( # noqa: C901 g.write(html_diff) g.close() - return n + return n # noqa: DOC201 diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py index c669aedc9..ff332a895 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py @@ -75,7 +75,7 @@ def updateWaterNetworkModelWithResult( # noqa: C901, N802 ------- None. - """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 max_time = result.node['head'].index.max() if latest_simulation_time == None: # noqa: E711 latest_simulation_time = max_time diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py index c02c0ef18..2decbbfa0 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py @@ -229,7 +229,7 @@ def run_sim( # noqa: C901 if run_successful: break - return result_data, run_successful + return result_data, run_successful # noqa: DOC201 def _updateResultStartTime(self, result_data, start_time): # noqa: N802, PLR6301 for res_type, res in result_data.link.items(): # noqa: B007, PERF102 @@ -313,10 +313,10 @@ def _initialize_internal_graph(self): # noqa: C901 from_node_id = self._node_name_to_id[from_node_name] to_node_id = self._node_name_to_id[to_node_name] if (from_node_id, to_node_id) not in n_links: - n_links[(from_node_id, to_node_id)] = 0 - n_links[(to_node_id, from_node_id)] = 0 - n_links[(from_node_id, to_node_id)] += 1 - n_links[(to_node_id, from_node_id)] += 1 + n_links[(from_node_id, to_node_id)] = 0 # noqa: RUF031 + n_links[(to_node_id, from_node_id)] = 0 # noqa: RUF031 + n_links[(from_node_id, to_node_id)] += 1 # noqa: RUF031 + n_links[(to_node_id, from_node_id)] += 1 # noqa: RUF031 rows.append(from_node_id) # noqa: FURB113 cols.append(to_node_id) # noqa: FURB113 rows.append(to_node_id) @@ -379,7 +379,7 @@ def _initialize_internal_graph(self): # noqa: C901 self._node_pairs_with_multiple_links = OrderedDict() for from_node_id, to_node_id in n_links.keys(): # noqa: SIM118 - if n_links[(from_node_id, to_node_id)] > 1: + if n_links[(from_node_id, to_node_id)] > 1: # noqa: RUF031 if ( to_node_id, from_node_id, @@ -390,7 +390,7 @@ def _initialize_internal_graph(self): # noqa: C901 from_node_name = self._node_id_to_name[from_node_id] to_node_name = self._node_id_to_name[to_node_id] tmp_list = self._node_pairs_with_multiple_links[ - (from_node_id, to_node_id) + (from_node_id, to_node_id) # 
noqa: RUF031 ] = [] for link_name in self._wn.get_links_for_node(from_node_name): link = self._wn.get_link(link_name) diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py index 2f0128ca0..f6556539d 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py @@ -10,7 +10,7 @@ ---- -""" # noqa: CPY001 +""" # noqa: A005, CPY001 import logging import re @@ -131,7 +131,7 @@ def _is_number(s): """ # noqa: D400, D401 try: float(s) - return True # noqa: TRY300 + return True # noqa: DOC201, TRY300 except ValueError: return False @@ -211,7 +211,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -228,7 +228,7 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 @@ -242,13 +242,13 @@ def _clock_time_to_sec(s, am_pm): # noqa: C901 time_sec -= 3600 * 12 if not am: if time_sec >= 3600 * 12: - raise RuntimeError( # noqa: DOC501, TRY003 + raise RuntimeError( # noqa: DOC501, RUF100, TRY003 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101 ) time_sec += 3600 * 12 return time_sec else: # noqa: RET505 - raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, TRY003 + raise RuntimeError('Time format in ' 'INP file not recognized. ') # noqa: DOC501, EM101, RUF100, TRY003 def _sec_to_string(sec): diff --git a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py index b1f309404..35073ee55 100644 --- a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py +++ b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py @@ -67,7 +67,7 @@ def __init__(self, definition_file_name): ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 # some of the following lines have been adopted from WNTR self.rm = restoration_data() @@ -122,7 +122,7 @@ def __init__(self, definition_file_name): self.config_file_comment.append(line[1:]) continue elif section is None: - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 '%(fname)s:%(lnum)d: Non-comment outside of valid section!' % edata ) @@ -230,7 +230,7 @@ def _read_entities(self): # noqa: C901 ------- None. 
- """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 # Entities is kept for legacy compatibility with the first version damage_group_data = self.sections.get( '[ENTITIES]', self.sections.get('[Damage Group]') diff --git a/modules/systemPerformance/REWET/REWET/Input/Settings.py b/modules/systemPerformance/REWET/REWET/Input/Settings.py index 6f99723f2..2609e672f 100644 --- a/modules/systemPerformance/REWET/REWET/Input/Settings.py +++ b/modules/systemPerformance/REWET/REWET/Input/Settings.py @@ -304,7 +304,7 @@ def importJsonSettings(self, json_file_path): # noqa: N802 for key, val in settings_data.items(): if key not in self: - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 f'REWET settinsg does not have "{key}" as a settings key' # noqa: EM102 ) diff --git a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py index 0416960ad..5720f7a7c 100644 --- a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py +++ b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py @@ -51,12 +51,12 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 ) if type(time_shift) != int: # noqa: E721 - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Time shift must be integer type: ' + repr(type(time_shift)) + '.' ) if time_shift < 0: - raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, RUF100, TRY003 res = {} for percentage in percentage_list: @@ -69,10 +69,10 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 elif time_type.lower() == 'day': pr.convertTimeSecondToDay(temp_res, 'restore_time', time_shift) else: - raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501 + raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501, RUF100 res[percentage] = temp_res - return res + return res # noqa: DOC201 def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N802 @@ -102,12 +102,12 @@ def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 ) if type(time_shift) != int: # noqa: E721 - raise ValueError( # noqa: DOC501 + raise ValueError( # noqa: DOC501, RUF100 'Time shift must be integer type: ' + repr(type(time_shift)) + '.' ) if time_shift < 0: - raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time shift ust be bigger than or equal to zero.') # noqa: DOC501, EM101, RUF100, TRY003 res = {} for percentage in percentage_list: @@ -120,7 +120,7 @@ def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 elif time_type.lower() == 'day': pr.convertTimeSecondToDay(temp_res, 'restore_time', time_shift) else: - raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501 + raise ValueError('Uknown time_type: ' + repr(time_type)) # noqa: DOC501, RUF100 res[percentage] = temp_res - return res + return res # noqa: DOC201 diff --git a/modules/systemPerformance/REWET/REWET/initial.py b/modules/systemPerformance/REWET/REWET/initial.py index df8f2a479..53f88f7db 100644 --- a/modules/systemPerformance/REWET/REWET/initial.py +++ b/modules/systemPerformance/REWET/REWET/initial.py @@ -46,7 +46,7 @@ def run(self, project_file=None): # noqa: C901 ------- None. 
- """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 settings = Settings() if project_file is not None: project_file = str(project_file) diff --git a/modules/systemPerformance/REWET/REWET/restoration/base.py b/modules/systemPerformance/REWET/REWET/restoration/base.py index fba5a594f..2f0785c2b 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/base.py +++ b/modules/systemPerformance/REWET/REWET/restoration/base.py @@ -232,7 +232,7 @@ def addAgent(self, agent_name, agent_type, definition): # noqa: N802 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 # number_of_agents = int(definition['Number']) agent_speed = self.registry.settings['crew_travel_speed'] temp_agent_data = AgentData( @@ -270,7 +270,7 @@ def setActiveAgents(self, active_agent_ID_list): # noqa: N802, N803 ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 for active_agent_ID in active_agent_ID_list: # noqa: N806 self._agents['active'].loc[active_agent_ID] = True @@ -600,7 +600,7 @@ def addShift(self, name, beginning, ending): # noqa: N802 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 if name in self._shift_data: raise ValueError('Shift name already registered') # noqa: EM101, TRY003 if type(beginning) != int and type(beginning) != float: # noqa: E721 @@ -673,7 +673,7 @@ def assignShiftToAgent(self, agent_ID, shift_name): # noqa: N802, N803 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 if agent_ID in self._all_agent_shift_data: raise ValueError('The agent ID currently in Agent ALl Shifts') # noqa: EM101, TRY003 if shift_name not in self._shift_data: diff --git a/modules/systemPerformance/REWET/REWET/restoration/io.py b/modules/systemPerformance/REWET/REWET/restoration/io.py index c03595b62..8c7277831 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/io.py +++ b/modules/systemPerformance/REWET/REWET/restoration/io.py @@ -44,7 +44,7 @@ def __init__(self, restoration_model, definition_file_name): ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 # some of the following lines have been adopted from WNTR self.rm = restoration_model self.crew_data = {} @@ -106,7 +106,7 @@ def __init__(self, restoration_model, definition_file_name): self.config_file_comment.append(line[1:]) continue elif section is None: - raise RuntimeError( # noqa: DOC501 + raise RuntimeError( # noqa: DOC501, RUF100 '%(fname)s:%(lnum)d: Non-comment outside of valid section!' % edata ) @@ -1180,7 +1180,7 @@ def _read_config(self): ------- None. - """ # noqa: D205, D400, D401 + """ # noqa: D205, D400, D401, DOC202 edata = OrderedDict() self._crew_file_name = [] self._crew_file_type = [] diff --git a/modules/systemPerformance/REWET/REWET/restoration/model.py b/modules/systemPerformance/REWET/REWET/restoration/model.py index 71f61cad2..862d71480 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/model.py +++ b/modules/systemPerformance/REWET/REWET/restoration/model.py @@ -907,12 +907,12 @@ def updateShifiting(self, time): # noqa: N802 ------- None. 
- """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 if type(time) != int and type(time) != float: # noqa: E721 raise ValueError('Time must be integer not ' + str(type(time))) # noqa: DOC501 time = int(time) if time < 0: - raise ValueError('Time must be bigger than zero') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time must be bigger than zero') # noqa: DOC501, EM101, RUF100, TRY003 next_shift_time = self.shifting.getNextShiftTime(time) # logger.debug('next shitt time = ' + str(next_shift_time)) self._addHardEvent(int(next_shift_time), 'shift') @@ -1555,18 +1555,18 @@ def _addHardEvent(self, next_time, requester, detail=None, current_time=None): if type(next_time) != int and type(next_time) != float: # noqa: E721 raise ValueError('time must be int, not ' + str(type(next_time))) # noqa: DOC501 if detail != None and current_time == None: # noqa: E711 - raise ValueError('When detail is provided, current time cannot be None') # noqa: DOC501, EM101, TRY003 + raise ValueError('When detail is provided, current time cannot be None') # noqa: DOC501, EM101, RUF100, TRY003 minimum_time_devision = int(self._registry.settings['simulation_time_step']) if current_time != None: # noqa: E711 if next_time < current_time: - raise ValueError('Time is smaller than current time') # noqa: DOC501, EM101, TRY003 + raise ValueError('Time is smaller than current time') # noqa: DOC501, EM101, RUF100, TRY003 if detail == None: # noqa: E711 - raise ValueError( # noqa: DOC501, TRY003 + raise ValueError( # noqa: DOC501, RUF100, TRY003 'When current time is provided, detail cannot be None' # noqa: EM101 ) if minimum_time_devision < 0: - raise ValueError('Minimum time division cannot be negative') # noqa: DOC501, EM101, TRY003 + raise ValueError('Minimum time division cannot be negative') # noqa: DOC501, EM101, RUF100, TRY003 name = requester + '-' + detail diff --git a/modules/systemPerformance/REWET/REWET/restoration/registry.py b/modules/systemPerformance/REWET/REWET/restoration/registry.py index 1ce8da5dc..50f120fa2 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/registry.py +++ b/modules/systemPerformance/REWET/REWET/restoration/registry.py @@ -515,7 +515,7 @@ def addPipeDamageToRegistry(self, node_name, data): # noqa: N802 ------- None. - """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 # self._pipe_node_damage_status[name] = data leaking_pipe_with_pipeA_orginal_pipe = self._pipe_leak_history[ # noqa: N806 @@ -582,7 +582,7 @@ def addPipeDamageToRegistry(self, node_name, data): # noqa: N802 self._pipe_break_history.loc[node_name, 'Node_B'] = data['node_B'] else: - raise ValueError('Undefined damage type') # noqa: DOC501, EM101, TRY003 + raise ValueError('Undefined damage type') # noqa: DOC501, EM101, RUF100, TRY003 def addGeneralNodeDamageToRegistry(self, node_name, data=None): # noqa: ARG002, N802, D102 self._gnode_damage_table.loc[node_name, 'damage_type'] = None @@ -1280,7 +1280,7 @@ def occupyNode(self, node_name, occupier_name): # noqa: N802 ------- None. - """ # noqa: D400 + """ # noqa: D400, DOC202 if occupier_name in self._occupancy: # if not iNodeCoupled(node_name): raise ValueError( # noqa: TRY003 @@ -1307,7 +1307,7 @@ def removeOccupancy(self, occupier_name): # noqa: N802 ------- None. - """ # noqa: D401 + """ # noqa: D401, DOC202 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: @@ -1350,7 +1350,7 @@ def whereIsOccupiedByName(self, occupier_name): # noqa: N802 str or series node(s) ID. 
- """ # noqa: D400, D401 + """ # noqa: D400, D401, DOC202 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: raise ValueError('there is no occupancy with this name') # noqa: EM101, TRY003 @@ -1387,7 +1387,7 @@ def coupleTwoBreakNodes(self, break_point_1_name, break_point_2_name): # noqa: ------- None. - """ # noqa: D205 + """ # noqa: D205, DOC202 self._pipe_break_node_coupling[break_point_1_name] = break_point_2_name self._pipe_break_node_coupling[break_point_2_name] = break_point_1_name self._break_point_attached_to_mainPipe.append(break_point_1_name) diff --git a/modules/systemPerformance/REWET/REWET/timeline.py b/modules/systemPerformance/REWET/REWET/timeline.py index 964977fa1..12ba6efaa 100644 --- a/modules/systemPerformance/REWET/REWET/timeline.py +++ b/modules/systemPerformance/REWET/REWET/timeline.py @@ -143,7 +143,7 @@ def addEventTime(self, event_distinct_time, event_type='dmg'): # noqa: N802 ------- None. - """ # noqa: D205, D401, D404 + """ # noqa: D205, D401, D404, DOC202 if type(event_distinct_time) != pd.core.series.Series: # noqa: E721 if ( type(event_distinct_time) == numpy.float64 # noqa: E721 @@ -218,7 +218,7 @@ def checkAndAmendTime(self): # noqa: N802 ------- None. - """ # noqa: D205, D401 + """ # noqa: D205, D401, DOC202 first_length = len(self._event_time_register.index) self._event_time_register = self._event_time_register[ self._event_time_register.index <= self._simulation_end_time diff --git a/modules/systemPerformance/REWET/preprocessorIO.py b/modules/systemPerformance/REWET/preprocessorIO.py index bc3d730fc..08bd122eb 100644 --- a/modules/systemPerformance/REWET/preprocessorIO.py +++ b/modules/systemPerformance/REWET/preprocessorIO.py @@ -187,7 +187,7 @@ def save_scenario_table(scenario_table, scenario_table_file_path): ------- None. 
-    """  # noqa: D205, D400, D401
+    """  # noqa: D205, D400, D401, DOC202
     if isinstance(scenario_table, pd.core.frame.DataFrame):
         pass
     elif isinstance(scenario_table, list):

From 47d160b8562dd70421c633f09e92db495d740b88 Mon Sep 17 00:00:00 2001
From: jinyan1214
Date: Fri, 16 Aug 2024 15:16:45 -0700
Subject: [PATCH 7/9] ruff check

---
 modules/Workflow/computeResponseSpectrum.py | 8 ++--
 modules/Workflow/createGM4BIM.py | 36 ++++++++---------
 modules/Workflow/whale/main.py | 16 ++++----
 modules/common/simcenter_common.py | 4 +-
 modules/createEVENT/CFDEvent/CFDEvent.py | 2 +-
 .../EmptyDomainCFD/EmptyDomainCFD.py | 2 +-
 .../EmptyDomainCFD/post_process_output.py | 14 +++----
 .../GeoClawOpenFOAM/AddBuildingForces.py | 4 +-
 .../createEVENT/GeoClawOpenFOAM/GeoClaw.py | 2 +-
 .../GeoClawOpenFOAM/GeoClawBathy.py | 2 +-
 .../GeoClawOpenFOAM/GetOpenFOAMEvent.py | 8 ++--
 modules/createEVENT/GeoClawOpenFOAM/flume.py | 6 +--
 .../createEVENT/GeoClawOpenFOAM/hydroUtils.py | 10 ++---
 .../GeoClawOpenFOAM/of7Alpboundary.py | 6 +--
 .../GeoClawOpenFOAM/of7Building.py | 4 +-
 .../createEVENT/GeoClawOpenFOAM/of7Decomp.py | 4 +-
 .../GeoClawOpenFOAM/of7Geometry.py | 4 +-
 .../createEVENT/GeoClawOpenFOAM/of7Initial.py | 6 +--
 .../GeoClawOpenFOAM/of7Materials.py | 6 +--
 .../createEVENT/GeoClawOpenFOAM/of7Meshing.py | 10 ++---
 .../createEVENT/GeoClawOpenFOAM/of7Others.py | 4 +-
 .../GeoClawOpenFOAM/of7Prboundary.py | 6 +--
 .../createEVENT/GeoClawOpenFOAM/of7Process.py | 8 ++--
 .../GeoClawOpenFOAM/of7PtDboundary.py | 10 ++---
 .../createEVENT/GeoClawOpenFOAM/of7Solve.py | 12 +++---
 .../GeoClawOpenFOAM/of7Turbulence.py | 4 +-
 .../GeoClawOpenFOAM/of7Uboundary.py | 8 ++--
 .../createEVENT/GeoClawOpenFOAM/openfoam7.py | 26 ++++++------
 .../createEVENT/GeoClawOpenFOAM/osuFlume.py | 2 +-
 .../createEVENT/GeoClawOpenFOAM/userFlume.py | 2 +-
 .../IsolatedBuildingCFD.py | 2 +-
 .../createEVENT/Istanbul/IstanbulStations.py | 2 +-
 modules/createEVENT/M9/M9API.py | 2 +-
 modules/createEVENT/M9/M9Stations.py | 2 +-
 modules/createEVENT/MPM/MPM.py | 2 +-
 .../createEVENT/MPM/post_process_output.py | 14 +++----
 .../SurroundedBuildingCFD.py | 2 +-
 .../post_process_output.py | 14 +++----
 .../coupledDigitalTwin/CoupledDigitalTwin.py | 2 +-
 .../IntensityMeasureComputer.py | 2 +-
 .../siteResponse/RegionalSiteResponse.py | 2 +-
 .../stochasticWave/StochasticWave.py | 2 +-
 modules/createSAM/AutoSDA/beam_component.py | 2 +-
 modules/createSAM/AutoSDA/column_component.py | 2 +-
 modules/createSAM/AutoSDA/connection_part.py | 2 +-
 modules/createSAM/AutoSDA/help_functions.py | 32 +++++++--------
 modules/performDL/pelicun3/DL_visuals.py | 40 +++++++++----------
 .../performHUA/pyincore_data/censusutil.py | 8 ++--
 modules/performREC/pyrecodes/run_pyrecodes.py | 3 +-
 .../regionalGroundMotion/CreateStation.py | 28 ++++++-------
 .../regionalGroundMotion/FetchOpenQuake.py | 10 ++---
 .../regionalGroundMotion/HazardOccurrence.py | 12 +++---
 .../HazardSimulationEQ.py | 2 +-
 .../gmpe/CorrelationModel.py | 22 +++++-----
 .../gmpe/SignificantDurationModel.py | 6 +--
 .../regionalGroundMotion/gmpe/openSHAGMPE.py | 2 +-
 .../regionalGroundMotion/landslide.py | 38 +++++++++---------
 .../regionalGroundMotion/liquefaction.py | 14 +++----
 .../ComputeIntensityMeasure.py | 4 +-
 .../regionalWindField/CreateScenario.py | 32 +++++++--------
 .../regionalWindField/CreateStation.py | 2 +-
 .../regionalWindField/WindFieldSimulation.py | 4 +-
 modules/performUQ/SimCenterUQ/PLoM/PLoM.py | 12 +++---
 modules/performUQ/SimCenterUQ/PLoM/general.py | 20 +++++-----
 modules/performUQ/SimCenterUQ/runPLoM.py | 8 ++--
 .../performUQ/UCSD_UQ/defaultLogLikeScript.py | 2 +-
 modules/performUQ/UCSD_UQ/mwg_sampler.py | 2 +-
 modules/performUQ/UCSD_UQ/runFEM.py | 2 +-
 modules/performUQ/UCSD_UQ/runTMCMC.py | 2 +-
 .../performUQ/common/ERAClasses/ERACond.py | 10 ++---
 modules/performUQ/other/UQpyRunner.py | 2 +-
 .../systemPerformance/REWET/REWET/Damage.py | 8 ++--
 .../REWET/REWET/EnhancedWNTR/epanet/io.py | 4 +-
 .../REWET/REWET/EnhancedWNTR/network/model.py | 2 +-
 .../REWET/REWET/EnhancedWNTR/sim/epanet.py | 6 +--
 .../REWET/REWET/EnhancedWNTR/sim/io.py | 2 +-
 .../REWET/REWET/Input/Policy_IO.py | 4 +-
 .../REWET/REWET/Output/GUI_Curve_API.py | 4 +-
 .../systemPerformance/REWET/REWET/initial.py | 2 +-
 .../REWET/REWET/restoration/base.py | 8 ++--
 .../REWET/REWET/restoration/io.py | 4 +-
 .../REWET/REWET/restoration/model.py | 2 +-
 .../REWET/REWET/restoration/registry.py | 10 ++---
 .../systemPerformance/REWET/REWET/timeline.py | 4 +-
 .../systemPerformance/REWET/preprocessorIO.py | 2 +-
 85 files changed, 335 insertions(+), 336 deletions(-)

diff --git a/modules/Workflow/computeResponseSpectrum.py b/modules/Workflow/computeResponseSpectrum.py
index 83f2a2d9c..bb4ad3f09 100644
--- a/modules/Workflow/computeResponseSpectrum.py
+++ b/modules/Workflow/computeResponseSpectrum.py
@@ -23,7 +23,7 @@ def convert_accel_units(acceleration, from_, to_='cm/s/s'):  # noqa: C901
     acceleration = np.asarray(acceleration)
     if from_ == 'g':
         if to_ == 'g':
-            return acceleration  # noqa: DOC201
+            return acceleration  # noqa: DOC201, RUF100
         if to_ in m_sec_square:
             return acceleration * g
         if to_ in cm_sec_square:
@@ -70,7 +70,7 @@ def get_velocity_displacement(
     velocity = time_step * cumtrapz(acceleration, initial=0.0)
     if displacement is None:
         displacement = time_step * cumtrapz(velocity, initial=0.0)
-    return velocity, displacement  # noqa: DOC201
+    return velocity, displacement  # noqa: DOC201, RUF100


 class NewmarkBeta:
@@ -160,7 +160,7 @@ def run(self):
             'PGV': np.max(np.fabs(self.velocity)),
             'PGD': np.max(np.fabs(self.displacement)),
         }
-        return self.response_spectrum, time_series, accel, vel, disp  # noqa: DOC201
+        return self.response_spectrum, time_series, accel, vel, disp  # noqa: DOC201, RUF100

     def _newmark_beta(self, omega, cval, kval):  # noqa: ARG002
         """Newmark-beta integral
@@ -216,4 +216,4 @@ def _newmark_beta(self, omega, cval, kval):  # noqa: ARG002
             disp[j, :] = delta_u + disp[j - 1, :]
             a_t[j, :] = ground_acc[j] + accel[j, :]

-        return accel, vel, disp, a_t  # noqa: DOC201
+        return accel, vel, disp, a_t  # noqa: DOC201, RUF100
diff --git a/modules/Workflow/createGM4BIM.py b/modules/Workflow/createGM4BIM.py
index 2f3c6a9ad..d2fb8f22d 100644
--- a/modules/Workflow/createGM4BIM.py
+++ b/modules/Workflow/createGM4BIM.py
@@ -120,7 +120,7 @@ def get_scale_factors(input_units, output_units):  # noqa: C901
             scale_factors.update({input_name: f_scale})

-    return scale_factors  # noqa: DOC201
+    return scale_factors  # noqa: DOC201, RUF100


 def createFilesForEventGrid(inputDir, outputDir, removeInputDir):  # noqa: C901, N802, N803, D103, PLR0915
@@ -410,28 +410,28 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):  # noqa: C901,
             m_pgd_y = 0.0
             s_pgd_y = 0.0
         # add to dictionary
-        dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID))  # noqa: RUF031
+        dict_im[('type', 'loc', 'dir', 'stat')].append(int(siteID))  # noqa: RUF031, RUF100
         # pga
-        dict_im[('PGA', 0, 1, 'median')].append(m_pga_x)  # noqa: RUF031
-        dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x)  # noqa: RUF031
-        dict_im[('PGA', 0, 2, 'median')].append(m_pga_y)  # noqa: RUF031
-        dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y)  # noqa: RUF031
+        dict_im[('PGA', 0, 1, 'median')].append(m_pga_x)  # noqa: RUF031, RUF100
+        dict_im[('PGA', 0, 1, 'beta')].append(s_pga_x)  # noqa: RUF031, RUF100
+        dict_im[('PGA', 0, 2, 'median')].append(m_pga_y)  # noqa: RUF031, RUF100
+        dict_im[('PGA', 0, 2, 'beta')].append(s_pga_y)  # noqa: RUF031, RUF100
         # pgv
-        dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x)  # noqa: RUF031
-        dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x)  # noqa: RUF031
-        dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y)  # noqa: RUF031
-        dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y)  # noqa: RUF031
+        dict_im[('PGV', 0, 1, 'median')].append(m_pgv_x)  # noqa: RUF031, RUF100
+        dict_im[('PGV', 0, 1, 'beta')].append(s_pgv_x)  # noqa: RUF031, RUF100
+        dict_im[('PGV', 0, 2, 'median')].append(m_pgv_y)  # noqa: RUF031, RUF100
+        dict_im[('PGV', 0, 2, 'beta')].append(s_pgv_y)  # noqa: RUF031, RUF100
         # pgd
-        dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x)  # noqa: RUF031
-        dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x)  # noqa: RUF031
-        dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y)  # noqa: RUF031
-        dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y)  # noqa: RUF031
+        dict_im[('PGD', 0, 1, 'median')].append(m_pgd_x)  # noqa: RUF031, RUF100
+        dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x)  # noqa: RUF031, RUF100
+        dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y)  # noqa: RUF031, RUF100
+        dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y)  # noqa: RUF031, RUF100
         for jj, Ti in enumerate(periods):  # noqa: N806
             cur_sa = f'SA({Ti}s)'
-            dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj])  # noqa: RUF031
-            dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj])  # noqa: RUF031
-            dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj])  # noqa: RUF031
-            dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj])  # noqa: RUF031
+            dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj])  # noqa: RUF031, RUF100
+            dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj])  # noqa: RUF031, RUF100
+            dict_im[(cur_sa, 0, 2, 'median')].append(m_psa_y[jj])  # noqa: RUF031, RUF100
+            dict_im[(cur_sa, 0, 2, 'beta')].append(s_psa_y[jj])  # noqa: RUF031, RUF100

         # aggregate
         for cur_key, cur_value in dict_im.items():
diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py
index e40adc597..759f8af73 100644
--- a/modules/Workflow/whale/main.py
+++ b/modules/Workflow/whale/main.py
@@ -310,7 +310,7 @@ def create_command(command_list, enforced_python=None):
     for command_arg in command_list[1:]:
         command += f'"{command_arg}" '

-    return command  # noqa: DOC201
+    return command  # noqa: DOC201, RUF100


 def run_command(command):
@@ -357,7 +357,7 @@ def run_command(command):

         py_script.main(arg_list)

-        return '', ''  # noqa: DOC201
+        return '', ''  # noqa: DOC201, RUF100

     else:  # noqa: RET505
         # fmk with Shell=True not working on older windows machines, new approach needed for quoted command ..
turn into a list @@ -668,7 +668,7 @@ def get_command_list(self, app_path, force_posix=False): # noqa: FBT002, C901 # pp.pprint(arg_list) - return arg_list # noqa: DOC201 + return arg_list # noqa: DOC201, RUF100 class Workflow: @@ -1316,7 +1316,7 @@ def create_asset_files(self): log_div() - return assetFilesList # noqa: DOC201 + return assetFilesList # noqa: DOC201, RUF100 def augment_asset_files(self): # noqa: C901 """Short description @@ -1504,7 +1504,7 @@ def augment_asset_files(self): # noqa: C901 ) log_div() - return assetFilesList # noqa: DOC201 + return assetFilesList # noqa: DOC201, RUF100 def perform_system_performance_assessment(self, asset_type): """For an asset type run the system level performance assessment application @@ -1525,7 +1525,7 @@ def perform_system_performance_assessment(self, asset_type): prepend_timestamp=False, ) log_div() - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 if performance_app.rel_path == None: # noqa: E711 log_msg( @@ -1909,7 +1909,7 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'): # noqa: C901, N8 prepend_timestamp=False, ) log_div() - return dst # noqa: DOC201 + return dst # noqa: DOC201, RUF100 def cleanup_simdir(self, asst_id): """Short description @@ -2734,7 +2734,7 @@ def estimate_losses( # noqa: C901 ], ) if ('PID', '0') in df_res.columns: - del df_res[('PID', '0')] # noqa: RUF031 + del df_res[('PID', '0')] # noqa: RUF031, RUF100 # store the EDP statistics in the output DF for col in np.transpose(col_info): diff --git a/modules/common/simcenter_common.py b/modules/common/simcenter_common.py index 15e110eed..c54977fbe 100644 --- a/modules/common/simcenter_common.py +++ b/modules/common/simcenter_common.py @@ -283,7 +283,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors # noqa: DOC201 + return scale_factors # noqa: DOC201, RUF100 def get_unit_bases(input_units): @@ -306,4 +306,4 @@ def get_unit_bases(input_units): input_unit_bases = cur_unit_bases break - return input_unit_bases # noqa: DOC201 + return input_unit_bases # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/CFDEvent/CFDEvent.py b/modules/createEVENT/CFDEvent/CFDEvent.py index 7343cb800..884f232eb 100644 --- a/modules/createEVENT/CFDEvent/CFDEvent.py +++ b/modules/createEVENT/CFDEvent/CFDEvent.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py index 48fec2e0b..5d309a10f 100644 --- a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py +++ b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/EmptyDomainCFD/post_process_output.py b/modules/createEVENT/EmptyDomainCFD/post_process_output.py index 6a32be5ac..3327b8a1f 100644 
--- a/modules/createEVENT/EmptyDomainCFD/post_process_output.py +++ b/modules/createEVENT/EmptyDomainCFD/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p # noqa: DOC201 + return probes, time, p # noqa: DOC201, RUF100 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: DOC201, RET504 + return sField # noqa: DOC201, RET504, RUF100 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U # noqa: DOC201 + return probes, time, U # noqa: DOC201, RUF100 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: DOC201, RET504 + return L # noqa: DOC201, RET504, RUF100 def psd(x, dt, nseg): # noqa: F811 diff --git a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py index 9bd04831e..95ee701f9 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py +++ b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py @@ -9,7 +9,7 @@ def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803 It also checks that system directory contains the controlDict """ # noqa: D205, D400, D401, D404 if not os.path.isdir(caseDir): # noqa: PTH112 - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 caseDirList = os.listdir(caseDir) # noqa: N806 necessaryDirs = ['0', 'constant', 'system'] # noqa: N806 @@ -27,7 +27,7 @@ def findFunctionsDictionary(controlDictLines): # noqa: N802, N803 """This method will find functions dictionary in the controlDict""" # noqa: D400, D401, D404 for line in controlDictLines: if line.startswith('functions'): - return (True, controlDictLines.index(line) + 2) # noqa: DOC201 + return (True, controlDictLines.index(line) + 2) # noqa: DOC201, RUF100 return [False, len(controlDictLines)] diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py index e029b1a5c..890c5549f 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py @@ -80,4 +80,4 @@ def creategeom(self, data, path): # Points of interest bottompts = self.getbathy(maxvalues, minvalues, data) # noqa: F841 - return 0 # noqa: 
DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py index d2e525bb4..b4c7961f4 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py @@ -61,4 +61,4 @@ def creategeom(self, data, path): # noqa: ARG002 # Create a utilities object hydroutil = hydroUtils() # noqa: F841 - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py index 4e64bbd37..8a80e4cfc 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py +++ b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py @@ -16,7 +16,7 @@ def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803 It also checks that system directory contains the controlDict """ # noqa: D205, D400, D401, D404 if not os.path.isdir(caseDir): # noqa: PTH112 - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 caseDirList = os.listdir(caseDir) # noqa: N806 necessaryDirs = ['0', 'constant', 'system', 'postProcessing'] # noqa: N806 @@ -36,7 +36,7 @@ def parseForceComponents(forceArray): # noqa: N802, N803 x = float(components[0]) y = float(components[1]) z = float(components[2]) - return [x, y, z] # noqa: DOC201 + return [x, y, z] # noqa: DOC201, RUF100 def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime): # noqa: N802, N803 @@ -77,14 +77,14 @@ def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime): # noqa: N80 forces[i].Y.append(fpry + fvy + fpoy) forces[i].Z.append(fprz + fvz + fpoz) - return [deltaT, forces] # noqa: DOC201 + return [deltaT, forces] # noqa: DOC201, RUF100 def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent( # noqa: N802 diff --git a/modules/createEVENT/GeoClawOpenFOAM/flume.py b/modules/createEVENT/GeoClawOpenFOAM/flume.py index ff99da1ee..bfba2ea13 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/flume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/flume.py @@ -110,7 +110,7 @@ def generateflume(self, breadth, path): ) # Write bottom STL file # Return extreme values - return extremeval # noqa: DOC201 + return extremeval # noqa: DOC201, RUF100 ############################################################# def flumedata(self, IpPTFile): # noqa: N803 @@ -178,7 +178,7 @@ def flumedata(self, IpPTFile): # noqa: N803 self.npt = np.delete(self.npt, noindexes, axis=0) # Return extreme values - return extremeval # noqa: DOC201 + return extremeval # noqa: DOC201, RUF100 #################################################################### def right(self): @@ -431,4 +431,4 @@ def extremedata(self, extreme, breadth): ) tempfileID.close # noqa: B018 - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py index e4a1f8644..bf32311f5 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py +++ b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py @@ -90,7 +90,7 @@ def extract(self, obj, path, ind, arr): # noqa: C901 else: arr.append(None) - return arr # noqa: DOC201 + return arr # noqa: DOC201, RUF100 ############################################################# 
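[Editor's note on the recurring change in this patch: RUF100 is Ruff's unused-noqa check. When a suppressed code such as DOC201 (the pydoclint-style "return value not documented in the docstring" rule) is not enabled in the active configuration, Ruff reports the `# noqa: DOC201` directive itself as unused; appending RUF100 to the same directive silences that meta-report while keeping the original suppression for configurations that do enable the rule. A minimal sketch of the pattern, adapted from the hydroUtils.getlist hunk below (rewritten as a module-level function):

    def getlist(data):
        """Converts a string of comma-separated numbers to a list of floats."""
        data = data.replace(',', ' ')
        results = [float(n) for n in data.split()]
        # DOC201: the docstring above has no Returns section (pydoclint rule).
        # RET504: 'results' is assigned and then immediately returned.
        # RUF100: appended by this patch so that, under a Ruff configuration
        # where DOC201/RET504 are not enabled, the directive itself is not
        # reported as an unused noqa.
        return results  # noqa: DOC201, RET504, RUF100
]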
def extract_element_from_json(self, obj, path): @@ -106,7 +106,7 @@ def extract_element_from_json(self, obj, path): """ # noqa: D205, D401 if isinstance(obj, dict): # noqa: RET503 - return self.extract(obj, path, 0, []) # noqa: DOC201 + return self.extract(obj, path, 0, []) # noqa: DOC201, RUF100 elif isinstance(obj, list): # noqa: RET505 outer_arr = [] for item in obj: @@ -129,7 +129,7 @@ def general_header(self): | | O | \\*---------------------------------------------------------------------------*/ \n\n""" # noqa: W291 - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 #################################################################### def of7header(self, OFclass, location, filename): # noqa: N803 @@ -156,7 +156,7 @@ class {OFclass}; }} // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def hydrolog(self, projname, fipath): @@ -210,4 +210,4 @@ def getlist(self, data): data = data.replace(',', ' ') results = [float(n) for n in data.split()] - return results # noqa: DOC201, RET504 + return results # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py index d677896dd..4ac1b0b2f 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py @@ -89,7 +89,7 @@ def Alptext(self, data, patches): # noqa: N802 Alptext = Alptext + '}\n\n' # noqa: N806 # Return the text for velocity BC - return Alptext # noqa: DOC201, RET504 + return Alptext # noqa: DOC201, RET504, RUF100 ############################################################# def Alpheader(self): # noqa: N802 @@ -114,7 +114,7 @@ def Alpheader(self): # noqa: N802 header = header + 'internalField\tuniform\t0;\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def Alppatchtext(self, Alptype, patchname): # noqa: ARG002, N802, N803 @@ -140,4 +140,4 @@ def Alppatchtext(self, Alptype, patchname): # noqa: ARG002, N802, N803 Alptext = Alptext + 'type\tzeroGradient;\n\t}\n' # noqa: N806 # Return the header for U file - return Alptext # noqa: DOC201 + return Alptext # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py index 7062caa2a..ecb806c7e 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py @@ -100,7 +100,7 @@ def buildcheck(self, data, path): # noqa: C901, PLR0911 data, ['Events', 'BuildingSTLFile'] ) if stlfile == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 stlfile = ', '.join( hydroutil.extract_element_from_json( @@ -218,7 +218,7 @@ def createbuilds(self, data, path): elif buildeftype == 'Parameters': self.buildpara(data, path) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def buildmanual(self, data, path): diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py index f929b7f50..40349fa86 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py +++ 
b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py @@ -75,7 +75,7 @@ def decomptext(self, data): decomptext = decomptext + 'method\tscotch;\n\n' - return decomptext # noqa: DOC201, RET504 + return decomptext # noqa: DOC201, RET504, RUF100 ############################################################# def decompheader(self): @@ -97,7 +97,7 @@ def decompheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def scripts(self, data, path): # noqa: ARG002 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py index df3126ff3..1c4b78679 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py @@ -82,7 +82,7 @@ def geomcheck(self, data, path): # noqa: C901, PLR0911 data, ['Events', 'NumBathymetryFiles'] ) if numbathy == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 numbathy = ', '.join( hydroutil.extract_element_from_json( @@ -250,7 +250,7 @@ def createOFSTL(self, data, path): # noqa: C901, N802 # Create geometry (i.e. STL files) and extreme file ecode = finalgeom.creategeom(data, path) if ecode < 0: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # Bathymetry only elif int(simtype) == 2: # noqa: PLR2004 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py index 4043f2b36..16825f65b 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py @@ -173,7 +173,7 @@ def alphatext(self, data, fipath): alphatext = alphatext + '\n);' - return alphatext # noqa: DOC201, RET504 + return alphatext # noqa: DOC201, RET504, RUF100 ############################################################# def alphaheader(self): @@ -195,7 +195,7 @@ def alphaheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def alphacheck(self, data, fipath): @@ -220,7 +220,7 @@ def alphacheck(self, data, fipath): fname = 'SWAlpha.txt' swalphafile = os.path.join(fipath, fname) # noqa: PTH118 if not os.path.exists(swalphafile): # noqa: PTH110 - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # For all types other than the shallow water else: diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py index 5dd10114a..97c77bec6 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py @@ -120,7 +120,7 @@ def mattext(self, data): mattext = mattext + 'sigma\t[1 0 -2 0 0 0 0]\t' + sigma + ';\n' - return mattext # noqa: DOC201, RET504 + return mattext # noqa: DOC201, RET504, RUF100 ############################################################# def matheader(self): @@ -142,7 +142,7 @@ def matheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 
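[Editor's note: rather than widening the suppressions, the underlying DOC201/RET504 reports on these header builders could be resolved directly, by documenting the return value and returning the expression itself. A sketch based on the decompheader pattern above, with the header text abbreviated and the docstring wording assumed, not taken from the source:

    def decompheader():
        """Creates the header text for the decomposeParDict file.

        Returns:
            str: OpenFOAM dictionary header block (abbreviated here).
        """
        return '/*--------------------------------*- C++ -*----------*/\n\n'
]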
############################################################# def matcheck(self, data): @@ -162,7 +162,7 @@ def matcheck(self, data): data, ['Events', 'WaterViscosity'] ) if nuwater == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # Exponent nuwaterexp = hydroutil.extract_element_from_json( data, ['Events', 'WaterViscosityExp'] diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py index abf1c24b0..dc300767a 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py @@ -72,7 +72,7 @@ def meshcheck(self, data, fipath): # If hydro mesher - nothing to check if int(mesher[0]) == 0: - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 # Other mesh software elif int(mesher[0]) == 1: # noqa: RET505 @@ -126,7 +126,7 @@ def meshheader(self, fileobjec): ) # Return the header for meshing file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def bmeshtext(self, data): @@ -284,7 +284,7 @@ def bmeshtext(self, data): # Add merge patch pairs bmeshtext = bmeshtext + 'mergePatchPairs\n(\n);\n' - return bmeshtext # noqa: DOC201, RET504 + return bmeshtext # noqa: DOC201, RET504, RUF100 ############################################################# def sfetext(self): @@ -320,7 +320,7 @@ def sfetext(self): elif int(data_geoext[6]) == 3: # noqa: PLR2004 sfetext = sfetext + 'OtherBuilding.stl\n' + stlinfo + '\n\n' - return sfetext # noqa: DOC201 + return sfetext # noqa: DOC201, RUF100 ############################################################# def shmtext(self, data): @@ -505,7 +505,7 @@ def shmtext(self, data): shmtext = shmtext + 'debug\t0;\n' shmtext = shmtext + 'mergeTolerance\t1E-6;\n' - return shmtext # noqa: DOC201, RET504 + return shmtext # noqa: DOC201, RET504, RUF100 ############################################################# def scripts(self, data, path): # noqa: C901 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py index 3afa97455..a2b0b1bdf 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py @@ -78,7 +78,7 @@ def othersheader(self, fileclas, fileloc, fileobjec): ) # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def gfiletext(self, data): @@ -140,4 +140,4 @@ def gfiletext(self, data): + ');\n' ) - return gfiletext # noqa: DOC201, RET504 + return gfiletext # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py index d6ee4a088..151d95546 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py @@ -93,7 +93,7 @@ def Prtext(self, data, patches): # noqa: N802 prtext = prtext + '}\n\n' # Return the text for velocity BC - return prtext # noqa: DOC201, RET504 + return prtext # noqa: DOC201, RET504, RUF100 ############################################################# def Prheader(self): # noqa: N802 @@ -118,7 +118,7 @@ def Prheader(self): # noqa: N802 header = header + 'internalField\tuniform\t0;\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 
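[Editor's note: RET505 (unnecessary elif/else after return), suppressed in the meshcheck hunk above and in several later ones, asks for a plain if once the earlier branch has returned. A small runnable sketch of that shape, with the 0/1 option codes taken from the hunk and the branch bodies simplified:

    def meshcheck_demo(mesher_code):
        """Sketch of the RET505-clean shape for of7Meshing.meshcheck."""
        if mesher_code == 0:  # hydro mesher: nothing to check
            return 0
        if mesher_code == 1:  # plain 'if': the branch above already returned
            return 1
        return -1
]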
############################################################# def Prpatchtext(self, data, Prtype, patchname): # noqa: C901, N802, N803 @@ -208,4 +208,4 @@ def Prpatchtext(self, data, Prtype, patchname): # noqa: C901, N802, N803 Prtext = Prtext + 'type\tempty;\n\t}\n' # noqa: N806 # Return the header for U file - return Prtext # noqa: DOC201 + return Prtext # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py index 2306f71bf..3aa0a8178 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py @@ -132,7 +132,7 @@ def pprocesstext(self, data, path): sampletext = sampletext + ');\n\n' sampletext = sampletext + 'fields\t' + fieldtext + ';\n' - return sampletext # noqa: DOC201, RET504 + return sampletext # noqa: DOC201, RET504, RUF100 ############################################################# def pprocesscdict(self, data, path): # noqa: C901 @@ -275,7 +275,7 @@ def pprocesscdict(self, data, path): # noqa: C901 cdicttext = cdicttext + '\t\tfields\t' + fieldtext + ';\n' cdicttext = cdicttext + '\t}\n}' - return cdicttext # noqa: DOC201, RET504 + return cdicttext # noqa: DOC201, RET504, RUF100 ############################################################# def scripts(self, data, path): # noqa: ARG002 @@ -293,7 +293,7 @@ def scripts(self, data, path): # noqa: ARG002 data, ['Events', 'Postprocessing'] ) if pprocess == [None]: - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 else: # noqa: RET505 pprocess = ', '.join( hydroutil.extract_element_from_json( @@ -350,7 +350,7 @@ def pprocesscheck(self, data, path): ) if pprocess == 'No': - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 else: # noqa: RET505 pprocessV = ', '.join( # noqa: N806 hydroutil.extract_element_from_json(data, ['Events', 'PPVelocity']) diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py index f338f96da..f337d5032 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py @@ -115,7 +115,7 @@ def PtDcheck(self, data, patches): # noqa: N802 if (int(Utype) == 103) or (int(Utype) == 104): # noqa: PLR2004 numMovWall += 1 # noqa: N806 if numMovWall > 0: - return 1 # noqa: DOC201 + return 1 # noqa: DOC201, RUF100 if numMovWall == 0: return 0 @@ -169,7 +169,7 @@ def PtDtext(self, data, fipath, patches): # noqa: N802 ptdtext = ptdtext + '}\n\n' # Return the text for pointDisplacement - return ptdtext # noqa: DOC201, RET504 + return ptdtext # noqa: DOC201, RET504, RUF100 ############################################################# def PtDheader(self): # noqa: N802 @@ -194,7 +194,7 @@ def PtDheader(self): # noqa: N802 header = header + 'internalField\tuniform (0 0 0);\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def PtDpatchtext(self, data, Utype, patchname, fipath): # noqa: ARG002, N802, N803 @@ -243,7 +243,7 @@ def PtDpatchtext(self, data, Utype, patchname, fipath): # noqa: ARG002, N802, N PtDtext = PtDtext + 'value\tuniform (0 0 0);\n' # noqa: N806 PtDtext = PtDtext + '\t}\n' # noqa: N806 - return PtDtext # noqa: DOC201 + return PtDtext # noqa: DOC201, RUF100 ############################################################# def getNormal(self, patchname): # noqa: N802 @@ -267,4 +267,4 @@ def getNormal(self, 
patchname): # noqa: N802 elif (patchname == 'Building') or (patchname == 'OtherBuilding'): # noqa: PLR1714 normal = '1 0 0' - return normal # noqa: DOC201 + return normal # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py index c08b721c3..bbdfa077d 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py @@ -73,7 +73,7 @@ def solverheader(self, fileobjec): ) # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def fvSchemetext(self, data): # noqa: ARG002, N802 @@ -163,7 +163,7 @@ def fvSchemetext(self, data): # noqa: ARG002, N802 fvSchemetext = fvSchemetext + 'alpha.water;\n' # noqa: N806 fvSchemetext = fvSchemetext + '}\n' # noqa: N806 - return fvSchemetext # noqa: DOC201, RET504 + return fvSchemetext # noqa: DOC201, RET504, RUF100 ############################################################# def fvSolntext(self, data): # noqa: N802 @@ -280,7 +280,7 @@ def fvSolntext(self, data): # noqa: N802 fvSolntext = fvSolntext + 'fields\n\t{\n\t}\n\t' # noqa: N806 fvSolntext = fvSolntext + 'equations\n\t{\n\t\t".*"\t1;\n\t}\n}' # noqa: N806 - return fvSolntext # noqa: DOC201, RET504 + return fvSolntext # noqa: DOC201, RET504, RUF100 ############################################################# def cdicttext(self, data): @@ -349,7 +349,7 @@ def cdicttext(self, data): cdicttext = cdicttext + 'maxAlphaCo \t 1.0;\n\n' cdicttext = cdicttext + 'maxDeltaT \t 1;\n\n' - return cdicttext # noqa: DOC201, RET504 + return cdicttext # noqa: DOC201, RET504, RUF100 ############################################################# def cdictcheck(self, data): @@ -366,7 +366,7 @@ def cdictcheck(self, data): # Start time startT = hydroutil.extract_element_from_json(data, ['Events', 'StartTime']) # noqa: N806 if startT == [None]: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # End time endT = hydroutil.extract_element_from_json(data, ['Events', 'EndTime']) # noqa: N806 @@ -489,4 +489,4 @@ def cdictFtext(self, data): # noqa: N802 cdicttext = cdicttext + 'direction\t(1 0 0);\n\t\t\t' cdicttext = cdicttext + 'cumulative\tno;\n\t\t}\n\t}\n}' - return cdicttext # noqa: DOC201, RET504 + return cdicttext # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py index 14be4b9d2..407ceddf8 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py @@ -84,7 +84,7 @@ def turbtext(self, data): turbtext = turbtext + '\tturbulence\ton;\n' turbtext = turbtext + '\tprintCoeffs\ton;\n}\n' - return turbtext # noqa: DOC201 + return turbtext # noqa: DOC201, RUF100 ############################################################# def turbheader(self): @@ -106,4 +106,4 @@ def turbheader(self): // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291 # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py index 87c49472f..283b2f082 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py +++ b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py @@ -105,7 +105,7 @@ def Utext(self, data, 
fipath, patches): # noqa: N802 utext = utext + '}\n\n' # Return the text for velocity BC - return utext # noqa: DOC201, RET504 + return utext # noqa: DOC201, RET504, RUF100 ############################################################# def Uheader(self): # noqa: N802 @@ -130,7 +130,7 @@ def Uheader(self): # noqa: N802 header = header + 'internalField\tuniform (0 0 0);\n\n' # Return the header for U file - return header # noqa: DOC201, RET504 + return header # noqa: DOC201, RET504, RUF100 ############################################################# def Upatchtext(self, data, Utype, patchname, fipath, numMovWall): # noqa: C901, N802, N803 @@ -345,7 +345,7 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall): # noqa: C901, Utext = Utext + 'type\tempty;\n\t}\n' # noqa: N806 # Return the header for U file - return Utext # noqa: DOC201 + return Utext # noqa: DOC201, RUF100 ############################################################# def Uchecks(self, data, fipath, patches): # noqa: C901, N802 @@ -384,7 +384,7 @@ def Uchecks(self, data, fipath, patches): # noqa: C901, N802 # Checking for multiple moving walls numMovWall += 1 # noqa: N806 if numMovWall > 1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 # Check for existing moving wall files dispfilename = hydroutil.extract_element_from_json( diff --git a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py index 4ed8cc557..d82396707 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py +++ b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py @@ -169,7 +169,7 @@ def createfolder(self, data, path, args): scriptfile.close() # Return completion flag - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def creategeometry(self, data, path): @@ -192,7 +192,7 @@ def creategeometry(self, data, path): # Create the geometry related files Geometry = of7Geometry() # noqa: N806 if int(mesher[0]) == 1: - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 elif int(mesher[0]) == 0 or int(mesher[0]) == 2: # noqa: RET505, PLR2004 geomcode = Geometry.geomcheck(data, path) if geomcode == -1: @@ -245,7 +245,7 @@ def createmesh(self, data, path): Meshing = of7Meshing() # noqa: N806 meshcode = Meshing.meshcheck(data, path) if meshcode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 elif int(mesher[0]) == 0: # noqa: RET505 # blockMesh bmeshtext = Meshing.bmeshtext(data) @@ -295,7 +295,7 @@ def materials(self, data, path): Materials = of7Materials() # noqa: N806 matcode = Materials.matcheck(data) if matcode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 mattext = Materials.mattext(data) fname = 'transportProperties' @@ -320,7 +320,7 @@ def initial(self, data, path): Inicond = of7Initial() # noqa: N806 initcode = Inicond.alphacheck(data, path) if initcode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 alphatext = Inicond.alphatext(data, path) fname = 'setFieldsDict' @@ -355,7 +355,7 @@ def boundary(self, data, path): # Check for boundary conditions here ecode = Uboundary.Uchecks(data, path, patches) if ecode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 # Write the U-file if no errors # Path to the file @@ -421,7 +421,7 @@ def turbulence(self, data, path): turbfile.write(turbtext) turbfile.close() - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 
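[Editor's note: several openfoam7.py hunks above also carry PLR2004 (magic-value comparison) on the bare 0/1/2 mesher codes. Named constants would satisfy that rule without a noqa; the names below are hypothetical labels chosen only for illustration, since the source does not define them:

    MESHER_HYDRO, MESHER_OTHER, MESHER_BLOCK = 0, 1, 2  # hypothetical labels

    def creategeometry_demo(mesher_code):
        """Sketch: a named constant satisfies PLR2004 without a noqa."""
        if mesher_code == MESHER_OTHER:
            return 0  # external mesher supplies its own geometry
        if mesher_code in (MESHER_HYDRO, MESHER_BLOCK):
            return 1  # geometry files need to be generated
        return -1
]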
############################################################# def parallelize(self, data, path): @@ -445,7 +445,7 @@ def parallelize(self, data, path): # Scripts Decomp.scripts(data, path) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def solve(self, data, path): @@ -478,7 +478,7 @@ def solve(self, data, path): # controlDict ecode = Solve.cdictcheck(data) if ecode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 else: # noqa: RET505 cdicttext = Solve.cdicttext(data) fname = 'controlDict' @@ -516,7 +516,7 @@ def others(self, data, path): gfile.write(gfiletext) gfile.close() - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def dakota(self, args): @@ -533,7 +533,7 @@ def dakota(self, args): # Dakota Scripts dakota.dakotascripts(args) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 ############################################################# def postprocessing(self, data, path): @@ -550,7 +550,7 @@ def postprocessing(self, data, path): # controlDict ecode = pprocess.pprocesscheck(data, path) if ecode == -1: - return -1 # noqa: DOC201 + return -1 # noqa: DOC201, RUF100 elif ecode == 0: # noqa: RET505 return 0 else: @@ -589,4 +589,4 @@ def cleaning(self, args, path): # Dakota Scripts cleaner.cleaning(args, path) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py index a14198e9d..be0cef04d 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py @@ -104,4 +104,4 @@ def creategeom(self, data, path): # noqa: ARG002 # Write extreme values and building data to temporary file for later usage flumeobj.extremedata(extreme, breadth) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py index e0767012c..66638127c 100644 --- a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py +++ b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py @@ -102,4 +102,4 @@ def creategeom(self, data, path): # Write extreme values and building data to temporary file for later usage flumeobj.extremedata(extreme, breadth) - return 0 # noqa: DOC201 + return 0 # noqa: DOC201, RUF100 diff --git a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py index 240ad0459..e561d9dbc 100644 --- a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py +++ b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/Istanbul/IstanbulStations.py b/modules/createEVENT/Istanbul/IstanbulStations.py index 05eab924c..8145e6ee1 100644 --- a/modules/createEVENT/Istanbul/IstanbulStations.py +++ b/modules/createEVENT/Istanbul/IstanbulStations.py @@ -190,7 +190,7 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: DOC201, RET504 
+ return distance # noqa: DOC201, RET504, RUF100 if __name__ == '__main__': diff --git a/modules/createEVENT/M9/M9API.py b/modules/createEVENT/M9/M9API.py index 4e8744751..ec6a49ea0 100644 --- a/modules/createEVENT/M9/M9API.py +++ b/modules/createEVENT/M9/M9API.py @@ -332,4 +332,4 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: DOC201, RET504 + return distance # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/M9/M9Stations.py b/modules/createEVENT/M9/M9Stations.py index 2ade560b6..f52352c90 100644 --- a/modules/createEVENT/M9/M9Stations.py +++ b/modules/createEVENT/M9/M9Stations.py @@ -229,4 +229,4 @@ def haversine(lat1, lon1, lat2, lon2): r = 6371 # Radius of the Earth in kilometers distance = r * c - return distance # noqa: DOC201, RET504 + return distance # noqa: DOC201, RET504, RUF100 diff --git a/modules/createEVENT/MPM/MPM.py b/modules/createEVENT/MPM/MPM.py index 43246d132..5a2d55a45 100644 --- a/modules/createEVENT/MPM/MPM.py +++ b/modules/createEVENT/MPM/MPM.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent( # noqa: N802 diff --git a/modules/createEVENT/MPM/post_process_output.py b/modules/createEVENT/MPM/post_process_output.py index 6a32be5ac..3327b8a1f 100644 --- a/modules/createEVENT/MPM/post_process_output.py +++ b/modules/createEVENT/MPM/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p # noqa: DOC201 + return probes, time, p # noqa: DOC201, RUF100 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: DOC201, RET504 + return sField # noqa: DOC201, RET504, RUF100 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U # noqa: DOC201 + return probes, time, U # noqa: DOC201, RUF100 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: DOC201, RET504 + return L # noqa: DOC201, RET504, RUF100 def psd(x, dt, nseg): # 
noqa: F811 diff --git a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py index a31ebbac7..e7065e2ff 100644 --- a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py +++ b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py index 6a32be5ac..3327b8a1f 100644 --- a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py +++ b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py @@ -88,7 +88,7 @@ def readPressureProbes(fileName): # noqa: N802, N803 time = np.asarray(time, dtype=np.float32) p = np.asarray(p, dtype=np.float32) - return probes, time, p # noqa: DOC201 + return probes, time, p # noqa: DOC201, RUF100 def read_pressure_data(file_names): @@ -291,7 +291,7 @@ def read_openFoam_scalar_field(file_name): # noqa: N802 sField = np.asarray(sField, dtype=np.float32) # noqa: N806 - return sField # noqa: DOC201, RET504 + return sField # noqa: DOC201, RET504, RUF100 def read_openFoam_vector_field(file_name): # noqa: N802 @@ -312,7 +312,7 @@ def read_openFoam_vector_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_tensor_field(file_name): # noqa: N802 @@ -340,7 +340,7 @@ def read_openFoam_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 @@ -367,7 +367,7 @@ def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802 vField = np.asarray(vField, dtype=np.float32) # noqa: N806 - return vField # noqa: DOC201, RET504 + return vField # noqa: DOC201, RET504, RUF100 def read_velocity_data(path): @@ -462,7 +462,7 @@ def read_velocity_probes(fileName): # noqa: N803 time = np.asarray(time, dtype=np.float32) U = np.asarray(U, dtype=np.float32) # noqa: N806 - return probes, time, U # noqa: DOC201 + return probes, time, U # noqa: DOC201, RUF100 def calculate_length_scale(u, uav, dt, min_corr=0.0): @@ -481,7 +481,7 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0): L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806 - return L # noqa: DOC201, RET504 + return L # noqa: DOC201, RET504, RUF100 def psd(x, dt, nseg): # noqa: F811 diff --git a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py index 66728136a..1caab63cd 100644 --- a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py +++ b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py @@ -13,7 +13,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def 
addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803 diff --git a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py index c05932380..eb60a5fcb 100644 --- a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py +++ b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py @@ -167,7 +167,7 @@ def convert_accel_units(self, acceleration, from_, to_='cm/sec/sec'): # noqa: C acceleration = np.asarray(acceleration) if from_ == 'g': if to_ == 'g': - return acceleration # noqa: DOC201 + return acceleration # noqa: DOC201, RUF100 if to_ in self.km_sec_square: return acceleration * self.g / 1000.0 if to_ in self.m_sec_square: diff --git a/modules/createEVENT/siteResponse/RegionalSiteResponse.py b/modules/createEVENT/siteResponse/RegionalSiteResponse.py index 35ef081a0..edce432d0 100644 --- a/modules/createEVENT/siteResponse/RegionalSiteResponse.py +++ b/modules/createEVENT/siteResponse/RegionalSiteResponse.py @@ -131,7 +131,7 @@ def get_scale_factors(input_units, output_units): # noqa: C901 scale_factors.update({input_name: f_scale}) - return scale_factors # noqa: DOC201 + return scale_factors # noqa: DOC201, RUF100 def postProcess(evtName, input_units, f_scale_units): # noqa: N802, N803, D103 diff --git a/modules/createEVENT/stochasticWave/StochasticWave.py b/modules/createEVENT/stochasticWave/StochasticWave.py index e8fda748f..98971436f 100644 --- a/modules/createEVENT/stochasticWave/StochasticWave.py +++ b/modules/createEVENT/stochasticWave/StochasticWave.py @@ -100,7 +100,7 @@ def directionToDof(direction): # noqa: N802 """Converts direction to degree of freedom""" # noqa: D400, D401 directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 - return directioMap[direction] # noqa: DOC201 + return directioMap[direction] # noqa: DOC201, RUF100 def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor): # noqa: N802, N803 diff --git a/modules/createSAM/AutoSDA/beam_component.py b/modules/createSAM/AutoSDA/beam_component.py index b1e840587..ed7b949c7 100644 --- a/modules/createSAM/AutoSDA/beam_component.py +++ b/modules/createSAM/AutoSDA/beam_component.py @@ -181,7 +181,7 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag # noqa: DOC201 + return self.flag # noqa: DOC201, RUF100 def compute_demand_capacity_ratio(self): """This method is used to compute demand to capacity ratios. 
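[Editor's note: the check_flag hunks in beam_component.py, column_component.py, and connection_part.py all suppress SIM118 (iterating .keys()) and E712 (comparison to False). A sketch of the idiomatic equivalent, which avoids both reports outright; it matches the original loop under the assumption that self.flag starts as True, since the original only ever lowers the flag:

    class FeasibilityMixin:
        """Minimal sketch, assuming is_feasible maps check names to booleans."""

        def __init__(self, is_feasible):
            self.is_feasible = is_feasible
            self.flag = True

        def check_flag(self):
            # all(...) over the values replaces the .keys() loop and the
            # '== False' comparison in one step.
            self.flag = all(self.is_feasible.values())
            return self.flag
]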
diff --git a/modules/createSAM/AutoSDA/column_component.py b/modules/createSAM/AutoSDA/column_component.py index 4d803081c..c17b6b06e 100644 --- a/modules/createSAM/AutoSDA/column_component.py +++ b/modules/createSAM/AutoSDA/column_component.py @@ -264,7 +264,7 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag # noqa: DOC201 + return self.flag # noqa: DOC201, RUF100 def compute_demand_capacity_ratio(self): """This method is used to calculate the demand to capacity ratios for column components diff --git a/modules/createSAM/AutoSDA/connection_part.py b/modules/createSAM/AutoSDA/connection_part.py index c1c246d56..0b2bd3fa6 100644 --- a/modules/createSAM/AutoSDA/connection_part.py +++ b/modules/createSAM/AutoSDA/connection_part.py @@ -740,4 +740,4 @@ def check_flag(self): for key in self.is_feasible.keys(): # noqa: SIM118 if self.is_feasible[key] == False: # noqa: E712 self.flag = False - return self.flag # noqa: DOC201 + return self.flag # noqa: DOC201, RUF100 diff --git a/modules/createSAM/AutoSDA/help_functions.py b/modules/createSAM/AutoSDA/help_functions.py index b65c0cbfa..379209b75 100644 --- a/modules/createSAM/AutoSDA/help_functions.py +++ b/modules/createSAM/AutoSDA/help_functions.py @@ -50,7 +50,7 @@ def determine_Fa_coefficient(site_class, Ss): # noqa: C901, N802, N803 Fa = None # noqa: N806 print('Site class is entered with an invalid value') # noqa: T201 - return Fa # noqa: DOC201 + return Fa # noqa: DOC201, RUF100 def determine_Fv_coefficient(site_class, S1): # noqa: C901, N802, N803 @@ -94,7 +94,7 @@ def determine_Fv_coefficient(site_class, S1): # noqa: C901, N802, N803 Fv = None # noqa: N806 print('Site class is entered with an invalid value') # noqa: T201 - return Fv # noqa: DOC201 + return Fv # noqa: DOC201, RUF100 def calculate_DBE_acceleration(Ss, S1, Fa, Fv): # noqa: N802, N803 @@ -111,7 +111,7 @@ def calculate_DBE_acceleration(Ss, S1, Fa, Fv): # noqa: N802, N803 SM1 = Fv * S1 # noqa: N806 SDS = 2 / 3 * SMS # noqa: N806 SD1 = 2 / 3 * SM1 # noqa: N806 - return SMS, SM1, SDS, SD1 # noqa: DOC201 + return SMS, SM1, SDS, SD1 # noqa: DOC201, RUF100 def determine_Cu_coefficient(SD1): # noqa: N802, N803 @@ -133,7 +133,7 @@ def determine_Cu_coefficient(SD1): # noqa: N802, N803 else: Cu = 1.4 # noqa: N806 - return Cu # noqa: DOC201 + return Cu # noqa: DOC201, RUF100 def determine_floor_height( @@ -161,7 +161,7 @@ def determine_floor_height( level - 2 ) - return floor_height # noqa: DOC201 + return floor_height # noqa: DOC201, RUF100 def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie): # noqa: N802, N803 @@ -212,7 +212,7 @@ def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie): # noqa: N802, N803 else: pass - return Cs # noqa: DOC201 + return Cs # noqa: DOC201, RUF100 def determine_k_coeficient(period): @@ -227,7 +227,7 @@ def determine_k_coeficient(period): else: k = 1 + 0.5 * (period - 0.5) - return k # noqa: DOC201 + return k # noqa: DOC201, RUF100 def calculate_seismic_force(base_shear, floor_weight, floor_height, k): @@ -252,7 +252,7 @@ def calculate_seismic_force(base_shear, floor_weight, floor_height, k): for story in range(len(floor_weight) - 1, -1, -1): story_shear[story] = np.sum(seismic_force[story:]) - return seismic_force, story_shear # noqa: DOC201 + return seismic_force, story_shear # noqa: DOC201, RUF100 def find_section_candidate(target_depth, section_database): @@ -267,7 +267,7 @@ def find_section_candidate(target_depth, section_database): if 
match: candidate_index.append(indx) candidates = section_database.loc[candidate_index, 'section size'] - return candidates # noqa: DOC201, RET504 + return candidates # noqa: DOC201, RET504, RUF100 def search_member_size(target_name, target_quantity, candidate, section_database): @@ -299,7 +299,7 @@ def search_member_size(target_name, target_quantity, candidate, section_database section_size = section_database.loc[ candidate_index[min_index[0][0]], 'section size' ] - return section_size # noqa: DOC201 + return section_size # noqa: DOC201, RUF100 def search_section_property(target_size, section_database): @@ -316,7 +316,7 @@ def search_section_property(target_size, section_database): for indx in np.array(section_database['index']): if target_size == section_database.loc[indx, 'section size']: section_info = section_database.loc[indx, :] - return section_info.to_dict() # noqa: DOC201 + return section_info.to_dict() # noqa: DOC201, RUF100 except: # noqa: E722 sys.stderr.write( 'Error: wrong size nominated!\nNo such size exists in section database!' @@ -336,7 +336,7 @@ def decrease_member_size(candidate, current_size): # This means the smallest candidate still cannot make design drift close to drift limit, # which further means the smallest section candidate is too large. sys.stderr.write('The lower bound for depth initialization is too large!\n') - return candidate[candidate_pool_index + 1] # noqa: DOC201 + return candidate[candidate_pool_index + 1] # noqa: DOC201, RUF100 def extract_depth(size): @@ -346,7 +346,7 @@ def extract_depth(size): """ # noqa: D205, D400, D401, D404 # Use Python regular expression to extract the char between 'W' and 'X', which then become depth output = re.findall(r'.*W(.*)X.*', size) - return int(output[0]) # noqa: DOC201 + return int(output[0]) # noqa: DOC201, RUF100 def extract_weight(size): @@ -357,7 +357,7 @@ def extract_weight(size): # Use Python regular expression to extract the char after 'W' to the end of the string, # which then becomes weight output = re.findall(r'.X(.*)', size) - return int(output[0]) # noqa: DOC201 + return int(output[0]) # noqa: DOC201, RUF100 def constructability_helper( # noqa: C901 @@ -541,7 +541,7 @@ def constructability_helper( # noqa: C901 variation_story.pop() # Update the ending index for next "identical story block" ending_index = variation_story[-1] - return section_size # noqa: DOC201 + return section_size # noqa: DOC201, RUF100 # # Loop over all stories from top to bottom to consider the constructability # starting_story = total_story - 1 @@ -596,4 +596,4 @@ def increase_member_size(candidate, current_size): if candidate_pool_index - 1 < 0: # Make sure the index does not exceed the bound # This means the largest candidate still fails to satisfy the requirement sys.stderr.write('The upper bound for depth initialization is too small!\n') - return candidate[candidate_pool_index - 1] # noqa: DOC201 + return candidate[candidate_pool_index - 1] # noqa: DOC201, RUF100 diff --git a/modules/performDL/pelicun3/DL_visuals.py b/modules/performDL/pelicun3/DL_visuals.py index 66b42cef7..43cc80d39 100644 --- a/modules/performDL/pelicun3/DL_visuals.py +++ b/modules/performDL/pelicun3/DL_visuals.py @@ -116,26 +116,26 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 5: cl.scales['5']['seq']['Reds'], } - if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031 + if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031, RUF100 p_min, p_max = 0.01, 0.9 d_min = np.inf d_max = -np.inf LS_count = 0 # noqa: N806 
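[Editor's note: the RUF031 suppressions in DL_visuals.py concern parenthesized tuples used as pandas MultiIndex labels, such as comp_data.loc[('Incomplete', '')]. To my understanding, Ruff's default for this rule prefers the unparenthesized form, although both spellings select the same entry. A small sketch; the frame below is fabricated for illustration and is not the component database from the source:

    import pandas as pd

    idx = pd.MultiIndex.from_tuples([('Incomplete', ''), ('LS1', 'Family')])
    comp_data = pd.Series([0, 'lognormal'], index=idx)

    flagged = comp_data.loc[('Incomplete', '')]  # parenthesized: RUF031 report
    same = comp_data.loc['Incomplete', '']       # unparenthesized: same value
]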
for LS in limit_states: # noqa: N806 - if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031 + if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031, RUF100 d_min_i, d_max_i = norm.ppf( [p_min, p_max], - loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031 - * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 + loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031, RUF100 + * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 ) - elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031 + elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031, RUF100 d_min_i, d_max_i = np.exp( norm.ppf( [p_min, p_max], - loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031 + loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031, RUF100 ) ) else: @@ -149,18 +149,18 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 demand_vals = np.linspace(d_min, d_max, num=100) for i_ls, LS in enumerate(limit_states): # noqa: N806 - if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031 + if comp_data.loc[(LS, 'Family')] == 'normal': # noqa: RUF031, RUF100 cdf_vals = norm.cdf( demand_vals, - loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031 - * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031 + loc=comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')] # noqa: RUF031, RUF100 + * comp_data.loc[(LS, 'Theta_0')], # noqa: RUF031, RUF100 ) - elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031 + elif comp_data.loc[(LS, 'Family')] == 'lognormal': # noqa: RUF031, RUF100 cdf_vals = norm.cdf( np.log(demand_vals), - loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031 - scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031 + loc=np.log(comp_data.loc[(LS, 'Theta_0')]), # noqa: RUF031, RUF100 + scale=comp_data.loc[(LS, 'Theta_1')], # noqa: RUF031, RUF100 ) else: continue @@ -385,11 +385,11 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D1 gridcolor='rgb(192,192,192)', ) - demand_unit = comp_data.loc[('Demand', 'Unit')] # noqa: RUF031 + demand_unit = comp_data.loc[('Demand', 'Unit')] # noqa: RUF031, RUF100 if demand_unit == 'unitless': demand_unit = '-' fig.update_xaxes( - title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]", # noqa: RUF031 + title_text=f"{comp_data.loc[('Demand', 'Type')]} [{demand_unit}]", # noqa: RUF031, RUF100 **shared_ax_props, ) @@ -465,7 +465,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, # perform plotting for each repair consequence type independently for c_type in repair_df.loc[comp_id].index: # load the component-specific part of the database - comp_data = repair_df.loc[(comp_id, c_type)] # noqa: RUF031 + comp_data = repair_df.loc[(comp_id, c_type)] # noqa: RUF031, RUF100 # and the component-specific metadata - if it exists if repair_meta != None: # noqa: E711 @@ -620,7 +620,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, ), } - if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031 + if comp_data.loc[('Incomplete', '')] != 1: # noqa: RUF031, RUF100 # set the parameters for displaying uncertainty p_min, p_max = 0.16, 0.84 # +- 1 std # noqa: F841 @@ -929,7 +929,7 @@ def 
plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, elif quantity_unit.split()[0] == '1': quantity_unit = quantity_unit.split()[1] - dv_unit = comp_data.loc[('DV', 'Unit')] # noqa: RUF031 + dv_unit = comp_data.loc[('DV', 'Unit')] # noqa: RUF031, RUF100 if dv_unit == 'unitless': dv_unit = '-' diff --git a/modules/performHUA/pyincore_data/censusutil.py b/modules/performHUA/pyincore_data/censusutil.py index 55b97b6f4..d17253710 100644 --- a/modules/performHUA/pyincore_data/censusutil.py +++ b/modules/performHUA/pyincore_data/censusutil.py @@ -16,7 +16,7 @@ import geopandas as gpd import pandas as pd import requests -from pyincore_data import globals # noqa: A004 +from pyincore_data import globals # noqa: A004, RUF100 logger = globals.LOGGER @@ -107,7 +107,7 @@ def request_census_api(data_url): api_json = request_json.json() api_df = pd.DataFrame(columns=api_json[0], data=api_json[1:]) - return api_df # noqa: DOC201, RET504 + return api_df # noqa: DOC201, RET504, RUF100 @staticmethod def get_blockdata_for_demographics( # noqa: C901 @@ -191,7 +191,7 @@ def get_blockdata_for_demographics( # noqa: C901 else: print('Only 2000, 2010, and 2020 decennial census supported') # noqa: T201 - return None # noqa: DOC201 + return None # noqa: DOC201, RUF100 # Make directory to save output if not os.path.exists(output_dir): # noqa: PTH110 @@ -860,7 +860,7 @@ def get_blockgroupdata_for_income( # noqa: C901 print('Done creating household income shapefile') # noqa: T201 - return cen_blockgroup[save_columns] # noqa: DOC201 + return cen_blockgroup[save_columns] # noqa: DOC201, RUF100 @staticmethod def convert_dislocation_gpd_to_shapefile(in_gpd, programname, savefile): diff --git a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py index c173370e0..c141ffb8d 100644 --- a/modules/performREC/pyrecodes/run_pyrecodes.py +++ b/modules/performREC/pyrecodes/run_pyrecodes.py @@ -235,7 +235,7 @@ def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): # n ujson.dump(results, f) ind_in_rank += 1 - count = count + 1 # noqa: PLR6104 + count = count + 1 # noqa: PLR6104, RUF100 # wait for all to finish if doParallel: @@ -504,4 +504,3 @@ def select_realizations_to_run(damage_input, inputRWHALE): # noqa: N803, D103 mpiExec=wfArgs.mpiexec, numPROC=numPROC, ) - diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py index 46ddda580..c875cc8c5 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py @@ -114,7 +114,7 @@ def create_stations( # noqa: C901, PLR0912, PLR0915 stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: DOC201, RET504 + return run_tag # noqa: DOC201, RET504, RUF100 # Max and Min IDs if len(filterIDs) > 0: stns_requested = [] @@ -609,7 +609,7 @@ def create_gridded_stations( gstn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 1 - return run_tag # noqa: DOC201, RET504 + return run_tag # noqa: DOC201, RET504, RUF100 if np.max(gstn_df.index.values) != 2: # noqa: PLR2004 run_tag = 1 return run_tag # noqa: RET504 @@ -662,7 +662,7 @@ def get_vs30_global(lat, lon): ) vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: DOC201, RET504 + return vs30 # noqa: DOC201, RET504, RUF100 def 
get_vs30_thompson(lat, lon): @@ -694,21 +694,21 @@ def get_vs30_thompson(lat, lon): vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # return - return vs30 # noqa: DOC201, RET504 + return vs30 # noqa: DOC201, RET504, RUF100 def get_z1(vs30): """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter)""" # noqa: D400 z1 = np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4))) # return - return z1 # noqa: DOC201, RET504 + return z1 # noqa: DOC201, RET504, RUF100 def get_z25(z1): """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013)""" # noqa: D400 z25 = 0.748 + 2.218 * z1 # return - return z25 # noqa: DOC201, RET504 + return z25 # noqa: DOC201, RET504, RUF100 def get_z25fromVs(vs): # noqa: N802 @@ -717,7 +717,7 @@ def get_z25fromVs(vs): # noqa: N802 """ # noqa: D205, D400 z25 = (7.089 - 1.144 * np.log(vs)) * 1000 # return - return z25 # noqa: DOC201, RET504 + return z25 # noqa: DOC201, RET504, RUF100 def get_zTR_global(lat, lon): # noqa: N802 @@ -743,7 +743,7 @@ def get_zTR_global(lat, lon): # noqa: N802 ) zTR = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # noqa: N806 # return - return zTR # noqa: DOC201, RET504 + return zTR # noqa: DOC201, RET504, RUF100 def export_site_prop(stn_file, output_dir, filename): @@ -811,7 +811,7 @@ def get_zTR_ncm(lat, lon): # noqa: N802 # get the top bedrock data zTR.append(abs(cur_res['response']['results'][0]['profiles'][0]['top'])) # return - return zTR # noqa: DOC201 + return zTR # noqa: DOC201, RUF100 def get_vsp_ncm(lat, lon, depth): @@ -850,7 +850,7 @@ def get_vsp_ncm(lat, lon, depth): if len(vsp) == 1: vsp = vsp[0] # return - return vsp # noqa: DOC201 + return vsp # noqa: DOC201, RUF100 def compute_vs30_from_vsp(depthp, vsp): @@ -868,7 +868,7 @@ def compute_vs30_from_vsp(depthp, vsp): # Computing the Vs30 vs30p = 30.0 / np.sum(delta_t) # return - return vs30p # noqa: DOC201, RET504 + return vs30p # noqa: DOC201, RET504, RUF100 def get_vs30_ncm(lat, lon): @@ -895,7 +895,7 @@ def get_vs30_ncm(lat, lon): ) vs30.append(760.0) # return - return vs30 # noqa: DOC201 + return vs30 # noqa: DOC201, RUF100 def get_soil_model_ba(param=None): @@ -925,7 +925,7 @@ def get_soil_model_ba(param=None): else: res = None - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 def get_soil_model_ei(param=None): @@ -940,7 +940,7 @@ def get_soil_model_ei(param=None): else: res = None - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 def get_soil_model_user(df_stn, model_fun): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py index cd1615ab0..2a7cf2574 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py @@ -630,7 +630,7 @@ def oq_run_classical_psha( # noqa: C901 export_realizations('realizations', dstore) except: # noqa: E722 print('FetchOpenQuake: Classical PSHA failed.') # noqa: T201 - return 1 # noqa: DOC201 + return 1 # noqa: DOC201, RUF100 elif vtag == 11: # noqa: PLR2004 try: print(f'FetchOpenQuake: running Version {oq_version}.') # noqa: T201 @@ -845,7 +845,7 @@ def oq_read_uhs_classical_psha(scen_info, event_info, dir_info): mag_maf.append([0.0, float(list_IMs[0].split('~')[0]), 0.0]) # return - return ln_psa_mr, mag_maf, im_list # noqa: DOC201 + return ln_psa_mr, 
mag_maf, im_list # noqa: DOC201, RUF100 class OpenQuakeHazardCalc: # noqa: D101 @@ -991,7 +991,7 @@ def run_calc(self): # noqa: C901 oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False ): - return {} # noqa: DOC201 + return {} # noqa: DOC201, RUF100 elif 'rupture_model' not in oq.inputs: logging.warning( 'There is no rupture_model, the calculator will just ' @@ -1550,7 +1550,7 @@ def eval_calc(self): # noqa: C901, PLR0912, PLR0915 } # return - return res # noqa: DOC201, RET504 + return res # noqa: DOC201, RET504, RUF100 def calculator_build_events_from_sources(self): # noqa: C901 """Prefilter the composite source model and store the source_info""" # noqa: D400 @@ -1666,7 +1666,7 @@ def __str__(self): # noqa: D105 def to_imt_unit_values(vals, imt): """Exponentiate the values unless the IMT is MMI""" # noqa: D400 if str(imt) == 'MMI': - return vals # noqa: DOC201 + return vals # noqa: DOC201, RUF100 return np.exp(vals) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py index 093217cc3..15a748471 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py @@ -654,7 +654,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_ManzourDavidson2016._input_check: no return period is defined.' ) - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -730,8 +730,8 @@ def _opt_initialization(self): itertools.product(range(self.num_sites), range(self.num_return_periods)) ) self.prob += pulp.lpSum( - self.return_periods[j] * self.e_plus[(i, j)] # noqa: RUF031 - + self.return_periods[j] * self.e_minus[(i, j)] # noqa: RUF031 + self.return_periods[j] * self.e_plus[(i, j)] # noqa: RUF031, RUF100 + + self.return_periods[j] * self.e_minus[(i, j)] # noqa: RUF031, RUF100 for (i, j) in comb_sites_rps ) @@ -757,7 +757,7 @@ def _opt_initialization(self): <= self.num_scenarios ) - return True # noqa: DOC201 + return True # noqa: DOC201, RUF100 def solve_opt(self): """target_function: compute the target function to be minimized @@ -853,7 +853,7 @@ def _input_check(self): print( # noqa: T201 'OccurrenceModel_Wangetal2023._input_check: no return period is defined.' 
) - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 # shape of exceedance probability if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004 print( # noqa: T201 @@ -916,7 +916,7 @@ def _opt_initialization(self): self.X_weighted = np.dot(self.W, self.X) self.y_weighted = np.dot(self.W, self.y) - return True # noqa: DOC201 + return True # noqa: DOC201, RUF100 def solve_opt(self): """LASSO regression""" # noqa: D400 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py index 98ff9cd60..3d9eaed21 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py @@ -471,7 +471,7 @@ def hazard_job(hazard_info): # noqa: C901, D103, PLR0915 ) gf_im_list += settlement_info['Output'] if 'Landslide' in ground_failure_info.keys(): # noqa: SIM118 - import landslide # noqa: PLC0415 + import landslide # noqa: PLC0415, RUF100 if 'Landslide' in ground_failure_info['Landslide'].keys(): # noqa: SIM118 lsld_info = ground_failure_info['Landslide']['Landslide'] diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py index 17c8b1fa1..d6e820aac 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py @@ -65,7 +65,7 @@ def baker_jayaram_correlation_2008(im1, im2, flag_orth=False): # noqa: FBT002, elif im1.startswith('PGA'): T1 = 0.0 # noqa: N806 else: - return 0.0 # noqa: DOC201 + return 0.0 # noqa: DOC201, RUF100 if im2.startswith('SA'): T2 = float(im2[3:-1]) # noqa: N806 elif im2.startswith('PGA'): @@ -126,7 +126,7 @@ def bradley_correlation_2011(IM, T=None, flag_Ds=True): # noqa: FBT002, C901, N # PGA if IM == 'PGA': # noqa: RET503 if flag_Ds: - return -0.442 # noqa: DOC201 + return -0.442 # noqa: DOC201, RUF100 else: # noqa: RET505 return -0.305 elif IM == 'PGV': @@ -252,7 +252,7 @@ def jayaram_baker_correlation_2009(im, h, flag_clustering=False): # noqa: FBT00 else: b = 40.7 - 15.0 * T rho = np.exp(-3.0 * h / b) - return rho # noqa: DOC201, RET504 + return rho # noqa: DOC201, RET504, RUF100 def load_loth_baker_correlation_2013(datapath): @@ -270,7 +270,7 @@ def load_loth_baker_correlation_2013(datapath): B2 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B2.csv', header=0) # noqa: N806 B1 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B1.csv', header=0) # noqa: N806 B3 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B3.csv', header=0) # noqa: N806 - return B1, B2, B3 # noqa: DOC201 + return B1, B2, B3 # noqa: DOC201, RUF100 def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N803 @@ -303,7 +303,7 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N80 Ch = b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0) # noqa: N806 # Correlation coefficient rho = Ch - return rho # noqa: DOC201, RET504 + return rho # noqa: DOC201, RET504, RUF100 def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 @@ -373,7 +373,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901 .swapaxes(1, 2) ) # return - return residuals # noqa: DOC201, RET504 + return residuals # 
noqa: DOC201, RET504, RUF100 def load_markhvida_ceferino_baker_correlation_2017(datapath): @@ -404,7 +404,7 @@ def load_markhvida_ceferino_baker_correlation_2017(datapath): index_col=None, header=0, ) - return MCB_model, MCB_pca, MCB_var # noqa: DOC201 + return MCB_model, MCB_pca, MCB_var # noqa: DOC201, RUF100 def markhvida_ceferino_baker_correlation_2017( # noqa: C901 @@ -521,7 +521,7 @@ def markhvida_ceferino_baker_correlation_2017( # noqa: C901 if tmp_periods > model_Tmax: residuals = np.concatenate((residuals, Tmax_residuals), axis=1) # return - return residuals # noqa: DOC201 + return residuals # noqa: DOC201, RUF100 def load_du_ning_correlation_2021(datapath): @@ -548,7 +548,7 @@ def load_du_ning_correlation_2021(datapath): DN_var = pd.read_csv( # noqa: N806 datapath + 'du_ning_correlation_2021_var_scale.csv', index_col=None, header=0 ) - return DN_model, DN_pca, DN_var # noqa: DOC201 + return DN_model, DN_pca, DN_var # noqa: DOC201, RUF100 def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): @@ -657,7 +657,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23): ) # return - return residuals # noqa: DOC201 + return residuals # noqa: DOC201, RUF100 def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 @@ -686,7 +686,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901 print( # noqa: T201 f'CorrelationModel.baker_bradley_correlation_2017: warning - return 0.0 for unknown {im1}' ) - return 0.0 # noqa: DOC201 + return 0.0 # noqa: DOC201, RUF100 im_list.append(tmp_tag) period_list.append(None) if im2.startswith('SA'): diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py index 2a017cbf4..137adec5d 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py @@ -65,7 +65,7 @@ def abrahamson_silva_ds_1999( print( # noqa: T201 "SignificantDurationModel.abrahamson_silva_ds_1999: duration_type='DS575H','DS575V','DS595H','DS595V'?" ) - return None, None # noqa: DOC201 + return None, None # noqa: DOC201, RUF100 # modeling coefficients beta = [3.2, 3.2, 3.2, 3.2] b1 = [5.204, 4.610, 5.204, 4.610] @@ -140,7 +140,7 @@ def bommer_stafford_alarcon_ds_2009( print( # noqa: T201 "SignificantDurationModel.bommer_stafford_alarcon_ds_2009: duration_type='DS575H','DS595H'?" ) - return None, None, None, None # noqa: DOC201 + return None, None, None, None # noqa: DOC201, RUF100 # modeling coefficients c0 = [-5.6298, -2.2393] @@ -205,7 +205,7 @@ def afshari_stewart_ds_2016( # noqa: C901 print( # noqa: T201 "SignificantDurationModel.afshari_stewart_ds_2016: mechanism='unknown','normal','reverse','strike-slip'?" 
) - return None, None, None, None # noqa: DOC201 + return None, None, None, None # noqa: DOC201, RUF100 # region map reg_map = {'california': 0, 'japan': 1, 'other': 2} reg_tag = reg_map.get(region.lower(), None) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py index be96864de..3a6c527ec 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py @@ -240,7 +240,7 @@ def calc(self, Mw, rJB, rRup, rX, dip, zTop, vs30, vsInf, z1p0, style): # noqa: stdDev = np.sqrt(tauSq + phiSq) # noqa: N806 - return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) # noqa: DOC201 + return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq) # noqa: DOC201, RUF100 # https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/imr/attenRelImpl/ngaw2/NGAW2_Wrapper.java#L220 def getFaultFromRake(self, rake): # noqa: N802, D102 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py index cd6352725..d4119aba9 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/landslide.py @@ -1,4 +1,4 @@ -import numpy as np # noqa: CPY001, INP001, I001, D100 +import numpy as np # noqa: CPY001, D100, I001, INP001, RUF100 import rasterio as rio from scipy.interpolate import interp2d import sys, warnings, shapely, pandas, os # noqa: ICN001, E401 @@ -10,7 +10,7 @@ import pandas as pd -## Helper functions # noqa: E266 +## Helper functions # noqa: E266, RUF100 def sampleRaster( # noqa: N802 raster_file_path, raster_crs, x, y, interp_scheme='nearest', dtype=None ): @@ -73,10 +73,10 @@ def sampleRaster( # noqa: N802 sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 - return sample # noqa: DOC201 + return sample # noqa: DOC201, RUF100 -## Helper functions # noqa: E266 +## Helper functions # noqa: E266, RUF100 def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG001, N802 """performs spatial join of vector_file with xy'""" # noqa: D400, D401, D403 print(f'Sampling from the Vector File: {os.path.basename(vector_file_path)}...') # noqa: T201, PTH119 @@ -103,7 +103,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 vertices = hull.vertices vertices = sites[np.append(vertices, vertices[0])] centroid = np.mean(vertices, axis=0) - vertices = vertices + 0.05 * (vertices - centroid) # noqa: PLR6104 + vertices = vertices + 0.05 * (vertices - centroid) # noqa: PLR6104, RUF100 RoI = shapely.geometry.Polygon(vertices) # noqa: N806 except: # noqa: E722 centroid = shapely.geometry.Point(np.mean(x), np.mean(y)) @@ -136,7 +136,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 data['geometry'].append(new_geom) del vector_gdf gdf_roi = gpd.GeoDataFrame(data, geometry='geometry', crs=4326) - geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] # noqa: FURB140 + geometry = [shapely.geometry.Point(lon, lat) for lon, lat in zip(x, y)] # noqa: FURB140, RUF100 gdf_sites = gpd.GeoDataFrame(geometry=geometry, crs=4326).reset_index() merged = gpd.GeoDataFrame.sjoin( gdf_roi, gdf_sites, how='inner', predicate='contains' 
@@ -144,7 +144,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 - return gdf_sites # noqa: DOC201 + return gdf_sites # noqa: DOC201, RUF100 def find_additional_output_req(liq_info, current_step): # noqa: D103 @@ -213,7 +213,7 @@ def erf2(x): # A & S 7.1.26 t = 1.0 / (1.0 + p * x) y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) - return signs * y # noqa: DOC201 + return signs * y # noqa: DOC201, RUF100 def norm2_cdf(x, loc, scale): @@ -222,7 +222,7 @@ def norm2_cdf(x, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = (x - loc) / scale - return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) # noqa: DOC201 + return 0.5 * (1 + erf2(inter * np.sqrt(0.5))) # noqa: DOC201, RUF100 def erf2_2d(x): @@ -240,7 +240,7 @@ def erf2_2d(x): # A & S 7.1.26 t = 1.0 / (1.0 + p * x) y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-(x**2)) - return signs * y # noqa: DOC201 + return signs * y # noqa: DOC201, RUF100 def norm2_cdf_2d(x, loc, scale): @@ -249,7 +249,7 @@ def norm2_cdf_2d(x, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = (x - loc) / scale - return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) # noqa: DOC201 + return 0.5 * (1 + erf2_2d(inter * np.sqrt(0.5))) # noqa: DOC201, RUF100 def nb_round(x, decimals): # noqa: D103 @@ -263,7 +263,7 @@ def erfinv_coeff(order=20): # noqa: D103 # starting value c[0] = 1 for i in range(1, order + 1): - c[i] = sum([c[j] * c[i - 1 - j] / (j + 1) / (2 * j + 1) for j in range(i)]) # noqa: C419 + c[i] = sum([c[j] * c[i - 1 - j] / (j + 1) / (2 * j + 1) for j in range(i)]) # noqa: C419, RUF100 # return return c @@ -278,7 +278,7 @@ def erfinv(x, order=20): for i in range(order): y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y # noqa: DOC201 + return y # noqa: DOC201, RUF100 def norm2_ppf(p, loc, scale): @@ -287,7 +287,7 @@ def norm2_ppf(p, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = np.sqrt(2) * erfinv(2 * p - 1, order=20) - return scale * inter + loc # noqa: DOC201 + return scale * inter + loc # noqa: DOC201, RUF100 def erfinv_2d(x, order=20): @@ -300,7 +300,7 @@ def erfinv_2d(x, order=20): for i in range(order): y += c[i] / (2 * i + 1) * (root_pi_over_2 * x) ** (2 * i + 1) # return - return y # noqa: DOC201 + return y # noqa: DOC201, RUF100 def norm2_ppf_2d(p, loc, scale): @@ -309,7 +309,7 @@ def norm2_ppf_2d(p, loc, scale): https://github.com/HDembinski/numba-stats/blob/main/src/numba_stats/norm.py """ # noqa: D205, D400, D401 inter = np.sqrt(2) * erfinv_2d(2 * p - 1, order=20) - return scale * inter + loc # noqa: DOC201 + return scale * inter + loc # noqa: DOC201, RUF100 class Landslide: # noqa: D101 @@ -518,7 +518,7 @@ def run( # noqa: D102 for i, key in enumerate(output_keys): im_data_scen[:, len(im_list) + i, rlz_id] = model_output[key] ln_im_data[scenario_id] = im_data_scen - im_list = im_list + output_keys # noqa: PLR6104 + im_list = im_list + output_keys # noqa: PLR6104, RUF100 else: sys.exit( f"'PGA' is missing in the selected intensity measures and the landslide model 'BrayMacedo2019' can not be 
computed." # noqa: F541 @@ -533,7 +533,7 @@ def run( # noqa: D102 im_list, ) - def model( # noqa: PLR6301 + def model( # noqa: PLR6301, RUF100 self, pga, mag, # upstream PBEE RV @@ -635,4 +635,4 @@ def model( # noqa: PLR6301 pgdef = np.exp(nonzero_ln_pgdef) / 100 # also convert from cm to m pgdef = np.maximum(pgdef, 1e-5) # limit to output = {'lsd_PGD_h': pgdef} - return output # noqa: RET504, DOC201 + return output # noqa: DOC201, RET504, RUF100 diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py index 2fa91e360..43bc5d13c 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py @@ -82,7 +82,7 @@ def sampleRaster( # noqa: N802 sample = sample.astype(dtype) # clean up invalid values (returned as 1e38 by NumPy) sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004 - return sample # noqa: DOC201 + return sample # noqa: DOC201, RUF100 # Helper functions @@ -163,7 +163,7 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG00 merged = merged.set_index('index_right').sort_index().drop(columns=['geometry']) gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left') gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002 - return gdf_sites # noqa: DOC201 + return gdf_sites # noqa: DOC201, RUF100 def find_additional_output_req(liq_info, current_step): # noqa: D103 @@ -455,7 +455,7 @@ def model(self, pgv, pga, mag): # liq_susc[prob_liq==zero_prob_liq] = 'none' - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201, RUF100 # ----------------------------------------------------------- @@ -658,7 +658,7 @@ def model( pga_mag = pga / (10**2.24 / mag**2.56) prob_liq[pga_mag < 0.1] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201, RUF100 # ----------------------------------------------------------- @@ -821,7 +821,7 @@ def model(self, pgv, pga, mag): # for precip > 1700 mm, set prob to "0" prob_liq[self.precip > 1700] = zero_prob_liq # noqa: PLR2004 - return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201 + return {'liq_prob': prob_liq, 'liq_susc': liq_susc} # noqa: DOC201, RUF100 # Lateral Spreading: @@ -987,7 +987,7 @@ def model( # output['ratio'] = ratio # return - return output # noqa: DOC201, RET504 + return output # noqa: DOC201, RET504, RUF100 # Settlement: @@ -1060,7 +1060,7 @@ def model( pass # return - return output # noqa: DOC201 + return output # noqa: DOC201, RUF100 def run(self, ln_im_data, eq_data, im_list): # noqa: D102 output_keys = ['liq_PGD_v'] diff --git a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py index d619d091d..c32d912f8 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py +++ b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py @@ -448,7 +448,7 @@ def interp_wind_by_height(pws_ip, height_simu, height_ref): ) # return - return pws_op # noqa: DOC201 + return pws_op # noqa: DOC201, RUF100 def gust_factor_ESDU(gd_c, gd_t): # noqa: N802 @@ -475,7 +475,7 @@ def gust_factor_ESDU(gd_c, gd_t): # 
noqa: N802 gd_c, gd, gf, left=gf[0], right=gf[-1] ) # return - return gf_t # noqa: DOC201, RET504 + return gf_t # noqa: DOC201, RET504, RUF100 def export_pws(stations, pws, output_dir, filename='EventGrid.csv'): # noqa: D103 diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py index bacb23af4..15322c4c6 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py @@ -156,8 +156,8 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print('CreateScenario: error - no storm name or year is provided.') # noqa: T201 # Searching the storm try: - df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] # noqa: RUF031 - df_chs = df_chs[df_chs[('SEASON', 'Year')] == storm_year] # noqa: RUF031 + df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name] # noqa: RUF031, RUF100 + df_chs = df_chs[df_chs[('SEASON', 'Year')] == storm_year] # noqa: RUF031, RUF100 except: # noqa: E722 print('CreateScenario: error - the storm is not found.') # noqa: T201 if len(df_chs.values) == 0: @@ -166,10 +166,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq # Collecting storm properties track_lat = [] track_lon = [] - for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON @@ -177,10 +177,10 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq print( # noqa: T201 'CreateScenario: warning - the USA_LAT and USA_LON are not available, switching to LAT and LON.' 
) - for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lat.append(float(x)) # noqa: PERF401 - for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031 + for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011, RUF031, RUF100 if x != ' ': track_lon.append(float(x)) # noqa: PERF401 if len(track_lat) == 0: @@ -197,7 +197,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq terrain_data = [] # Storm characteristics at the landfall dist2land = [] - for x in df_chs[('DIST2LAND', 'km')]: # noqa: RUF031 + for x in df_chs[('DIST2LAND', 'km')]: # noqa: RUF031, RUF100 if x != ' ': dist2land.append(x) # noqa: PERF401 if len(track_lat) == 0: @@ -237,14 +237,14 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq track_simu = track_lat # Reading data try: - landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 - landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 + landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031, RUF100 + landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031, RUF100 except: # noqa: E722 # If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON - landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031 - landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031 + landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc]) # noqa: RUF031, RUF100 + landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc]) # noqa: RUF031, RUF100 try: - landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) # noqa: RUF031 + landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc]) # noqa: RUF031, RUF100 except: # noqa: E722 print('CreateScenario: error - no landing angle is found.') # noqa: T201 if landfall_ang > 180.0: # noqa: PLR2004 @@ -254,7 +254,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq - np.min( [ float(x) - for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011, RUF031 + for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011, RUF031, RUF100 .iloc[tmploc - 5 :] .values.tolist() if x != ' ' @@ -262,11 +262,11 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq ) ) landfall_spd = ( - float(df_chs[('STORM_SPEED', 'kts')].iloc[tmploc]) * 0.51444 # noqa: RUF031 + float(df_chs[('STORM_SPEED', 'kts')].iloc[tmploc]) * 0.51444 # noqa: RUF031, RUF100 ) # convert knots/s to km/s try: landfall_rad = ( - float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 + float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031, RUF100 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found @@ -274,7 +274,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noq try: # If the default option (USA_RMW) is not available, switching to REUNION_RMW landfall_rad = ( - float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031 + float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934 # noqa: RUF031, RUF100 ) # convert nmile to km except: # noqa: E722 # No available radius of maximum wind is found diff --git 
a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py index 60d1b8a05..4eb3ffa04 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py @@ -68,7 +68,7 @@ def create_stations(input_file, output_file, min_id, max_id): stn_df = pd.read_csv(input_file, header=0, index_col=0) except: # noqa: E722 run_tag = 0 - return run_tag # noqa: DOC201, RET504 + return run_tag # noqa: DOC201, RET504, RUF100 # Max and Min IDs stn_ids_min = np.min(stn_df.index.values) stn_ids_max = np.max(stn_df.index.values) diff --git a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py index 00ab61f9a..4f5e3c41b 100644 --- a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py +++ b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py @@ -168,7 +168,7 @@ def __interp_z0(self, lat, lon): if not z0: z0 = 0.01 # return - return z0 # noqa: DOC201 + return z0 # noqa: DOC201, RUF100 def add_reference_terrain(self, terrain_info): """add_reference_terrainL specifying reference z0 values for a set of polygons @@ -595,4 +595,4 @@ def compute_wind_field(self): def get_station_data(self): """get_station_data: returning station data""" # noqa: D400 # return station dictionary - return self.station # noqa: DOC201 + return self.station # noqa: DOC201, RUF100 diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py index f4f082e99..13973260a 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py +++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py @@ -374,7 +374,7 @@ def _load_h5_plom(self, filename): if cur_var in self.dbserver.get_item_adds() and ATTR_MAP[cur_var]: # noqa: F405 # read in cur_data = store[cur_var] - cur_dshape = tuple( # noqa: C409 + cur_dshape = tuple( # noqa: C409, RUF100 [x[0] for x in store['/DS_' + cur_var[1:]].values.tolist()] # noqa: PD011 ) if cur_dshape == (1,): @@ -416,7 +416,7 @@ def _load_h5_data_X(self, filename): # noqa: N802 item_name='X0', col_name=list(self.X0.columns), item=self.X0 ) - return self.X0.to_numpy() # noqa: DOC201 + return self.X0.to_numpy() # noqa: DOC201, RUF100 except: # noqa: E722 return None @@ -491,7 +491,7 @@ def load_h5(self, filename): ) if '/X0' in self.dbserver.get_name_list(): self.X0 = self.dbserver.get_item('X0', table_like=True) - return self.X0.to_numpy() # noqa: DOC201 + return self.X0.to_numpy() # noqa: DOC201, RUF100 else: # noqa: RET505 self.logfile.write_msg( msg='PLoM.load_h5: the original X0 data not found in the loaded data.', @@ -598,7 +598,7 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST): # noqa: C901, N802, F405 msg_type='WARNING', msg_level=0, ) - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 map_order = [FULL_TASK_LIST.index(x) for x in self.cur_task_list] # noqa: F405 if map_order != sorted(map_order): self.logfile.write_msg( @@ -961,7 +961,7 @@ def DataNormalization(self, X): # noqa: N802, N803 X_scaled, alpha, x_min = plom.scaling(X) # noqa: N806 x_mean = plom.mean(X_scaled) - return X_scaled, alpha, x_min, x_mean # noqa: DOC201 + return X_scaled, alpha, x_min, x_mean # noqa: DOC201, RUF100 def RunPCA(self, X_origin, epsilon_pca): # noqa: N802, N803, D102 # ...PCA... 
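For reference, RunPCA above hands X off to plom.PCA together with the tolerance epsilon_pca. A minimal NumPy sketch of eigenvalue-tolerance PCA truncation, assuming a relative-eigenvalue cutoff (the exact rule inside plom.PCA may differ), looks like this:

import numpy as np

def pca_truncate(x, epsilon_pca):
    """Center x (features x samples) and keep eigenpairs above a relative tolerance."""
    x_mean = x.mean(axis=1, keepdims=True)
    eigval, eigvec = np.linalg.eigh(np.cov(x))
    eigval, eigvec = eigval[::-1], eigvec[:, ::-1]  # sort descending
    nu = int(np.sum(eigval > epsilon_pca * eigval[0]))  # retained components
    phi = eigvec[:, :nu]
    # Normalized principal components: diag(1/sqrt(mu)) * phi^T * (X - mean)
    eta = (phi / np.sqrt(eigval[:nu])).T @ (x - x_mean)
    return eta, phi, eigval[:nu]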
@@ -995,7 +995,7 @@ def RunKDE(self, X, epsilon_kde): # noqa: N802, N803 (s_v, c_v, hat_s_v) = plom.parameters_kde(X) K, b = plom.K(X, epsilon_kde) # noqa: N806 - return s_v, c_v, hat_s_v, K, b # noqa: DOC201 + return s_v, c_v, hat_s_v, K, b # noqa: DOC201, RUF100 def DiffMaps(self, H, K, b, tol=0.1): # noqa: N802, N803, D102 # ..diff maps basis... diff --git a/modules/performUQ/SimCenterUQ/PLoM/general.py b/modules/performUQ/SimCenterUQ/PLoM/general.py index 3b908da04..039fbf191 100644 --- a/modules/performUQ/SimCenterUQ/PLoM/general.py +++ b/modules/performUQ/SimCenterUQ/PLoM/general.py @@ -149,13 +149,13 @@ def _create_export_dir(self): dir_export = os.path.join(self.db_dir, 'DataOut') # noqa: PTH118 try: os.makedirs(dir_export, exist_ok=True) # noqa: PTH103 - return dir_export # noqa: DOC201, TRY300 + return dir_export # noqa: DOC201, RUF100, TRY300 except: # noqa: E722 return None def get_item_adds(self): """Returning the full list of data items""" # noqa: D400, D401 - return self._item_adds # noqa: DOC201 + return self._item_adds # noqa: DOC201, RUF100 def add_item( self, @@ -190,7 +190,7 @@ def add_item( store.close() # noqa: RET503 else: # Not supported data_type - return False # noqa: DOC201 + return False # noqa: DOC201, RUF100 def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: FBT002 """Getting a specific data item""" # noqa: D400, D401 @@ -199,7 +199,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: store = pd.HDFStore(self.db_path, 'r') try: item = store.get(item_name) - item_shape = tuple( # noqa: C409 + item_shape = tuple( # noqa: C409, RUF100 [ x[0] for x in self.get_item_shape( # noqa: PD011 @@ -214,7 +214,7 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: finally: store.close() - return item # noqa: DOC201 + return item # noqa: DOC201, RUF100 elif data_type == 'ConstraintsFile': store = pd.HDFStore(self.db_path, 'r') try: @@ -247,7 +247,7 @@ def get_item_shape(self, item_name=None): item_shape = None store.close() - return item_shape # noqa: DOC201 + return item_shape # noqa: DOC201, RUF100 def get_name_list(self): """Returning the keys of the database""" # noqa: D400, D401 @@ -257,7 +257,7 @@ def get_name_list(self): except: # noqa: E722 name_list = [] store.close() - return name_list # noqa: DOC201 + return name_list # noqa: DOC201, RUF100 def export(self, data_name=None, filename=None, file_format='csv'): """Exporting the specific data item @@ -266,7 +266,7 @@ def export(self, data_name=None, filename=None, file_format='csv'): """ # noqa: D205, D400, D401 d = self.get_item(item_name=data_name[1:], table_like=True) if d is None: - return 1 # noqa: DOC201 + return 1 # noqa: DOC201, RUF100 if filename is None: filename = os.path.join( # noqa: PTH118 self.dir_export, str(data_name).replace('/', '') + '.' 
+ file_format @@ -311,7 +311,7 @@ def refresh_status(self): # previous task not completed -> this task also needs to rerun self.status = False - return self.status # noqa: DOC201 + return self.status # noqa: DOC201, RUF100 # self-check if Counter(self.avail_var_list) == Counter(self.full_var_list) and len( @@ -355,7 +355,7 @@ def refresh_status(self): if not cur_task.status: self.status = False - return self.status # noqa: DOC201 + return self.status # noqa: DOC201, RUF100 while cur_task.next_task: cur_task = cur_task.next_task if not cur_task.status: diff --git a/modules/performUQ/SimCenterUQ/runPLoM.py b/modules/performUQ/SimCenterUQ/runPLoM.py index 2ab8e8b17..bc3f494a6 100644 --- a/modules/performUQ/SimCenterUQ/runPLoM.py +++ b/modules/performUQ/SimCenterUQ/runPLoM.py @@ -428,7 +428,7 @@ def _create_variables(self, training_data): # check if training data source from simulation if training_data == 'Sampling and Simulation': - return x_dim, y_dim, rv_name, g_name # noqa: DOC201 + return x_dim, y_dim, rv_name, g_name # noqa: DOC201, RUF100 # read X and Y variable names for rv in job_config['randomVariables']: @@ -562,7 +562,7 @@ def _parse_plom_parameters(self, surrogateInfo): # noqa: C901, N803 run_flag = 1 # return - return run_flag # noqa: DOC201 + return run_flag # noqa: DOC201, RUF100 def _set_up_parallel(self): """_set_up_parallel: set up modules and variables for parallel jobs @@ -592,7 +592,7 @@ def _set_up_parallel(self): run_flag = 1 # return - return run_flag # noqa: DOC201 + return run_flag # noqa: DOC201, RUF100 def _load_variables(self, do_sampling, do_simulation): # noqa: C901 """_load_variables: load variables @@ -666,7 +666,7 @@ def _load_variables(self, do_sampling, do_simulation): # noqa: C901 # run_flag = 1 # return - return run_flag # noqa: DOC201 + return run_flag # noqa: DOC201, RUF100 # KZ, 07/24: loading user-defined hyper-parameter files def _load_hyperparameter(self): diff --git a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py index 4da5ff658..c0e42c7be 100644 --- a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py +++ b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py @@ -142,4 +142,4 @@ def log_likelihood( loglike += ll else: loglike += -np.inf - return loglike # noqa: DOC201 + return loglike # noqa: DOC201, RUF100 diff --git a/modules/performUQ/UCSD_UQ/mwg_sampler.py b/modules/performUQ/UCSD_UQ/mwg_sampler.py index 13e8b46d8..4bf1e1ea8 100644 --- a/modules/performUQ/UCSD_UQ/mwg_sampler.py +++ b/modules/performUQ/UCSD_UQ/mwg_sampler.py @@ -328,7 +328,7 @@ def tune(scale, acc_rate): >0.95 x 10 """ # noqa: D205, D400 if acc_rate < 0.01: # noqa: PLR2004 - return scale * 0.01 # noqa: DOC201 + return scale * 0.01 # noqa: DOC201, RUF100 elif acc_rate < 0.05: # noqa: RET505, PLR2004 return scale * 0.1 elif acc_rate < 0.2: # noqa: PLR2004 diff --git a/modules/performUQ/UCSD_UQ/runFEM.py b/modules/performUQ/UCSD_UQ/runFEM.py index 98401eac6..aa5136105 100644 --- a/modules/performUQ/UCSD_UQ/runFEM.py +++ b/modules/performUQ/UCSD_UQ/runFEM.py @@ -101,4 +101,4 @@ def runFEM( # noqa: N802 preds = np.atleast_2d([-np.inf] * sum(edpLengthsList)).reshape((1, -1)) ll = -np.inf - return (ll, preds) # noqa: DOC201 + return (ll, preds) # noqa: DOC201, RUF100 diff --git a/modules/performUQ/UCSD_UQ/runTMCMC.py b/modules/performUQ/UCSD_UQ/runTMCMC.py index 99e841300..6bc59e0bc 100644 --- a/modules/performUQ/UCSD_UQ/runTMCMC.py +++ b/modules/performUQ/UCSD_UQ/runTMCMC.py @@ -561,4 +561,4 @@ def run_TMCMC( # noqa: N802, PLR0913 
f'\n\tShutdown mpi4py executor pool for runType: {run_type}' ) - return mytrace, total_log_evidence # noqa: DOC201 + return mytrace, total_log_evidence # noqa: DOC201, RUF100 diff --git a/modules/performUQ/common/ERAClasses/ERACond.py b/modules/performUQ/common/ERAClasses/ERACond.py index e9a40b6b3..cb3dd99fb 100644 --- a/modules/performUQ/common/ERAClasses/ERACond.py +++ b/modules/performUQ/common/ERAClasses/ERACond.py @@ -388,7 +388,7 @@ def equation(param): for i in range(len(Par)): Par[i] = np.squeeze(Par[i]) - return Par # noqa: DOC201 + return Par # noqa: DOC201, RUF100 # %% def condCDF(self, x, cond): # noqa: C901, N802 @@ -442,7 +442,7 @@ def condCDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': CDF = stats.weibull_min.cdf(x, c=par[1], scale=par[0]) # noqa: N806 - return CDF # noqa: DOC201 + return CDF # noqa: DOC201, RUF100 # %% def condiCDF(self, y, cond): # noqa: C901, N802 @@ -496,7 +496,7 @@ def condiCDF(self, y, cond): # noqa: C901, N802 elif self.Name == 'weibull': iCDF = stats.weibull_min.ppf(y, c=par[1], scale=par[0]) # noqa: N806 - return iCDF # noqa: DOC201 + return iCDF # noqa: DOC201, RUF100 # %% def condPDF(self, x, cond): # noqa: C901, N802 @@ -550,7 +550,7 @@ def condPDF(self, x, cond): # noqa: C901, N802 elif self.Name == 'weibull': PDF = stats.weibull_min.pdf(x, c=par[1], scale=par[0]) # noqa: N806 - return PDF # noqa: DOC201 + return PDF # noqa: DOC201, RUF100 # %% def condRandom(self, cond): # noqa: C901, N802 @@ -602,4 +602,4 @@ def condRandom(self, cond): # noqa: C901, N802 elif self.Name == 'weibull': Random = stats.weibull_min.rvs(c=par[1], scale=par[0]) # noqa: N806 - return Random # noqa: DOC201 + return Random # noqa: DOC201, RUF100 diff --git a/modules/performUQ/other/UQpyRunner.py b/modules/performUQ/other/UQpyRunner.py index 926d1c718..1667cf7b0 100644 --- a/modules/performUQ/other/UQpyRunner.py +++ b/modules/performUQ/other/UQpyRunner.py @@ -192,4 +192,4 @@ def runUQ( # noqa: C901, N802 # Factory for creating UQpy runner class Factory: # noqa: D106 def create(self): # noqa: D102 - return UQpyRunner() \ No newline at end of file + return UQpyRunner() diff --git a/modules/systemPerformance/REWET/REWET/Damage.py b/modules/systemPerformance/REWET/REWET/Damage.py index 2a1181e1e..00e58c304 100644 --- a/modules/systemPerformance/REWET/REWET/Damage.py +++ b/modules/systemPerformance/REWET/REWET/Damage.py @@ -75,7 +75,7 @@ def readDamageFromPickleFile( # noqa: N802 Returns ------- - """ # noqa: D205, D400, D401, D404, D414, DOC202 + """ # noqa: D205, D400, D401, D404, D414, DOC202, RUF100 with open(pickle_file_name, 'rb') as pckf: # noqa: PTH123 w = pickle.load(pckf) # noqa: S301 @@ -313,7 +313,7 @@ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N80 ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 if self.node_damage.empty: print('no node damage at all') # noqa: T201 return @@ -1078,7 +1078,7 @@ def sortEarthquakeListTimely(self): # noqa: N802 ------- None. - """ # noqa: D400, D401, D404, DOC202 + """ # noqa: D400, D401, D404, DOC202, RUF100 self._earthquake.sort_index() self.is_timely_sorted = True @@ -1097,7 +1097,7 @@ def predictDamage(self, wn, iClear=False): # noqa: FBT002, N802, N803 ------- None. 
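The ERACond.py changes above cover condCDF, condiCDF, condPDF, and condRandom, which all dispatch on self.Name to the matching scipy.stats call using the conditional parameters par. A condensed sketch of that pattern; the weibull branch mirrors the excerpt, while the normal and lognormal branches are illustrative assumptions about the omitted cases:

import numpy as np
from scipy import stats

def cond_cdf(name, x, par):
    # Dispatch on the distribution name, as ERACond.condCDF does.
    if name == 'normal':
        return stats.norm.cdf(x, loc=par[0], scale=par[1])  # assumed branch
    if name == 'lognormal':
        return stats.lognorm.cdf(x, s=par[1], scale=np.exp(par[0]))  # assumed branch
    if name == 'weibull':
        return stats.weibull_min.cdf(x, c=par[1], scale=par[0])  # as in the diff
    raise ValueError('unsupported distribution: ' + name)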
- """ # noqa: D401, D404, DOC202 + """ # noqa: D401, D404, DOC202, RUF100 if iClear: self.pipe_leak = pd.Series() self.pipe_break = pd.Series() diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py index e94400be6..c9fef211c 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py @@ -3985,7 +3985,7 @@ def contains_section(self, sec): """ # noqa: D205 try: self.get_section(sec) - return True # noqa: DOC201, TRY300 + return True # noqa: DOC201, RUF100, TRY300 except NoSectionError: return False @@ -4345,4 +4345,4 @@ def _diff_inp_files( # noqa: C901 g.write(html_diff) g.close() - return n # noqa: DOC201 + return n # noqa: DOC201, RUF100 diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py index e9a214792..ec54f7220 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py @@ -75,7 +75,7 @@ def updateWaterNetworkModelWithResult( # noqa: C901, N802 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 max_time = result.node['head'].index.max() if latest_simulation_time == None: # noqa: E711 latest_simulation_time = max_time diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py index 50fde8b0c..e9d529c3b 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py @@ -229,7 +229,7 @@ def run_sim( # noqa: C901 if run_successful: break - return result_data, run_successful # noqa: DOC201 + return result_data, run_successful # noqa: DOC201, RUF100 def _updateResultStartTime(self, result_data, start_time): # noqa: N802 for res_type, res in result_data.link.items(): # noqa: B007, PERF102 @@ -379,7 +379,7 @@ def _initialize_internal_graph(self): # noqa: C901 self._node_pairs_with_multiple_links = OrderedDict() for from_node_id, to_node_id in n_links.keys(): # noqa: SIM118 - if n_links[(from_node_id, to_node_id)] > 1: # noqa: RUF031 + if n_links[(from_node_id, to_node_id)] > 1: # noqa: RUF031, RUF100 if ( to_node_id, from_node_id, @@ -390,7 +390,7 @@ def _initialize_internal_graph(self): # noqa: C901 from_node_name = self._node_id_to_name[from_node_id] to_node_name = self._node_id_to_name[to_node_id] tmp_list = self._node_pairs_with_multiple_links[ - (from_node_id, to_node_id) # noqa: RUF031 + (from_node_id, to_node_id) # noqa: RUF031, RUF100 ] = [] for link_name in self._wn.get_links_for_node(from_node_name): link = self._wn.get_link(link_name) diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py index 8c9ccbca0..0422e1318 100644 --- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py +++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py @@ -131,7 +131,7 @@ def _is_number(s): """ # noqa: D400, D401 try: float(s) - return True # noqa: DOC201, TRY300 + return True # noqa: DOC201, RUF100, TRY300 except ValueError: return False diff --git a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py index 3e8509f92..68935d5a0 100644 --- 
a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py +++ b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py @@ -67,7 +67,7 @@ def __init__(self, definition_file_name): ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 # some of the following lines have been adopted from WNTR self.rm = restoration_data() @@ -230,7 +230,7 @@ def _read_entities(self): # noqa: C901 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 # Entities is kept for legacy compatibility with the first version damage_group_data = self.sections.get( '[ENTITIES]', self.sections.get('[Damage Group]') diff --git a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py index ba1afcc3c..85c68db92 100644 --- a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py +++ b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py @@ -72,7 +72,7 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 raise ValueError('Uknown time_type: ' + repr(time_type)) res[percentage] = temp_res - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N802 @@ -123,4 +123,4 @@ def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N8 raise ValueError('Uknown time_type: ' + repr(time_type)) res[percentage] = temp_res - return res # noqa: DOC201 + return res # noqa: DOC201, RUF100 diff --git a/modules/systemPerformance/REWET/REWET/initial.py b/modules/systemPerformance/REWET/REWET/initial.py index 8555b6377..06af15b3a 100644 --- a/modules/systemPerformance/REWET/REWET/initial.py +++ b/modules/systemPerformance/REWET/REWET/initial.py @@ -46,7 +46,7 @@ def run(self, project_file=None): # noqa: C901 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 settings = Settings() if project_file is not None: project_file = str(project_file) diff --git a/modules/systemPerformance/REWET/REWET/restoration/base.py b/modules/systemPerformance/REWET/REWET/restoration/base.py index 9bfaef207..c9cac8ac6 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/base.py +++ b/modules/systemPerformance/REWET/REWET/restoration/base.py @@ -232,7 +232,7 @@ def addAgent(self, agent_name, agent_type, definition): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 # number_of_agents = int(definition['Number']) agent_speed = self.registry.settings['crew_travel_speed'] temp_agent_data = AgentData( @@ -270,7 +270,7 @@ def setActiveAgents(self, active_agent_ID_list): # noqa: N802, N803 ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 for active_agent_ID in active_agent_ID_list: # noqa: N806 self._agents['active'].loc[active_agent_ID] = True @@ -600,7 +600,7 @@ def addShift(self, name, beginning, ending): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 if name in self._shift_data: raise ValueError('Shift name already registered') # noqa: EM101, TRY003 if type(beginning) != int and type(beginning) != float: # noqa: E721 @@ -673,7 +673,7 @@ def assignShiftToAgent(self, agent_ID, shift_name): # noqa: N802, N803 ------- None. 
- """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 if agent_ID in self._all_agent_shift_data: raise ValueError('The agent ID currently in Agent ALl Shifts') # noqa: EM101, TRY003 if shift_name not in self._shift_data: diff --git a/modules/systemPerformance/REWET/REWET/restoration/io.py b/modules/systemPerformance/REWET/REWET/restoration/io.py index 5fbf41721..ce6e932da 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/io.py +++ b/modules/systemPerformance/REWET/REWET/restoration/io.py @@ -44,7 +44,7 @@ def __init__(self, restoration_model, definition_file_name): ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 # some of the following lines have been adopted from WNTR self.rm = restoration_model self.crew_data = {} @@ -1180,7 +1180,7 @@ def _read_config(self): ------- None. - """ # noqa: D205, D400, D401, DOC202 + """ # noqa: D205, D400, D401, DOC202, RUF100 edata = OrderedDict() self._crew_file_name = [] self._crew_file_type = [] diff --git a/modules/systemPerformance/REWET/REWET/restoration/model.py b/modules/systemPerformance/REWET/REWET/restoration/model.py index 29291cc72..3f92bf261 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/model.py +++ b/modules/systemPerformance/REWET/REWET/restoration/model.py @@ -907,7 +907,7 @@ def updateShifiting(self, time): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 if type(time) != int and type(time) != float: # noqa: E721 raise ValueError('Time must be integer not ' + str(type(time))) time = int(time) diff --git a/modules/systemPerformance/REWET/REWET/restoration/registry.py b/modules/systemPerformance/REWET/REWET/restoration/registry.py index 3b09d331f..38037c5b8 100644 --- a/modules/systemPerformance/REWET/REWET/restoration/registry.py +++ b/modules/systemPerformance/REWET/REWET/restoration/registry.py @@ -515,7 +515,7 @@ def addPipeDamageToRegistry(self, node_name, data): # noqa: N802 ------- None. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 # self._pipe_node_damage_status[name] = data leaking_pipe_with_pipeA_orginal_pipe = self._pipe_leak_history[ # noqa: N806 @@ -1280,7 +1280,7 @@ def occupyNode(self, node_name, occupier_name): # noqa: N802 ------- None. - """ # noqa: D400, DOC202 + """ # noqa: D400, DOC202, RUF100 if occupier_name in self._occupancy: # if not iNodeCoupled(node_name): raise ValueError( # noqa: TRY003 @@ -1307,7 +1307,7 @@ def removeOccupancy(self, occupier_name): # noqa: N802 ------- None. - """ # noqa: D401, DOC202 + """ # noqa: D401, DOC202, RUF100 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: @@ -1350,7 +1350,7 @@ def whereIsOccupiedByName(self, occupier_name): # noqa: N802 str or series node(s) ID. - """ # noqa: D400, D401, DOC202 + """ # noqa: D400, D401, DOC202, RUF100 temp = self._occupancy[self._occupancy == occupier_name] if len(temp) == 0: raise ValueError('there is no occupancy with this name') # noqa: EM101, TRY003 @@ -1387,7 +1387,7 @@ def coupleTwoBreakNodes(self, break_point_1_name, break_point_2_name): # noqa: ------- None. 
- """ # noqa: D205, DOC202 + """ # noqa: D205, DOC202, RUF100 self._pipe_break_node_coupling[break_point_1_name] = break_point_2_name self._pipe_break_node_coupling[break_point_2_name] = break_point_1_name self._break_point_attached_to_mainPipe.append(break_point_1_name) diff --git a/modules/systemPerformance/REWET/REWET/timeline.py b/modules/systemPerformance/REWET/REWET/timeline.py index af3b1e6a7..fa1a875ab 100644 --- a/modules/systemPerformance/REWET/REWET/timeline.py +++ b/modules/systemPerformance/REWET/REWET/timeline.py @@ -143,7 +143,7 @@ def addEventTime(self, event_distinct_time, event_type='dmg'): # noqa: N802 ------- None. - """ # noqa: D205, D401, D404, DOC202 + """ # noqa: D205, D401, D404, DOC202, RUF100 if type(event_distinct_time) != pd.core.series.Series: # noqa: E721 if ( type(event_distinct_time) == numpy.float64 # noqa: E721 @@ -218,7 +218,7 @@ def checkAndAmendTime(self): # noqa: N802 ------- None. - """ # noqa: D205, D401, DOC202 + """ # noqa: D205, D401, DOC202, RUF100 first_length = len(self._event_time_register.index) self._event_time_register = self._event_time_register[ self._event_time_register.index <= self._simulation_end_time diff --git a/modules/systemPerformance/REWET/preprocessorIO.py b/modules/systemPerformance/REWET/preprocessorIO.py index c9b44067e..10f291d48 100644 --- a/modules/systemPerformance/REWET/preprocessorIO.py +++ b/modules/systemPerformance/REWET/preprocessorIO.py @@ -187,7 +187,7 @@ def save_scenario_table(scenario_table, scenario_table_file_path): ------- None. - """ # noqa: D205, D400, D401, DOC202 + """ # noqa: D205, D400, D401, DOC202, RUF100 if isinstance(scenario_table, pd.core.frame.DataFrame): pass elif isinstance(scenario_table, list): From 830d6a5986c6af07371ae7ede3d5e804e2413a52 Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 20 Aug 2024 15:03:12 -0700 Subject: [PATCH 8/9] ruff check add noqa --- .../CleanBeamSectionDatabase.ipynb | 148 +++++++++--------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb index 5e1cc9ae2..5d6469510 100644 --- a/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb +++ b/modules/createSAM/AutoSDA/Preprocessing/CleanBeamSectionDatabase.ipynb @@ -1,74 +1,74 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Clean Beam Section Database" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "ename": "FileNotFoundError", - "evalue": "[Errno 2] No such file or directory: 'BeamDatabase.csv'", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'BeamDatabase.csv'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'r'\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m 
\u001b[0mbeam_section_database\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'BeamDatabase.csv'" - ] - } - ], - "source": [ - "import pandas as pd\n", - "\n", - "with open('BeamDatabase1.csv', 'r') as file:\n", - " beam_section_database = pd.read_csv(file, header=0)\n", - "\n", - "# Beam section weight shall be less than 300 lb/ft\n", - "# Beam flange thickness shall be less than 1.75 inch.\n", - "target_index = []\n", - "for indx in beam_section_database['index']:\n", - " if (beam_section_database.loc[indx, 'weight'] >= 300):\n", - " target_index.append(indx)\n", - " elif (beam_section_database.loc[indx, 'tf'] >= 1.75):\n", - " target_index.append(indx)\n", - "clean_beam_section = beam_section_database.drop(index=target_index)\n", - "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Clean Beam Section Database" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "FileNotFoundError", + "evalue": "[Errno 2] No such file or directory: 'BeamDatabase.csv'", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'BeamDatabase.csv'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'r'\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfile\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m \u001b[0mbeam_section_database\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'BeamDatabase.csv'" + ] + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "with open('BeamDatabase1.csv', 'r') as file: # noqa: PTH123, UP015\n", + " beam_section_database = pd.read_csv(file, 
header=0)\n", + "\n", + "# Beam section weight shall be less than 300 lb/ft\n", + "# Beam flange thickness shall be less than 1.75 inch.\n", + "target_index = []\n", + "for indx in beam_section_database['index']:\n", + " if (beam_section_database.loc[indx, 'weight'] >= 300): # noqa: PLR2004, SIM114\n", + " target_index.append(indx)\n", + " elif (beam_section_database.loc[indx, 'tf'] >= 1.75): # noqa: PLR2004\n", + " target_index.append(indx)\n", + "clean_beam_section = beam_section_database.drop(index=target_index)\n", + "clean_beam_section.to_csv('BeamDatabase2.csv', sep=',', index=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 9ac6a87affeeaa03f2a7a19620554a3667fb72bd Mon Sep 17 00:00:00 2001 From: jinyan1214 Date: Tue, 20 Aug 2024 15:03:30 -0700 Subject: [PATCH 9/9] fix an error of reading R2D output json --- .../regionalGroundMotion/ComputeIntensityMeasure.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py index 30d826ff2..f500a4b31 100644 --- a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py +++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py @@ -725,12 +725,15 @@ def compute_im( # noqa: C901, D103 saveInJson = False # noqa: N806 filename = os.path.join(output_dir, filename) # noqa: PTH118 im_list = [] - if 'PGA' in im_info.keys(): # noqa: SIM118 + if 'PGA' in im_info.keys() or im_info.get('Type', None) == 'PGA': # noqa: SIM118 im_list.append('PGA') if 'SA' in im_info.keys(): # noqa: SIM118 for cur_period in im_info['SA']['Periods']: im_list.append(f'SA({cur_period!s})') # noqa: PERF401 - if 'PGV' in im_info.keys(): # noqa: SIM118 + if im_info.get('Type', None) == 'SA': + for cur_period in im_info['Periods']: + im_list.append(f'SA({cur_period!s})') # noqa: PERF401 + if 'PGV' in im_info.keys() or im_info.get('Type', None) == 'PGV': # noqa: SIM118 im_list.append('PGV') # Stations station_list = [
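The compute_im hunk above is the fix named in this commit's message: it now accepts both layouts of the R2D intensity-measure specification. With illustrative values, the two forms recognized are:

# Aggregated form: one top-level key per intensity measure.
im_info = {'PGA': {}, 'SA': {'Periods': [0.3, 1.0]}, 'PGV': {}}
# Per-IM form found in some R2D output JSON: a single Type entry.
im_info = {'Type': 'SA', 'Periods': [0.3, 1.0]}

Either form yields im_list entries such as 'PGA', 'SA(0.3)', 'SA(1.0)', and 'PGV'.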