diff --git a/modules/Workflow/WorkflowApplications.json b/modules/Workflow/WorkflowApplications.json
index 5083dde0b..117f2d51c 100644
--- a/modules/Workflow/WorkflowApplications.json
+++ b/modules/Workflow/WorkflowApplications.json
@@ -1,20 +1,18 @@
-
{
"DefaultValues": {
- "workflowInput": "scInput.json",
- "filenameAIM": "AIM.json",
- "filenameEVENT": "EVENT.json",
- "filenameSAM": "SAM.json",
- "filenameEDP": "EDP.json",
- "filenameSIM": "SIM.json",
- "driverFile": "driver",
- "filenameDL": "AIM.json",
- "workflowOutput": "EDP.json"
+ "workflowInput": "scInput.json",
+ "filenameAIM": "AIM.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameEDP": "EDP.json",
+ "filenameSIM": "SIM.json",
+ "driverFile": "driver",
+ "filenameDL": "AIM.json",
+ "workflowOutput": "EDP.json"
},
"AssetsApplications": {
"API": {
- "Inputs": [
- ],
+ "Inputs": [],
"Outputs": [
{
"id": "assetFile",
@@ -27,7 +25,7 @@
"Applications": [
{
"Name": "GenericAimDatabase",
- "ExecutablePath": "applications/createAIM/genericAimDatabase/GenericAimDatabase",
+ "ExecutablePath": "applications/createAIM/genericAimDatabase/GenericAimDatabase",
"ApplicationSpecificInputs": [
{
"id": "Min",
@@ -53,7 +51,7 @@
},
{
"Name": "GeoJSON_to_AIM",
- "ExecutablePath": "applications/createAIM/GeoJSON_to_AIM/GeoJSON_to_AIM.py",
+ "ExecutablePath": "applications/createAIM/GeoJSON_to_AIM/GeoJSON_to_AIM.py",
"ApplicationSpecificInputs": [
{
"id": "Min",
@@ -79,7 +77,7 @@
},
{
"Name": "GEOJSON_TO_ASSET",
- "ExecutablePath": "applications/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py",
+ "ExecutablePath": "applications/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py",
"ApplicationSpecificInputs": [
{
"id": "assetSourceFile",
@@ -98,9 +96,9 @@
}
]
},
- {
+ {
"Name": "INP_FILE",
- "ExecutablePath": "applications/createAIM/INP_FILE/INP_FILE.py",
+ "ExecutablePath": "applications/createAIM/INP_FILE/INP_FILE.py",
"ApplicationSpecificInputs": [
{
"id": "assetFile",
@@ -121,8 +119,8 @@
},
{
"Name": "CSV_to_AIM",
- "ExecutablePath": "applications/createAIM/CSV_to_AIM/CSV_to_AIM.py",
- "RunsParallel": true,
+ "ExecutablePath": "applications/createAIM/CSV_to_AIM/CSV_to_AIM.py",
+ "RunsParallel": true,
"ApplicationSpecificInputs": [
{
"id": "Min",
@@ -240,11 +238,11 @@
}
]
}
- ]
+ ]
},
"RegionalEventApplications": {
"API": {
- "Inputs": [
+ "Inputs": [
{
"id": "input",
"type": "path",
@@ -252,10 +250,9 @@
"default": "inputRWHALE.json"
}
],
- "Outputs": [
- ]
+ "Outputs": []
},
- "Applications":[
+ "Applications": [
{
"Name": "DummyEventApp",
"ExecutablePath": "applications/performRegionalEventSimulation/DummyEventApp/DEA.py",
@@ -275,14 +272,12 @@
{
"Name": "UserInputGM",
"ExecutablePath": null,
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "EQSS",
"ExecutablePath": null,
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "UserInputShakeMap",
@@ -308,27 +303,24 @@
{
"Name": "UserInputHurricane",
"ExecutablePath": null,
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "UserInputRasterHazard",
"ExecutablePath": null,
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "RegionalSiteResponse",
"ExecutablePath": "applications/Workflow/siteResponseWHALE.py",
- "RunsParallel": true,
- "ApplicationSpecificInputs": [
- ]
+ "RunsParallel": true,
+ "ApplicationSpecificInputs": []
}
]
},
"RegionalMappingApplications": {
"API": {
- "Inputs":[
+ "Inputs": [
{
"id": "assetFile",
"type": "path",
@@ -336,14 +328,13 @@
"default": "assets.json"
}
],
- "Outputs":[
- ]
+ "Outputs": []
},
"Applications": [
{
"Name": "NearestNeighborEvents",
"ExecutablePath": "applications/performRegionalMapping/NearestNeighborEvents/NNE.py",
- "RunsParallel": true,
+ "RunsParallel": true,
"ApplicationSpecificInputs": [
{
"id": "filenameEVENTgrid",
@@ -374,8 +365,8 @@
},
{
"Name": "SiteSpecifiedEvents",
- "ExecutablePath": "applications/performRegionalMapping/SiteSpecifiedEvents/SSE.py",
- "RunsParallel": true,
+ "ExecutablePath": "applications/performRegionalMapping/SiteSpecifiedEvents/SSE.py",
+ "RunsParallel": true,
"ApplicationSpecificInputs": [
{
"id": "filenameEVENTgrid",
@@ -386,8 +377,8 @@
},
{
"Name": "GISSpecifiedEvents",
- "ExecutablePath": "applications/performRegionalMapping/GISSpecifiedEvents/GISSpecifiedEvent.py",
- "RunsParallel": true,
+ "ExecutablePath": "applications/performRegionalMapping/GISSpecifiedEvents/GISSpecifiedEvent.py",
+ "RunsParallel": true,
"ApplicationSpecificInputs": [
{
"id": "filenameEVENTgrid",
@@ -401,54 +392,51 @@
"EventApplications": {
"API": {
"Inputs": [
- {
+ {
"id": "filenameAIM",
"type": "workflowDefault",
"description": "name of AIM file"
- }
+ }
],
"Outputs": [
- {
+ {
"id": "filenameEVENT",
"type": "workflowDefault",
"description": "name of file containing the event data"
- }
+ }
]
},
"Applications": [
{
"Name": "ExistingSimCenterEvents",
"ExecutablePath": "applications/createEVENT/multipleSimCenter/MultipleSCEvents",
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "MultiModel",
"ExecutablePath": "applications/Workflow/MultiModelApplication.py",
- "ApplicationSpecificInputs": [
- {"id": "appKey",
- "type": "string",
- "description": "keyword of application data in input file"
- }
- ]
- },
- {
+ "ApplicationSpecificInputs": [
+ {
+ "id": "appKey",
+ "type": "string",
+ "description": "keyword of application data in input file"
+ }
+ ]
+ },
+ {
"Name": "PhysicsBasedMotion",
"ExecutablePath": "applications/createEVENT/physicsBasedMotion/PhysicsBasedMotion",
- "ApplicationSpecificInputs": [
- ]
- },
+ "ApplicationSpecificInputs": []
+ },
{
"Name": "ExistingPEER_Events",
"ExecutablePath": "applications/createEVENT/multiplePEER/MultiplePEER",
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "Site Response",
"ExecutablePath": "applications/createEVENT/siteResponse/SiteResponse.py",
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "RegionalSiteResponse",
@@ -479,37 +467,37 @@
{
"Name": "HazardBasedEvent",
"ExecutablePath": "applications/createEVENT/hazardBasedEvent/HazardBasedEvent.py",
- "ApplicationSpecificInputs":[]
+ "ApplicationSpecificInputs": []
},
{
"Name": "DEDM_HRP",
"ExecutablePath": "applications/createEVENT/DEDM_HRP/DEDM_HRP.py",
- "ApplicationSpecificInputs":[]
+ "ApplicationSpecificInputs": []
},
{
"Name": "LowRiseTPU",
"ExecutablePath": "applications/createEVENT/LowRiseTPU/LowRiseTPU",
- "ApplicationSpecificInputs":[]
+ "ApplicationSpecificInputs": []
},
{
"Name": "HighRiseTPU",
"ExecutablePath": "applications/createEVENT/HighRiseTPU/HighRiseTPU",
- "ApplicationSpecificInputs":[]
- },
+ "ApplicationSpecificInputs": []
+ },
{
"Name": "WindTunnelExperiment",
"ExecutablePath": "applications/createEVENT/windTunnelExperiment/WindTunnelExperiment",
- "ApplicationSpecificInputs":[]
+ "ApplicationSpecificInputs": []
},
{
"Name": "StochasticWindWittigSinha",
"ExecutablePath": "applications/createEVENT/stochasticWind/StochasticWind",
- "ApplicationSpecificInputs":[]
+ "ApplicationSpecificInputs": []
},
{
"Name": "StochasticGroundMotion",
"ExecutablePath": "applications/createEVENT/stochasticGroundMotion/StochasticGM",
- "ApplicationSpecificInputs":[
+ "ApplicationSpecificInputs": [
{
"id": "modelName",
"type": "string",
@@ -522,46 +510,44 @@
}
]
},
- {
- "Name": "CFDEvent",
- "ExecutablePath": "applications/createEVENT/CFDEvent/CFDEvent.py",
- "ApplicationSpecificInputs":[
- {
- "id":"OpenFOAMCase",
- "type":"string",
- "description": "Remote directory containing OpenFOAM case input"
- },
- {
- "id":"OpenFOAMSolver",
- "type":"string",
- "description": "OpenFOAM solver used"
- }]
- },
+ {
+ "Name": "CFDEvent",
+ "ExecutablePath": "applications/createEVENT/CFDEvent/CFDEvent.py",
+ "ApplicationSpecificInputs": [
+ {
+ "id": "OpenFOAMCase",
+ "type": "string",
+ "description": "Remote directory containing OpenFOAM case input"
+ },
+ {
+ "id": "OpenFOAMSolver",
+ "type": "string",
+ "description": "OpenFOAM solver used"
+ }
+ ]
+ },
{
"Name": "IsolatedBuildingCFD",
"ExecutablePath": "applications/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py",
- "ApplicationSpecificInputs":[
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "SurroundedBuildingCFD",
"ExecutablePath": "applications/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py",
- "ApplicationSpecificInputs":[
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "CoupledDigitalTwin",
"ExecutablePath": "applications/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py",
- "ApplicationSpecificInputs":[
- ]
- },
+ "ApplicationSpecificInputs": []
+ },
{
"Name": "MPM",
"ExecutablePath": "applications/createEVENT/MPM/MPM.py",
- "ApplicationSpecificInputs":[
+ "ApplicationSpecificInputs": [
{
- "id":"publicDirectory",
- "type":"string",
+ "id": "publicDirectory",
+ "type": "string",
"description": "Remote directory hosting ClaymoreUW MPM assets (e.g. geometries, checkpoints, motion-paths) for the application for any user to access on the HPC system."
}
]
@@ -569,26 +555,28 @@
{
"Name": "StochasticWave",
"ExecutablePath": "applications/createEVENT/stochasticWave/StochasticWave.py",
- "ApplicationSpecificInputs":[]
- },
+ "ApplicationSpecificInputs": []
+ },
{
"Name": "TaichiEvent",
"ExecutablePath": "applications/createEVENT/TaichiEvent/TaichiEvent.py",
- "ApplicationSpecificInputs":[]
- },
+ "ApplicationSpecificInputs": []
+ },
{
"Name": "GeoClawOpenFOAM",
"ExecutablePath": "applications/createEVENT/GeoClawOpenFOAM/GeoClawOpenFOAM.py",
- "ApplicationSpecificInputs":[
- {
- "id":"OpenFOAMCase",
- "type":"string",
+ "ApplicationSpecificInputs": [
+ {
+ "id": "OpenFOAMCase",
+ "type": "string",
"description": "Remote directory containing OpenFOAM case input"
- },{
- "id":"OpenFOAMSolver",
- "type":"string",
+ },
+ {
+ "id": "OpenFOAMSolver",
+ "type": "string",
"description": "OpenFOAM solver used"
- }]
+ }
+ ]
},
{
"Name": "NNGM",
@@ -631,12 +619,14 @@
{
"Name": "ASCE7_WindSpeed",
"ExecutablePath": "applications/createEVENT/ASCE7_WindSpeed/ASCE7_WindSpeed.py",
- "ApplicationSpecificInputs":[]
+ "ApplicationSpecificInputs": []
},
{
"Name": "pointWindSpeed",
"ExecutablePath": "applications/createEVENT/pointWindSpeed/pointWindSpeed",
- "ApplicationSpecificInputs":["filenameWindSpeedCloudData"]
+ "ApplicationSpecificInputs": [
+ "filenameWindSpeedCloudData"
+ ]
},
{
"Name": "gridGroundMoionSelection",
@@ -659,23 +649,23 @@
"ModelingApplications": {
"API": {
"Inputs": [
- {
+ {
"id": "filenameAIM",
"type": "workflowDefault",
"description": "name of AIM file"
- },
- {
+ },
+ {
"id": "filenameEVENT",
"type": "workflowDefault",
"description": "name of file containing the event data"
- }
+ }
],
"Outputs": [
- {
+ {
"id": "filenameSAM",
"type": "workflowDefault",
"description": "name of file containing the outputted SAM"
- }
+ }
]
},
"Applications": [
@@ -797,12 +787,13 @@
{
"Name": "MultiModel",
"ExecutablePath": "applications/Workflow/MultiModelApplication.py",
- "ApplicationSpecificInputs": [
- {"id": "appKey",
- "type": "string",
- "description": "keyword of application data in input file"
- }
- ]
+ "ApplicationSpecificInputs": [
+ {
+ "id": "appKey",
+ "type": "string",
+ "description": "keyword of application data in input file"
+ }
+ ]
},
{
"Name": "SteelBuildingModel",
@@ -828,28 +819,28 @@
"EDPApplications": {
"API": {
"Inputs": [
- {
+ {
"id": "filenameAIM",
"type": "workflowDefault",
"description": "name of AIM file"
- },
- {
+ },
+ {
"id": "filenameEVENT",
"type": "workflowDefault",
"description": "name of file containing the event data"
- },
- {
+ },
+ {
"id": "filenameSAM",
"type": "workflowDefault",
"description": "name of file containing the SAM"
- }
+ }
],
"Outputs": [
- {
+ {
"id": "filenameEDP",
"type": "workflowDefault",
- "description": "name of file containing the EDP to be determined from the analysis"
- }
+ "description": "name of file containing the EDP to be determined from the analysis"
+ }
]
},
"Applications": [
@@ -924,7 +915,7 @@
"type": "path",
"description": "Identifies the location of where the txt files with requested EDPs are stored."
}
- ]
+ ]
},
{
"Name": "StandardTsunamiEDP",
@@ -935,7 +926,7 @@
"type": "path",
"description": "Identifies the location of where the txt files with requested EDPs are stored."
}
- ]
+ ]
},
{
"Name": "StandardStormSurgeEDP",
@@ -946,40 +937,40 @@
"type": "path",
"description": "Identifies the location of where the txt files with requested EDPs are stored."
}
- ]
+ ]
}
]
},
"SimulationApplications": {
"API": {
"Inputs": [
- {
+ {
"id": "filenameAIM",
"type": "workflowDefault",
"description": "name of file containing the AIM"
- },
- {
+ },
+ {
"id": "filenameSAM",
"type": "workflowDefault",
"description": "name of file containing the SAM"
- },
- {
+ },
+ {
"id": "filenameEVENT",
"type": "workflowDefault",
"description": "name of file containing the event data"
- }
+ }
],
"Outputs": [
- {
+ {
"id": "filenameEDP",
"type": "workflowDefault",
"description": "name of EDP file to be added to"
- },
- {
+ },
+ {
"id": "filenameSIM",
"type": "workflowDefault",
"description": "name of SIM file to be added to"
- }
+ }
]
},
"Applications": [
@@ -1002,13 +993,14 @@
{
"Name": "MultiModel",
"ExecutablePath": "applications/Workflow/MultiModelApplication.py",
- "ApplicationSpecificInputs": [
- {"id": "appKey",
- "type": "string",
- "description": "keyword of application data in input file"
- }
- ]
- },
+ "ApplicationSpecificInputs": [
+ {
+ "id": "appKey",
+ "type": "string",
+ "description": "keyword of application data in input file"
+ }
+ ]
+ },
{
"Name": "OpenSees-Simulation_R",
"ExecutablePath": "applications/performSIMULATION/openSees_R/OpenSeesSimulation.py"
@@ -1036,26 +1028,26 @@
]
},
"UQApplications": {
- "API": {
- "Inputs": [
+ "API": {
+ "Inputs": [
{
- "id": "workflowInput",
- "type": "workflowDefault",
- "description": "name of input file"
+ "id": "workflowInput",
+ "type": "workflowDefault",
+ "description": "name of input file"
},
{
- "id": "driverFile",
- "type": "workflowDefault",
- "description": "name of file containing the simulation driver"
+ "id": "driverFile",
+ "type": "workflowDefault",
+ "description": "name of file containing the simulation driver"
}
- ],
- "Outputs": [
+ ],
+ "Outputs": [
{
- "id": "workflowOutput",
- "type": "workflowDefault",
- "description": "name of EDP file to be added to"
+ "id": "workflowOutput",
+ "type": "workflowDefault",
+ "description": "name of EDP file to be added to"
}
- ]
+ ]
},
"Applications": [
{
@@ -1069,15 +1061,16 @@
{
"Name": "UCSD-UQ",
"ExecutablePath": "applications/performUQ/UCSD_UQ/UCSD_UQ.py"
- },
+ },
{
"Name": "UQpy",
"ExecutablePath": "applications/performUQ/UQpy/UQpyEngine.py"
- },
+ },
{
"Name": "Dakota-FEM",
"ExecutablePath": "applications/performUQ/dakota_R/DakotaFEM.py"
- },{
+ },
+ {
"Name": "Dakota-UQ1",
"ExecutablePath": "applications/performUQ/dakota/DakotaFEM1.py"
},
@@ -1090,26 +1083,25 @@
"FEMApplications": {
"API": {
"Inputs": [
- {
+ {
"id": "workflowInput",
"type": "workflowDefault",
"description": "name of file containing the input"
- }
+ }
],
"Outputs": [
- {
+ {
"id": "driverFile",
"type": "workflowDefault",
"description": "name of output file"
- }
+ }
]
},
"Applications": [
{
"Name": "OpenSees",
"ExecutablePath": "applications/performFEM/OpenSees/createOpenSeesDriver",
- "ApplicationSpecificInputs": [
- ]
+ "ApplicationSpecificInputs": []
},
{
"Name": "OpenSeesPy",
@@ -1130,33 +1122,32 @@
"Name": "MultiModel",
"ExecutablePath": "applications/Workflow/MultiModelDriver.py",
"ApplicationSpecificInputs": []
- } ,
+ },
{
"Name": "Custom",
"ExecutablePath": "applications/performFEM/Custom/createCustomDriver",
"ApplicationSpecificInputs": []
- }
+ }
]
- },
+ },
"DLApplications": {
"API": {
"Inputs": [
- {
+ {
"id": "filenameDL",
"type": "workflowDefault",
"description": "name of file containing the DL model information"
- },
- {
+ },
+ {
"id": "demandFile",
"type": "string",
"description": "name of file containing the EDP data",
"default": "response.csv"
- }
+ }
],
- "Outputs": [
- ]
+ "Outputs": []
},
- "Applications": [
+ "Applications": [
{
"Name": "Pelicun3",
"ExecutablePath": "applications/performDL/pelicun3/pelicun3_wrapper.py",
@@ -1211,33 +1202,58 @@
{
"Name": "CBCitiesDL",
"ExecutablePath": "applications/performDL/CBCities/CBCitiesWDNDL.py",
+ "ApplicationSpecificInputs": []
+ }
+ ]
+ },
+ "SystemPerformanceApplications": {
+ "API": {
+ "Inputs": [],
+ "Outputs": []
+ },
+ "Applications": [
+ {
+ "Name": "REWETRecovery",
+ "ExecutablePath": "applications/systemPerformance/REWEt/REWET_Wrapper.py",
+ "ApplicationSpecificInputs": []
+ },
+ {
+ "Name": "ResidualDemand",
+ "ExecutablePath": "applications/systemPerformance/ResidualDemand/run_residual_demand.py",
"ApplicationSpecificInputs": [
+ {
+ "id": "edgeFile",
+ "type": "path",
+ "description": "path to the roadway network edges geojson file"
+ },
+ {
+ "id": "nodeFile",
+ "type": "path",
+ "description": "path to the roadway network nodes geojson file"
+ },
+ {
+ "id": "ODFilePre",
+ "type": "path",
+ "description": "path to the traffic demand before the event"
+ },
+ {
+ "id": "ODFilePost",
+ "type": "path",
+ "description": "path to the traffic demand after the event"
+ },
+ {
+ "id": "configFile",
+ "type": "path",
+ "description": "path to the residual demand configuration file"
+ }
]
}
]
},
- "SystemPerformanceApplications":{
- "API": {
- "Inputs": [
- ],
- "Outputs": [
- ]
- },
- "Applications": [
- {
- "Name": "REWETRecovery",
- "ExecutablePath": "applications/systemPerformance/REWEt/REWET_Wrapper.py",
- "ApplicationSpecificInputs": [
- ]
- }
- ]
- },
"PerformanceApplications": {
"API": {
- "Inputs": [
- ],
- "Outputs": [
- ]
+ "Inputs": [],
+ "Outputs": []
},
"Applications": [
{
@@ -1258,6 +1274,4 @@
}
]
}
-}
-
-
+}
diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py
index a9e101b3d..38a2a540a 100644
--- a/modules/Workflow/whale/main.py
+++ b/modules/Workflow/whale/main.py
@@ -313,7 +313,7 @@ def create_command(command_list, enforced_python=None):
return command # noqa: DOC201, RUF100
-def run_command(command, app_category=""): # noqa: C901
+def run_command(command, app_category=''):
"""Short description
Long description
@@ -380,13 +380,12 @@ def run_command(command, app_category=""): # noqa: C901
#
try:
-
- if (platform.system() == 'Windows') and ('python.exe' in str(command)):
- if returncode != 0:
- raise WorkFlowInputError('Analysis Failed at ' + app_category) # noqa: TRY301
+ # if (platform.system() == 'Windows') and ('python.exe' in str(command)):
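+        # the non-zero returncode check now applies to every application, not only Windows python.exe runs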
+ if returncode != 0:
+ raise WorkFlowInputError('Analysis Failed at ' + app_category) # noqa: TRY301
# sy - safe apps should be added below
- elif ('OpenSeesInput' in str(command)):
+ elif 'OpenSeesInput' in str(command): # noqa: RET506
if returncode != 0:
raise WorkFlowInputError('Analysis Failed at ' + app_category) # noqa: TRY301
@@ -394,13 +393,13 @@ def run_command(command, app_category=""): # noqa: C901
except WorkFlowInputError as e:
# this will catch the error
- print(str(e).replace('\'','')) # noqa: T201
- print(" =====================================") # noqa: T201
+ print(str(e).replace("'", '')) # noqa: T201
+ print(' =====================================') # noqa: T201
print(str(result)) # noqa: T201
sys.exit(-20)
except: # noqa: E722
- # if for whatever reason the function inside "try" failes, move on without checking error
+ # if for whatever reason the function inside "try" fails, move on without checking error
return str(result), 0
return result, returncode
@@ -1307,7 +1306,7 @@ def create_asset_files(self):
prepend_blank_space=False,
)
- result, returncode = run_command(command,"Asset Creater")
+    result, returncode = run_command(command, 'Asset Creator')
# Check if the command was completed successfully
if returncode != 0:
@@ -1546,7 +1545,15 @@ def perform_system_performance_assessment(self, asset_type):
# Check if system performance is requested
if 'SystemPerformance' in self.workflow_apps:
- performance_app = self.workflow_apps['SystemPerformance'][asset_type]
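+            # run only when a SystemPerformance application is configured for this asset type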
+ if asset_type in self.workflow_apps['SystemPerformance']:
+ performance_app = self.workflow_apps['SystemPerformance'][asset_type]
+ else:
+ log_msg(
+ f'No Performance application to run for asset type: {asset_type}.',
+ prepend_timestamp=False,
+ )
+ log_div()
+ return False
else:
log_msg(
f'No Performance application to run for asset type: {asset_type}.',
@@ -1597,7 +1604,7 @@ def perform_system_performance_assessment(self, asset_type):
prepend_blank_space=False,
)
- result, returncode = run_command(command,"Performance Assessment")
+ result, returncode = run_command(command, 'Performance Assessment')
log_msg(
f'\n{result}\n',
prepend_timestamp=False,
@@ -1661,7 +1668,7 @@ def perform_regional_event(self):
prepend_blank_space=False,
)
- result, returncode = run_command(command,"Hazard Event")
+ result, returncode = run_command(command, 'Hazard Event')
log_msg('Output: ', prepend_timestamp=False, prepend_blank_space=False)
log_msg(
@@ -1724,7 +1731,7 @@ def perform_regional_recovery(self, asset_keys): # noqa: ARG002
prepend_blank_space=False,
)
- result, returncode = run_command(command, "Recovery")
+ result, returncode = run_command(command, 'Recovery')
log_msg('Output: ', prepend_timestamp=False, prepend_blank_space=False)
log_msg(
@@ -1807,7 +1814,7 @@ def perform_regional_mapping(self, AIM_file_path, assetType, doParallel=True):
)
else:
- result, returncode = run_command(command,"Hazard-Asset Mapping (HTA)")
+ result, returncode = run_command(command, 'Hazard-Asset Mapping (HTA)')
log_msg(
'Output: ' + str(returncode),
@@ -2047,7 +2054,9 @@ def preprocess_inputs( # noqa: C901
prepend_blank_space=False,
)
- result, returncode = run_command(command,f'{app_type} - at the initial setup (getRV)')
+ result, returncode = run_command(
+ command, f'{app_type} - at the initial setup (getRV)'
+ )
log_msg(
'Output: ' + str(returncode),
@@ -2457,7 +2466,7 @@ def simulate_response(self, AIM_file_path='AIM.json', asst_id=None): # noqa: C9
prepend_blank_space=False,
)
- result, returncode = run_command(command, "Response Simulator")
+ result, returncode = run_command(command, 'Response Simulator')
if self.run_type in ['run', 'runningLocal']:
log_msg(
@@ -2534,7 +2543,7 @@ def perform_asset_performance(asset_type): # noqa: N805, D102
app_path=self.app_dir_local # noqa: F821
)
command = create_command(app_command_list)
- result, returncode = run_command(command,"Performance assessment")
+ result, returncode = run_command(command, 'Performance assessment')
def estimate_losses( # noqa: C901
self,
@@ -2627,7 +2636,7 @@ def estimate_losses( # noqa: C901
prepend_blank_space=False,
)
- result, returncode = run_command(command,"Damage and loss")
+ result, returncode = run_command(command, 'Damage and loss')
log_msg(result, prepend_timestamp=False)
@@ -2678,7 +2687,7 @@ def estimate_losses( # noqa: C901
prepend_blank_space=False,
)
- result, returncode = run_command(command,"Damage and loss")
+ result, returncode = run_command(command, 'Damage and loss')
log_msg(result, prepend_timestamp=False)
@@ -2821,7 +2830,7 @@ def estimate_performance( # noqa: D102
prepend_blank_space=False,
)
- result, returncode = run_command(command,"Performance Assessment")
+ result, returncode = run_command(command, 'Performance Assessment')
log_msg(result, prepend_timestamp=False)
diff --git a/modules/createEDP/surrogateEDP/surrogateEDP.py b/modules/createEDP/surrogateEDP/surrogateEDP.py
index 46b71e2dd..6b2c4b4ed 100644
--- a/modules/createEDP/surrogateEDP/surrogateEDP.py
+++ b/modules/createEDP/surrogateEDP/surrogateEDP.py
@@ -10,7 +10,8 @@
import argparse
import json
-errPath = os.path.join(os.getcwd(),'workflow.err') # noqa: N816
+errPath = os.path.join(os.getcwd(), 'workflow.err') # noqa: N816, PTH109, PTH118
+
def write_RV(AIM_input_path, EDP_input_path, EDP_type): # noqa: ARG001, N802, N803, D103
# load the AIM file
diff --git a/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py b/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py
index afa851574..ee02c9daa 100644
--- a/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py
+++ b/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*- # noqa: INP001, D100, UP009
# Copyright (c) 2016-2017, The Regents of the University of California (Regents).
# All rights reserved.
#
@@ -43,7 +43,7 @@
# pressure on predicted set of probes.
#
-import sys
+import sys # noqa: I001
import os
import subprocess
import json
@@ -52,7 +52,7 @@
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
-import matplotlib.gridspec as gridspec
+import matplotlib.gridspec as gridspec # noqa: PLR0402
from scipy import signal
from scipy.interpolate import interp1d
from scipy.interpolate import UnivariateSpline
@@ -64,7 +64,7 @@
import re
-def read_bin_forces(fileName):
+def read_bin_forces(fileName): # noqa: C901, N803
"""
Reads binData measured at the center of each bin.
@@ -72,38 +72,38 @@ def read_bin_forces(fileName):
bin heights, time, and the forces and moments vector on the bins for each
time step.
- """
+ """ # noqa: D401
forces = []
moments = []
time = []
nbins = 0
- with open(fileName, 'r') as f:
+ with open(fileName, 'r') as f: # noqa: PTH123, UP015
for line in f:
if line.startswith('#'):
# Read the origin where the force are integrated
if line.startswith('# bins'):
- line = line.replace('# bins', '')
- line = line.replace(':', '')
- line = line.split()
+ line = line.replace('# bins', '') # noqa: PLW2901
+ line = line.replace(':', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
nbins = int(line[0])
coords = np.zeros((nbins, 3))
elif line.startswith('# x co-ords'):
- line = line.replace('# x co-ords', '')
- line = line.replace(':', '')
- line = line.split()
+ line = line.replace('# x co-ords', '') # noqa: PLW2901
+ line = line.replace(':', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
for i in range(nbins):
coords[i, 0] = line[i]
elif line.startswith('# y co-ords'):
- line = line.replace('# y co-ords', '')
- line = line.replace(':', '')
- line = line.split()
+ line = line.replace('# y co-ords', '') # noqa: PLW2901
+ line = line.replace(':', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
for i in range(nbins):
coords[i, 1] = line[i]
elif line.startswith('# z co-ords'):
- line = line.replace('# z co-ords', '')
- line = line.replace(':', '')
- line = line.split()
+ line = line.replace('# z co-ords', '') # noqa: PLW2901
+ line = line.replace(':', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
for i in range(nbins):
coords[i, 2] = line[i]
else:
@@ -111,9 +111,9 @@ def read_bin_forces(fileName):
# Read only the pressure part
else:
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
time.append(float(line[0]))
story_force = np.zeros((nbins, 3))
story_moments = np.zeros((nbins, 3))
@@ -139,26 +139,26 @@ def read_bin_forces(fileName):
return coords, time, forces, moments
-def read_forces(fileName):
+def read_forces(fileName): # noqa: N803
"""
Reads force data integrated over a surface from OpenFOAM file and returns
origin(center of rotation), time, and the forces and moments vector for
each time step.
- """
+ """ # noqa: D205, D401
origin = np.zeros(3)
forces = []
moments = []
time = []
- with open(fileName, 'r') as f:
+ with open(fileName, 'r') as f: # noqa: PTH123, UP015
for line in f:
if line.startswith('#'):
# Read the origin where the force are integrated
if line.startswith('# CofR'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
origin[0] = line[3] # x-coordinate
origin[1] = line[4] # y-coordinate
origin[2] = line[5] # z-coordinate
@@ -167,9 +167,9 @@ def read_forces(fileName):
# Read only the pressure part of force and moments.
# Viscous and porous are ignored
else:
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
time.append(float(line[0]))
forces.append([float(line[1]), float(line[2]), float(line[3])])
moments.append(
@@ -183,7 +183,7 @@ def read_forces(fileName):
return origin, time, forces, moments
-def readPressureProbes(fileName):
+def readPressureProbes(fileName): # noqa: N802, N803
"""
Created on Wed May 16 14:31:42 2018
@@ -191,23 +191,23 @@ def readPressureProbes(fileName):
for each time step.
@author: Abiy
- """
+ """ # noqa: D400, D401
probes = []
p = []
time = []
- with open(fileName, 'r') as f:
+ with open(fileName, 'r') as f: # noqa: PTH123, UP015
for line in f:
if line.startswith('#'):
if line.startswith('# Probe'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
probes.append([float(line[3]), float(line[4]), float(line[5])])
else:
continue
else:
- line = line.split()
+ line = line.split() # noqa: PLW2901
time.append(float(line[0]))
p_probe_i = np.zeros([len(probes)])
for i in range(len(probes)):
@@ -236,7 +236,7 @@ def read_pressure_data(file_names):
-------
time, pressure
Returns the pressure time and pressure data of the connected file.
- """
+ """ # noqa: D205, D401, D404
no_files = len(file_names)
connected_time = [] # Connected array of time
connected_p = [] # connected array of pressure.
@@ -258,7 +258,7 @@ def read_pressure_data(file_names):
index = np.where(time2 > time1[-1])[0][0]
# index += 1
- except:
+ except: # noqa: E722
# sys.exit('Fatal Error!: the pressure filese have time gap')
index = 0 # Joint them even if they have a time gap
@@ -266,7 +266,7 @@ def read_pressure_data(file_names):
connected_p = np.concatenate((connected_p, p2[index:]))
time1 = time2
- p1 = p2
+ p1 = p2 # noqa: F841
return probes, connected_time, connected_p
@@ -275,7 +275,7 @@ class PressureData:
A class that holds a pressure data and performs the following operations:
- mean and rms pressure coefficients
- peak pressure coefficients
- """
+ """ # noqa: D205, D400
def __init__(
self, path, u_ref=0.0, rho=1.25, p_ref=0.0, start_time=None, end_time=None
@@ -296,17 +296,17 @@ def __init__(
self.dt = np.mean(np.diff(self.time))
self.probe_count = np.shape(self.probes)[0]
- def read_cfd_data(self):
- if os.path.isdir(self.path):
- print('Reading from path : %s' % (self.path))
+ def read_cfd_data(self): # noqa: D102
+ if os.path.isdir(self.path): # noqa: PTH112
+ print('Reading from path : %s' % (self.path)) # noqa: T201, UP031
time_names = os.listdir(self.path)
- sorted_index = np.argsort(np.float_(time_names)).tolist()
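+        # np.float_ was removed in NumPy 2.0 (hence the NPY201 suppression); this assumes NumPy 1.x at runtime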
+ sorted_index = np.argsort(np.float_(time_names)).tolist() # noqa: NPY201
# print(sorted_index)
# print("\tTime directories: %s" %(time_names))
file_names = []
for i in range(len(sorted_index)):
- file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p')
+ file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p') # noqa: PTH118
file_names.append(file_name)
# print(file_names)
@@ -319,27 +319,27 @@ def read_cfd_data(self):
# self.p = np.transpose(self.p) # OpenFOAM gives p/rho
else:
- print('Cannot find the file path: %s' % (self.path))
+ print('Cannot find the file path: %s' % (self.path)) # noqa: T201, UP031
- def set_time(self):
- if self.start_time != None:
+ def set_time(self): # noqa: D102
+ if self.start_time != None: # noqa: E711
start_index = int(np.argmax(self.time > self.start_time))
self.time = self.time[start_index:]
# self.cp = self.cp[:,start_index:]
try:
self.p = self.p[:, start_index:]
self.cp = self.cp[:, start_index:]
- except:
+ except: # noqa: S110, E722
pass
- if self.end_time != None:
+ if self.end_time != None: # noqa: E711
end_index = int(np.argmax(self.time > self.end_time))
self.time = self.time[:end_index]
# self.cp = self.cp[:,:end_index]
try:
self.p = self.p[:, :end_index]
self.cp = self.cp[:, :end_index]
- except:
+ except: # noqa: S110, E722
pass
@@ -361,13 +361,13 @@ def set_time(self):
case_path = arguments.case
# case_path = "C:\\Users\\fanta\\Documents\\WE-UQ\\LocalWorkDir\\IsolatedBuildingCFD"
- print('Case full path: ', case_path)
+ print('Case full path: ', case_path) # noqa: T201
# Read JSON data
- json_path = os.path.join(
+ json_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'input', 'IsolatedBuildingCFD.json'
)
- with open(json_path) as json_file:
+ with open(json_path) as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -375,19 +375,19 @@ def set_time(self):
wc_data = json_data['windCharacteristics']
duration = json_data['numericalSetup']['duration']
- load_output_path = os.path.join(
+ load_output_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'output', 'windLoads'
)
# Check if it exists and remove files
- if os.path.exists(load_output_path):
+ if os.path.exists(load_output_path): # noqa: PTH110
shutil.rmtree(load_output_path)
# Create new path
Path(load_output_path).mkdir(parents=True, exist_ok=True)
# Read and write the story forces
- force_file_name = os.path.join(
+ force_file_name = os.path.join( # noqa: PTH118
case_path, 'postProcessing', 'storyForces', '0', 'forces_bins.dat'
)
origin, time, forces, moments = read_bin_forces(force_file_name)
@@ -396,12 +396,12 @@ def set_time(self):
forces = forces[start_time:, :, :]
moments = moments[start_time:, :, :]
- print(force_file_name)
+ print(force_file_name) # noqa: T201
num_times = len(time)
num_stories = rm_data['numStories']
- storyLoads = np.zeros((num_times, num_stories * 3 + 1))
+ storyLoads = np.zeros((num_times, num_stories * 3 + 1)) # noqa: N816
storyLoads[:, 0] = time
@@ -411,12 +411,14 @@ def set_time(self):
storyLoads[:, 3 * i + 3] = moments[:, i, 2]
np.savetxt(
- os.path.join(load_output_path, 'storyLoad.txt'), storyLoads, delimiter='\t'
+ os.path.join(load_output_path, 'storyLoad.txt'), # noqa: PTH118
+ storyLoads,
+ delimiter='\t',
)
# Write base loads
if rm_data['monitorBaseLoad']:
- force_file_name = os.path.join(
+ force_file_name = os.path.join( # noqa: PTH118
case_path, 'postProcessing', 'baseForces', '0', 'forces.dat'
)
origin, time, forces, moments = read_forces(force_file_name)
@@ -425,34 +427,36 @@ def set_time(self):
forces = forces[start_time:, :]
moments = moments[start_time:, :]
- print(force_file_name)
+ print(force_file_name) # noqa: T201
num_times = len(time)
- baseLoads = np.zeros((num_times, 3 * 2 + 1))
+ baseLoads = np.zeros((num_times, 3 * 2 + 1)) # noqa: N816
baseLoads[:, 0] = time
baseLoads[:, 1:4] = forces
baseLoads[:, 4:7] = moments
np.savetxt(
- os.path.join(load_output_path, 'baseLoad.txt'), baseLoads, delimiter='\t'
+ os.path.join(load_output_path, 'baseLoad.txt'), # noqa: PTH118
+ baseLoads,
+ delimiter='\t',
)
# Write base loads
if rm_data['monitorSurfacePressure']:
p_file_name = ''
if rm_data['importPressureSamplingPoints']:
- p_file_name = os.path.join(
+ p_file_name = os.path.join( # noqa: PTH118
case_path, 'postProcessing', 'importedPressureSamplingPoints'
)
else:
- p_file_name = os.path.join(
+ p_file_name = os.path.join( # noqa: PTH118
case_path, 'postProcessing', 'generatedPressureSamplingPoints'
)
wind_speed = wc_data['referenceWindSpeed']
- print(p_file_name)
+ print(p_file_name) # noqa: T201
cfd_p = PressureData(
p_file_name,
@@ -465,13 +469,13 @@ def set_time(self):
num_times = len(cfd_p.time)
- pressureData = np.zeros((num_times, cfd_p.probe_count + 1))
+ pressureData = np.zeros((num_times, cfd_p.probe_count + 1)) # noqa: N816
pressureData[:, 0] = cfd_p.time
pressureData[:, 1:] = np.transpose(cfd_p.cp)
np.savetxt(
- os.path.join(load_output_path, 'pressureData.txt'),
+ os.path.join(load_output_path, 'pressureData.txt'), # noqa: PTH118
pressureData,
delimiter='\t',
)
diff --git a/modules/createEVENT/IsolatedBuildingCFD/setup_case.py b/modules/createEVENT/IsolatedBuildingCFD/setup_case.py
index f99cda085..9f23f3453 100644
--- a/modules/createEVENT/IsolatedBuildingCFD/setup_case.py
+++ b/modules/createEVENT/IsolatedBuildingCFD/setup_case.py
@@ -1248,10 +1248,10 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path): # n
monitor_vtk_planes = rm_data['monitorVTKPlane']
vtk_planes = rm_data['vtkPlanes']
-
- # Need to change this for
+
+    # Need to change this for
max_delta_t = 10*time_step
-
+
#Write 10 times
write_frequency = 10.0
write_interval_time = duration / write_frequency
@@ -1322,21 +1322,21 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path): # n
added_part = ' #includeFunc baseForces\n'
dict_lines.insert(start_index, added_part)
- #Write VTK sampling sampling points
+    #Write VTK sampling points
if monitor_vtk_planes:
added_part = ""
for pln in vtk_planes:
added_part += " #includeFunc {}\n".format(pln["name"])
dict_lines.insert(start_index, added_part)
-
+
#Write edited dict to file
write_file_name = case_path + "/system/controlDict"
-
+
if os.path.exists(write_file_name):
os.remove(write_file_name)
-
+
output_file = open(write_file_name, "w+")
for line in dict_lines:
output_file.write(line)
@@ -1994,7 +1994,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
json_data = json.load(json_file)
# Returns JSON object as a dictionary
- rm_data = json_data["resultMonitoring"]
+    rm_data = json_data["resultMonitoring"]
ns_data = json_data["numericalSetup"]
solver_type = ns_data['solverType']
time_step = ns_data['timeStep']
@@ -2004,9 +2004,9 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
write_interval = rm_data['vtkWriteInterval']
if rm_data['monitorVTKPlane'] == False:
- return
-
- if len(vtk_planes)==0:
+        return
+
+    if len(vtk_planes)==0:
return
#Write dict files for wind profiles
@@ -2016,38 +2016,38 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
dict_lines = dict_file.readlines()
dict_file.close()
-
- #Write writeControl
- start_index = foam.find_keyword_line(dict_lines, "writeControl")
+
+    #Write writeControl
+    start_index = foam.find_keyword_line(dict_lines, "writeControl")
if solver_type=="pimpleFoam":
dict_lines[start_index] = " writeControl \t{};\n".format("adjustableRunTime")
else:
- dict_lines[start_index] = " writeControl \t{};\n".format("timeStep")
+        dict_lines[start_index] = "    writeControl \t{};\n".format("timeStep")
#Write writeInterval
- start_index = foam.find_keyword_line(dict_lines, "writeInterval")
+    start_index = foam.find_keyword_line(dict_lines, "writeInterval")
if solver_type=="pimpleFoam":
dict_lines[start_index] = " writeInterval \t{:.6f};\n".format(write_interval*time_step)
else:
dict_lines[start_index] = " writeInterval \t{};\n".format(write_interval)
- #Write start and end time for the section
+    #Write start and end time for the section
start_time = pln['startTime']
end_time = pln['endTime']
- start_index = foam.find_keyword_line(dict_lines, "timeStart")
- dict_lines[start_index] = " timeStart \t\t{:.6f};\n".format(start_time)
+    start_index = foam.find_keyword_line(dict_lines, "timeStart")
+    dict_lines[start_index] = "    timeStart \t\t{:.6f};\n".format(start_time)
- start_index = foam.find_keyword_line(dict_lines, "timeEnd")
- dict_lines[start_index] = " timeEnd \t\t{:.6f};\n".format(end_time)
+    start_index = foam.find_keyword_line(dict_lines, "timeEnd")
+    dict_lines[start_index] = "    timeEnd \t\t{:.6f};\n".format(end_time)
- #Write name of the profile
+    #Write name of the profile
name = pln["name"]
- start_index = foam.find_keyword_line(dict_lines, "planeName")
- dict_lines[start_index] = "{}\n".format(name)
+    start_index = foam.find_keyword_line(dict_lines, "planeName")
+    dict_lines[start_index] = "{}\n".format(name)
- #Write field type
+    #Write field type
field_type = pln["field"]
- start_index = foam.find_keyword_line(dict_lines, "fields")
+    start_index = foam.find_keyword_line(dict_lines, "fields")
if field_type=="Velocity":
dict_lines[start_index] = " fields \t\t({});\n".format("U")
@@ -2061,31 +2061,31 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
normal_axis = pln["normalAxis"]
- start_index = foam.find_keyword_line(dict_lines, "point")
+    start_index = foam.find_keyword_line(dict_lines, "point")
dict_lines[start_index] = "\t point\t\t({:.6f} {:.6f} {:.6f});\n".format(point_x, point_y, point_z)
- start_index = foam.find_keyword_line(dict_lines, "normal")
- if normal_axis=="X":
+    start_index = foam.find_keyword_line(dict_lines, "normal")
+    if normal_axis=="X":
dict_lines[start_index] = "\t normal\t\t({} {} {});\n".format(1, 0, 0)
- if normal_axis=="Y":
+    if normal_axis=="Y":
dict_lines[start_index] = "\t normal\t\t({} {} {});\n".format(0, 1, 0)
+    if normal_axis=="Z":
+ if normal_axis=="Z": # noqa: W291
dict_lines[start_index] = "\t normal\t\t({} {} {});\n".format(0, 0, 1)
#Write edited dict to file
write_file_name = case_path + "/system/" + name
-
+
if os.path.exists(write_file_name):
os.remove(write_file_name)
-
+
output_file = open(write_file_name, "w+")
for line in dict_lines:
output_file.write(line)
output_file.close()
-if __name__ == '__main__':
-
+if __name__ == '__main__':
+
@@ -2174,6 +2174,6 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
-
+
diff --git a/modules/createEVENT/M9/M9App2.py b/modules/createEVENT/M9/M9App2.py
index c7e00cbeb..6d5bb43f3 100644
--- a/modules/createEVENT/M9/M9App2.py
+++ b/modules/createEVENT/M9/M9App2.py
@@ -1,5 +1,5 @@
-# %%
-import os
+# %% # noqa: INP001, D100
+import os # noqa: I001
import json
from datetime import datetime
import time
@@ -7,10 +7,10 @@
# change the directory to the current directory
-os.chdir(os.path.dirname(os.path.realpath(__file__)))
+os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
-def Submit_tapis_job(username, password):
+def Submit_tapis_job(username, password): # noqa: N802, D103
t = Tapis(
base_url='https://designsafe.tapis.io', username=username, password=password
)
@@ -19,14 +19,14 @@ def Submit_tapis_job(username, password):
t.authenticator.list_profiles()
- with open('TapisFiles/information.json', 'r') as file:
+ with open('TapisFiles/information.json', 'r') as file: # noqa: PTH123, UP015
information = json.load(file)
file.close()
# %%
- savingDirectory = information['directory']
- if not os.path.exists(savingDirectory):
- os.makedirs(savingDirectory)
+ savingDirectory = information['directory'] # noqa: N806
+ if not os.path.exists(savingDirectory): # noqa: PTH110
+ os.makedirs(savingDirectory) # noqa: PTH103
# print("Uploading files to designsafe storage")
t.files.mkdir(
@@ -35,23 +35,23 @@ def Submit_tapis_job(username, password):
t.files.mkdir(
systemId='designsafe.storage.default', path=f'{t.username}/physics_based/M9'
)
- with open('TapisFiles/M9.py', 'rb') as file:
+ with open('TapisFiles/M9.py', 'rb') as file: # noqa: PTH123
contents = file.read()
result = t.files.insert(
systemId='designsafe.storage.default',
path=f'{t.username}/physics_based/M9/M9.py',
file=contents,
)
- with open('TapisFiles/information.json', 'rb') as file:
+ with open('TapisFiles/information.json', 'rb') as file: # noqa: PTH123
contents = file.read()
result = t.files.insert(
systemId='designsafe.storage.default',
path=f'{t.username}/physics_based/M9/information.json',
file=contents,
)
- with open('TapisFiles/selectedSites.csv', 'rb') as file:
+ with open('TapisFiles/selectedSites.csv', 'rb') as file: # noqa: PTH123
contents = file.read()
- result = t.files.insert(
+ result = t.files.insert( # noqa: F841
systemId='designsafe.storage.default',
path=f'{t.username}/physics_based/M9/selectedSites.csv',
file=contents,
@@ -60,19 +60,19 @@ def Submit_tapis_job(username, password):
# %%
# -------------------------------------------------------------------------------
# Define Inputs
- input_Directory = (
+ input_Directory = ( # noqa: N806
f'tapis://designsafe.storage.default/{t.username}/physics_based/M9'
)
- fileInputs = [{'name': 'Input Directory', 'sourceUrl': input_Directory}]
+ fileInputs = [{'name': 'Input Directory', 'sourceUrl': input_Directory}] # noqa: N806
# -------------------------------------------------------------------------------
# Define parameterSet
input_filename = 'M9.py'
- input_uri = f'tapis://designsafe.storage.default/{t.username}/physics_based/M9/'
+ input_uri = f'tapis://designsafe.storage.default/{t.username}/physics_based/M9/' # noqa: F841
# parameterSet = {"envVariables": [{"key": "inputScript", "value": input_filename},
# {"key": "dataDirectory", "value": input_uri}]}
- parameterSet = {
+ parameterSet = { # noqa: N806
'envVariables': [
{'key': 'inputScript', 'value': input_filename},
{
@@ -99,16 +99,16 @@ def Submit_tapis_job(username, password):
}
# Generate a timestamp to append to the job name an
- timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+ timestamp = datetime.now().strftime('%Y%m%d%H%M%S') # noqa: DTZ005
jobname = f'PhysicsBasedMotion_M9_{t.username}_{timestamp}'
- print('Submitting job')
+ print('Submitting job') # noqa: T201
# submit the job
jobdict['name'] = jobname
# %%
res = t.jobs.submitJob(**jobdict)
- mjobUuid = res.uuid
+ mjobUuid = res.uuid # noqa: N806
tlapse = 1
status = t.jobs.getJobStatus(jobUuid=mjobUuid).status
@@ -119,22 +119,22 @@ def Submit_tapis_job(username, password):
status = t.jobs.getJobStatus(jobUuid=mjobUuid).status
if status == previous:
continue
- else:
+ else: # noqa: RET507
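+            # log only when the polled job status changes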
previous = status
- print(f'\tStatus: {status}')
+ print(f'\tStatus: {status}') # noqa: T201
time.sleep(tlapse)
# %%
- print('Downloading extracted motions')
- archivePath = t.jobs.getJob(jobUuid=mjobUuid).archiveSystemDir
- archivePath = f'{archivePath}/M9/Events'
+ print('Downloading extracted motions') # noqa: T201
+ archivePath = t.jobs.getJob(jobUuid=mjobUuid).archiveSystemDir # noqa: N806
+ archivePath = f'{archivePath}/M9/Events' # noqa: N806
files = t.files.listFiles(
systemId='designsafe.storage.default', path=archivePath
)
# %%
if len(files) <= 1:
- print('No files in the archive')
+ print('No files in the archive') # noqa: T201
else:
for file in files:
filename = file.name
@@ -145,11 +145,11 @@ def Submit_tapis_job(username, password):
systemId='designsafe.storage.default', path=path
)
- with open(f'{savingDirectory}/{filename}', 'w') as f:
+ with open(f'{savingDirectory}/{filename}', 'w') as f: # noqa: PTH123
f.write(res.decode('utf-8'))
f.close()
- print('Files downloaded')
- print('Please check the directory for the extracted motions')
+ print('Files downloaded') # noqa: T201
+ print('Please check the directory for the extracted motions') # noqa: T201
# %%
diff --git a/modules/createEVENT/M9/TapisFiles/M9.py b/modules/createEVENT/M9/TapisFiles/M9.py
index 1b1ada183..9c1d0f760 100644
--- a/modules/createEVENT/M9/TapisFiles/M9.py
+++ b/modules/createEVENT/M9/TapisFiles/M9.py
@@ -1,5 +1,5 @@
-# %%
-import os
+# %% # noqa: INP001, D100
+import os # noqa: I001
import numpy as np
import pandas as pd
import json
@@ -8,41 +8,41 @@
# 'netcdf4', 'h5netcdf', 'scipy'
# %%
-def M9(information):
+def M9(information): # noqa: N802
"""
the default is to select sites from all M9 sites, but
grid type (options: A, B, C, D, E, Y, and Z, can be empty)
(ref: https://sites.uw.edu/pnet/m9-simulations/about-m9-simulations/extent-of-model/)
- """
+ """ # noqa: D202, D205, D400, D401
- LocationFlag = information['LocationFlag']
- numSiteGM = information['number_of_realizations']
- grid_type = information[
+ LocationFlag = information['LocationFlag'] # noqa: N806
+ numSiteGM = information['number_of_realizations'] # noqa: N806
+ grid_type = information[ # noqa: F841
'grid_type'
] # grid type (options: A, B, C, D, E, Y, and Z, can be "all")
- randomFLag = True # if True, the realizations are selected randomly, otherwise, the first numSiteGM sites are selected
- maxnumSiteGM = 30
- numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations
+ randomFLag = True # if True, the realizations are selected randomly, otherwise, the first numSiteGM sites are selected # noqa: N806
+ maxnumSiteGM = 30 # noqa: N806
+ numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations # noqa: N806
- my_Directory = os.getcwd()
- print(f'Current Directory: {my_Directory}')
+ my_Directory = os.getcwd() # noqa: PTH109, N806
+ print(f'Current Directory: {my_Directory}') # noqa: T201
- files = [f for f in os.listdir() if os.path.isfile(f)]
- print(files)
+ files = [f for f in os.listdir() if os.path.isfile(f)] # noqa: PTH113
+ print(files) # noqa: T201
# changing realizations order
# indicies = list(range(maxnumSiteGM));
- Realizations = ['032']
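+    # a single M9 realization ('032') is currently hardcoded; indicies holds its position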
+ Realizations = ['032'] # noqa: N806
indicies = [0]
if randomFLag:
np.random.shuffle(indicies)
indicies = indicies[:numSiteGM]
username = os.getlogin()
- print(f'Username: {username}')
+ print(f'Username: {username}') # noqa: T201
- M9Path = f'/home/{username}/work/projects/PRJ-4603'
+ M9Path = f'/home/{username}/work/projects/PRJ-4603' # noqa: N806
# M9Path = "/home/parduino/work/projects/PRJ-4603"
# M9Path = "/home/jovyan/work/projects/PRJ-4603"
@@ -50,19 +50,19 @@ def M9(information):
# M9Path = f{"tapis://project-"+_UserProjects"
# M9Path = "/data/projects/PRJ-4603"
- print('M9PATH defined')
+ print('M9PATH defined') # noqa: T201
directory = './Events' # directory to save the data
# create the directory if it does not exist
- if not os.path.exists(directory):
- os.makedirs(directory)
+ if not os.path.exists(directory): # noqa: PTH110
+ os.makedirs(directory) # noqa: PTH103
- print('Checking if the directory is created')
+ print('Checking if the directory is created') # noqa: T201
all_entries = os.listdir()
- print(all_entries)
+ print(all_entries) # noqa: T201
gdf = pd.read_csv('selectedSites.csv', index_col=0)
- APIFLAG = information[
+ APIFLAG = information[ # noqa: N806
'APIFLAG'
] # if the APIFLAG is True, we use M9 API to get the motion data
@@ -73,19 +73,19 @@ def M9(information):
site_name = site['Station Name']
lat = site['Latitude']
lon = site['Longitude']
- firstLetter = site_name[0]
+ firstLetter = site_name[0] # noqa: N806
filename = f'{M9Path}/csz{Realizations[i]}/{firstLetter}/Xarray.nc'
# reading the nc file
data = xr.open_dataset(filename)
subset = data.sel(lat=lat, lon=lon, method='nearest')
- dt = data.coords['time'].values
+ dt = data.coords['time'].values # noqa: PD011
dt = dt[1] - dt[0]
sitedata = {
'dT': dt,
- 'accel_x': subset['acc_x'].values.tolist(),
- 'accel_y': subset['acc_y'].values.tolist(),
- 'accel_z': subset['acc_z'].values.tolist(),
+ 'accel_x': subset['acc_x'].values.tolist(), # noqa: PD011
+ 'accel_y': subset['acc_y'].values.tolist(), # noqa: PD011
+ 'accel_z': subset['acc_z'].values.tolist(), # noqa: PD011
}
write_motion(site_name, directory, i, sitedata, APIFLAG)
gdf['filename'] = f'{site_name}_{i}'
@@ -100,13 +100,13 @@ def M9(information):
f'{directory}/sites.csv', index=False
)
- print('Script is done')
+ print('Script is done') # noqa: T201
-def write_motion(site_name, directory, i, motiondict, APIFLAG):
+def write_motion(site_name, directory, i, motiondict, APIFLAG): # noqa: N803, D103
filename = f'{directory}/{site_name}_{i}.json'
- print(f'Writing {filename}')
+ print(f'Writing {filename}') # noqa: T201
if APIFLAG:
accel_x = 'AccelerationHistory-EW'
@@ -127,14 +127,14 @@ def write_motion(site_name, directory, i, motiondict, APIFLAG):
datatowrite['Data'] = 'Time history generated using M9 simulations'
datatowrite['name'] = f'{site_name}_{i}'
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
json.dump(datatowrite, f, indent=2)
if __name__ == '__main__':
# change the directory to the directory of the current file
- os.chdir(os.path.dirname(os.path.abspath(__file__)))
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
- with open('information.json', 'r') as file:
+ with open('information.json', 'r') as file: # noqa: PTH123, UP015
information = json.load(file)
M9(information)
diff --git a/modules/createEVENT/experimentalWindForces/experimentalWindForces.py b/modules/createEVENT/experimentalWindForces/experimentalWindForces.py
index ed11b319c..ce2441c47 100644
--- a/modules/createEVENT/experimentalWindForces/experimentalWindForces.py
+++ b/modules/createEVENT/experimentalWindForces/experimentalWindForces.py
@@ -17,7 +17,8 @@
from convertWindMat import * # noqa: F403
-errPath = os.path.join(os.getcwd(),'workflow.err') # noqa: N816
+errPath = os.path.join(os.getcwd(), 'workflow.err') # noqa: N816, PTH109, PTH118
+
def main(aimName, evtName, getRV): # noqa: C901, N803, D103, PLR0915
    # THIS IS PERFORMED ONLY ONCE
    with open(aimName, 'r', encoding='utf-8') as f:
diff --git a/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py b/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py
index 9e5c5076a..82b9c68e0 100644
--- a/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py
+++ b/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py
@@ -19,7 +19,7 @@
from convertWindMat import * # noqa: F403
-errPath = os.path.join(os.getcwd(),'workflow.err') # noqa: N816
+errPath = os.path.join(os.getcwd(), 'workflow.err') # noqa: N816, PTH109, PTH118
sys.stderr = open( # noqa: SIM115, PTH123
errPath, 'w'
) # redirecting stderr (this way we can capture all sorts of python errors)
diff --git a/modules/createEVENT/shakeMapEvent/shakeMapEvent.py b/modules/createEVENT/shakeMapEvent/shakeMapEvent.py
index d6f21e1b5..244195009 100644
--- a/modules/createEVENT/shakeMapEvent/shakeMapEvent.py
+++ b/modules/createEVENT/shakeMapEvent/shakeMapEvent.py
@@ -37,22 +37,22 @@
# Stevan Gavrilovic
#
-import argparse
+import argparse # noqa: I001
import xml.etree.ElementTree as ET
import geopandas as gpd
from shapely.geometry import Point
from pathlib import Path
-def create_shakemap_event(eventDirectory, eventPath, IMTypes): # noqa: D103
- IMTypesList = eval(IMTypes)
+def create_shakemap_event(eventDirectory, eventPath, IMTypes): # noqa: D103, N803
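+    # IMTypes arrives as a string-encoded list from the CLI; eval() parses it (S307 marks the risk)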
+ IMTypesList = eval(IMTypes) # noqa: S307, N806
- print('Creating shakemap event')
+ print('Creating shakemap event') # noqa: T201
xml_file_path = Path(eventDirectory) / eventPath / 'grid.xml'
# Parse the XML file
- tree = ET.parse(xml_file_path)
+ tree = ET.parse(xml_file_path) # noqa: S314
root = tree.getroot()
# Find the grid_data element
@@ -90,13 +90,13 @@ def create_shakemap_event(eventDirectory, eventPath, IMTypes): # noqa: D103
gdf = gpd.GeoDataFrame(attributes, geometry=points, crs='EPSG:4326')
# Display the first few rows
- print('Saving shakemap to gpkg')
+ print('Saving shakemap to gpkg') # noqa: T201
# Save as a GeoPackage file
gdf_path = Path(eventDirectory) / 'EventGrid.gpkg'
gdf.to_file(gdf_path, driver='GPKG')
- return
+ return # noqa: PLR1711
if __name__ == '__main__':
diff --git a/modules/createSAM/multiFidelityBuildingModel/MultiFidelityBuildingModel.py b/modules/createSAM/multiFidelityBuildingModel/MultiFidelityBuildingModel.py
index 40a596841..e227d08f4 100644
--- a/modules/createSAM/multiFidelityBuildingModel/MultiFidelityBuildingModel.py
+++ b/modules/createSAM/multiFidelityBuildingModel/MultiFidelityBuildingModel.py
@@ -56,8 +56,8 @@ def create_SAM( # noqa: N802, D103, C901
current_building_id = AIM['GeneralInformation']['AIM_id']
database_path = AIM['Modeling']['buildingDatabase']
with open( # noqa: PTH123, UP015
- os.path.join('..', '..', '..', '..', 'input_data', database_path),
- 'r', # noqa: PTH118
+ os.path.join('..', '..', '..', '..', 'input_data', database_path), # noqa: PTH118
+        'r',
) as f:
json_string = f.read()
database = json.loads(json_string)
diff --git a/modules/createSAM/surrogateGP/SurrogateGP.py b/modules/createSAM/surrogateGP/SurrogateGP.py
index ad31aa177..adfe2755c 100644
--- a/modules/createSAM/surrogateGP/SurrogateGP.py
+++ b/modules/createSAM/surrogateGP/SurrogateGP.py
@@ -47,7 +47,8 @@
import os
import sys
-errFileName = os.path.join(os.getcwd(),'workflow.err') # noqa: N816
+errFileName = os.path.join(os.getcwd(), 'workflow.err') # noqa: N816, PTH109, PTH118
+
def create_SAM(AIM_file, SAM_file): # noqa: N802, N803, D103
#
diff --git a/modules/performFEM/surrogateGP/gpPredict.py b/modules/performFEM/surrogateGP/gpPredict.py
index 50d02b4dc..d7d124bf7 100644
--- a/modules/performFEM/surrogateGP/gpPredict.py
+++ b/modules/performFEM/surrogateGP/gpPredict.py
@@ -210,7 +210,7 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None): # noqa: N802, N803
self.set_XY(X, Y)
- def get_stochastic_variance(X, Y, x, ny): # noqa: N803
+ def get_stochastic_variance(X, Y, x, ny): # noqa: C901, N803
# X_unique, X_idx, indices, counts = np.unique(X, axis=0, return_index=True, return_counts=True, return_inverse=True)
X_unique, dummy, indices, counts = np.unique( # noqa: N806
X, axis=0, return_index=True, return_counts=True, return_inverse=True
@@ -250,13 +250,13 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
var_pred = np.exp(log_var_pred)
if did_normalization:
- # Y_normFact = np.var(Y_mean) # noqa: N806
+ # Y_normFact = np.var(Y_mean) # noqa: N806, RUF100
Y_normFact = np.mean(var_pred.T[0]) # noqa: N806
else:
Y_normFact = 1 # noqa: N806
norm_var_str = (
- (var_pred.T[0]) / Y_normFact
+ (var_pred.T[0]) / Y_normFact
) # if normalization was used..
log_var_pred_x, dum = m_var.predict(x)
@@ -272,13 +272,16 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
#
old_version = False
for key, val in sur['modelInfo'][g_name_sur[ny] + '_Var'].items(): # noqa: B007, PERF102
- if "sum" in key:
+ if 'sum' in key:
old_version = True
break
if old_version:
- print("The surrogate model was trained using an older version of the tool. Please retrain the model using this version or use older version.", file=sys.stderr)
- exit(-1)
+ print( # noqa: T201
+ 'The surrogate model was trained using an older version of the tool. Please retrain the model using this version or use older version.',
+ file=sys.stderr,
+ )
+ exit(-1) # noqa: PLR1722
log_vars = np.atleast_2d(
sur['modelInfo'][g_name_sur[ny] + '_Var']['TrainingSamplesY']
@@ -298,14 +301,14 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
var_pred = np.exp(log_var_pred)
if did_normalization:
- # Y_normFact = np.var(Y) # noqa: N806
+ # Y_normFact = np.var(Y) # noqa: N806, RUF100
Y_normFact = np.mean(var_pred.T[0]) # noqa: N806
else:
Y_normFact = 1 # noqa: N806
norm_var_str = (
- (var_pred.T[0]) / Y_normFact
+ (var_pred.T[0]) / Y_normFact
) # if normalization was used..
log_var_pred_x, dum = m_var.predict(x)
@@ -347,12 +350,12 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
continue
if (
- not name_values[1]
- .replace('.', '', 1)
- .replace('e', '', 1)
- .replace('-', '', 2)
- .replace('+', '', 1)
- .isdigit()
+ not name_values[1]
+ .replace('.', '', 1)
+ .replace('e', '', 1)
+ .replace('-', '', 2)
+ .replace('+', '', 1)
+ .isdigit()
):
# surrogate model does not accept discrete
continue
@@ -478,7 +481,7 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
s = [str(rv_name_sur[id]) for id in missing_ids] # noqa: A001
if first_eeuq_found and all(
- [missingEDP.endswith('-2') for missingEDP in s] # noqa: C419
+ [missingEDP.endswith('-2') for missingEDP in s] # noqa: C419
):
msg = 'ground motion dimension does not match with that of the training'
# for i in range(len(s)):
@@ -537,8 +540,12 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
lin_list = []
for ny in range(ng_sur):
tmp_lin = LinearRegression()
- tmp_lin.coef_ = np.array(sur['modelInfo'][g_name_sur[ny] + '_Lin']['coef'])
- tmp_lin.intercept_ = np.array(sur['modelInfo'][g_name_sur[ny] + '_Lin']['intercept'])
+ tmp_lin.coef_ = np.array(
+ sur['modelInfo'][g_name_sur[ny] + '_Lin']['coef']
+ )
+ tmp_lin.intercept_ = np.array(
+ sur['modelInfo'][g_name_sur[ny] + '_Lin']['intercept']
+ )
lin_list += [tmp_lin]
else:
did_linear = False
@@ -582,9 +589,9 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
exec('m_list[ny].' + key + '= np.array(val)') # noqa: S102
nugget_var_list[ny] = (
- m_list[ny].Gaussian_noise.parameters
- * nugget_var_pred
- * Y_normFact
+ m_list[ny].Gaussian_noise.parameters
+ * nugget_var_pred
+ * Y_normFact
)
else:
@@ -612,8 +619,8 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
for ny in range(ng_sur):
Y_normFact = np.var(Y[:, ny]) # noqa: N806
nugget_var_list[ny] = (
- m_list[ny].gpy_model['mixed_noise.Gaussian_noise.variance']
- * Y_normFact
+ m_list[ny].gpy_model['mixed_noise.Gaussian_noise.variance']
+ * Y_normFact
)
# read param in file and sort input
@@ -757,7 +764,7 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
templatedirFolder = os.path.join(os.getcwd(), 'templatedir_SIM') # noqa: PTH109, PTH118, N806
if (
- (isEEUQ or isWEUQ) and nsamp == 1
+ (isEEUQ or isWEUQ) and nsamp == 1
): # because stochastic ground motion generation uses folder number when generating random seed.............
current_dir_i = os.path.join( # noqa: PTH118
os.getcwd(), # noqa: PTH109
@@ -853,15 +860,15 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
if isEEUQ or isWEUQ:
if (
- os_type.lower().startswith('win')
- and run_type.lower() == 'runninglocal'
+ os_type.lower().startswith('win')
+ and run_type.lower() == 'runninglocal'
):
workflowDriver = 'sc_driver.bat' # noqa: N806
else:
workflowDriver = 'sc_driver' # noqa: N806
elif (
- os_type.lower().startswith('win')
- and run_type.lower() == 'runninglocal'
+ os_type.lower().startswith('win')
+ and run_type.lower() == 'runninglocal'
):
workflowDriver = 'driver.bat' # noqa: N806
else:
@@ -889,9 +896,9 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
elif when_inaccurate == 'continue':
msg2 = (
- msg0
- + msg1[ns]
- + '- CONTINUE [Warning: results may not be accurate]\n'
+ msg0
+ + msg1[ns]
+ + '- CONTINUE [Warning: results may not be accurate]\n'
)
error_warning(msg2)
@@ -902,8 +909,8 @@ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
else:
msg3 = (
- msg0
- + f'Prediction error level of output {idx[ns]} is {np.max(error_ratio2[ns]) * 100:.2f}%\n'
+ msg0
+ + f'Prediction error level of output {idx[ns]} is {np.max(error_ratio2[ns]) * 100:.2f}%\n'
)
error_warning(msg3)
@@ -1016,7 +1023,7 @@ def predict(m, X, did_mf): # noqa: N803, D103
# TODO change below to noiseless # noqa: TD002, TD004
X_list = convert_x_list_to_array([X, X]) # noqa: N806
X_list_l = X_list[: X.shape[0]] # noqa: N806, F841
- X_list_h = X_list[X.shape[0]:] # noqa: N806
+ X_list_h = X_list[X.shape[0] :] # noqa: N806
return m.predict(X_list_h)
diff --git a/modules/performREC/CMakeLists.txt b/modules/performREC/CMakeLists.txt
new file mode 100644
index 000000000..db707a42f
--- /dev/null
+++ b/modules/performREC/CMakeLists.txt
@@ -0,0 +1,2 @@
+simcenter_add_python_script(SCRIPT transportation.py)
+add_subdirectory(pyrecodes)
diff --git a/modules/performREC/transportation.py b/modules/performREC/transportation.py
new file mode 100644
index 000000000..ea28ab52d
--- /dev/null
+++ b/modules/performREC/transportation.py
@@ -0,0 +1,2049 @@
+"""Methods for performance simulations of transportation networks.""" # noqa: INP001
+
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2024 The Regents of the University of California
+#
+# This file is part of SimCenter Backend Applications.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# BRAILS. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Barbaros Cetiner
+# Tianyu Han
+#
+# Last updated:
+# 08-14-2024
+
+from __future__ import annotations # noqa: I001
+
+import gc
+import os
+import json
+import logging
+import time
+import copy
+from collections import defaultdict
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import List, Set, Tuple, Dict, Literal, Union, Optional
+from shapely.geometry import LineString, Polygon, Point, mapping
+from shapely.ops import split
+from sklearn.metrics.pairwise import cosine_similarity
+
+import geopandas as gpd
+import numpy as np
+import pandana.network as pdna
+import pandas as pd
+from sklearn.feature_extraction.text import TfidfVectorizer
+from scipy.spatial.distance import cdist
+from shapely.wkt import loads
+from brails.utils.geoTools import haversine_dist
+from brails.workflow.TransportationElementHandler import (
+ ROADLANES_MAP,
+ ROADSPEED_MAP,
+ calculate_road_capacity,
+)
+
+
+class TransportationPerformance(ABC): # noqa: B024
+ """
+ An abstract base class for simulating transportation networks.
+
+ This class provides an interface for implementing methods that process
+ transportation data (such as system state and origin-destination files) and
+ compute network performance metrics.
+
+ Subclasses must define how to process these data inputs and update system
+ performance in concrete terms.
+
+ Attributes__
+ assets (list): A list of asset types (e.g., 'Bridge', 'Roadway',
+ 'Tunnel') to be analyzed in the transportation network.
+        csv_files (dict): A dictionary of CSV filenames (e.g., 'edges.csv',
+                          'nodes.csv', 'closed_edges.csv') that are required
+                          for network simulations.
+ capacity_map (dict): A mapping that relates damage states to capacity
+ ratios. For example, damage states of 0-2 may
+ represent full capacity (1), while higher damage
+ states reduce capacity (e.g., 0.5 or 0).
+ no_identifier (dict): A mapping of asset types to their unique
+ identifiers (e.g., 'StructureNumber' for Bridges,
+ 'TigerOID' for Roadways). These are used to track
+ and manage assets within the system.
+
+ Methods__
+        system_state(initial_state: str, csv_file_dir: str) -> None:
+ Abstract method to process a given det (damage state) file and
+ update the system state with capacity ratios. Also checks for
+ missing or necessary CSV files for network simulation.
+
+ update_od_file(old_nodes: str, old_det: str, new_nodes: str, new_det:
+ str, od: str, origin_ids: list) -> pd.DataFrame:
+ Abstract method to update the origin-destination (OD) file based
+ on population changes between time steps. The method tracks
+ population growth in nodes and generates new trips in the OD file
+ accordingly.
+
+        system_performance(state) -> None:
+ Abstract method to compute or update the performance of the
+ transportation system based on current state and available data.
+
+ Notes__
+ This is an abstract class. To use it, create a subclass that implements
+ the abstract methods for specific behavior related to transportation
+ network performance analysis.
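+
+    Example__
+        A minimal usage sketch (file names are illustrative placeholders,
+        not shipped defaults):
+
+        >>> tp = TransportationPerformance(
+        ...     network_inventory={'edges': 'edges.gpkg',
+        ...                        'nodes': 'nodes.gpkg'}
+        ... )
+        >>> tp.system_state('Results_det.json', '/path/to/csv/files')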
+ """
+
+ def __init__(
+ self,
+ assets: Union[list[str], None] = None, # noqa: UP007
+ capacity_map: Union[dict[int, float], None] = None, # noqa: UP007
+ csv_files: Union[dict[str, str], None] = None, # noqa: UP007
+ no_identifier: Union[dict[str, str], None] = None, # noqa: UP007
+ network_inventory: Union[dict[str, str], None] = None, # noqa: UP007
+ ):
+ """
+ Initialize the TransportationPerformance class with essential data.
+
+ Args__
+ assets (list): A list of asset types such as 'Bridge', 'Roadway',
+ 'Tunnel'.
+ capacity_map (dict): A mapping of damage states to capacity ratios.
+ csv_files (dict): A dictionary of CSV filenames for network data,
+ including 'network_edges', 'network_nodes',
+ 'edge_closures', and 'od_pairs'.
+ no_identifier (dict): A mapping of asset types to their unique
+ identifiers.
+            network_inventory (dict): A dictionary with 'edges' and 'nodes'
+                                      file paths.
+ """
+ if assets is None:
+ assets = ['Bridge', 'Roadway', 'Tunnel']
+ if capacity_map is None:
+ # capacity_map = {0: 1, 1: 1, 2: 1, 3: 0.5, 4: 0}
+ capacity_map = {0: 1, 1: 1, 2: 1, 3: 0, 4: 0}
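+            # Default mapping: damage states 0-2 keep the edge fully open
+            # (capacity ratio 1); states 3 and 4 close it (ratio 0).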
+ if csv_files is None:
+ csv_files = {
+ 'network_edges': 'edges.csv',
+ 'network_nodes': 'nodes.csv',
+ 'edge_closures': 'closed_edges.csv',
+ 'od_pairs': 'od.csv',
+ }
+ if no_identifier is None:
+ no_identifier = {
+ 'Bridge': 'StructureNumber',
+ 'Roadway': 'TigerOID',
+ 'Tunnel': 'TunnelNumber',
+ }
+
+ self.assets = assets
+ self.csv_files = csv_files
+ self.capacity_map = capacity_map
+ self.no_identifier = no_identifier
+ self.attribute_maps = {
+ 'lanes': ROADLANES_MAP,
+ 'speed': ROADSPEED_MAP,
+            'capacity': calculate_road_capacity,
+ }
+ self.network_inventory = network_inventory
+
+ # @abstractmethod
+ def system_state(
+ self, initial_state: str, csv_file_dir: str
+    ) -> None:
+ """
+ Process given det and damage results file to get updated system state.
+
+ This function reads a JSON file containing undamaged network attributes
+ and JSON file containing damage states and updates the state of the
+ network (i.e., determines edges experiencing capacity reductions)
+ using capacity ratios defined for each damage state. It also checks
+ for the existence of required CSV files, created the if missing, and
+ generates a file listing closed edges.
+
+ Args__
+            initial_state (str): Path to the JSON file containing the asset
+                                 data.
+ csv_file_dir (str): Directory containing the CSV files needed for
+ running network simulations.
+
+ Returns__
+            The function does not return any value. It creates an updated det
+            file and the CSV files necessary to run network simulations.
+
+ Raises__
+            FileNotFoundError: If `initial_state` or any required CSV files
+                are not found.
+            json.JSONDecodeError: If `initial_state` cannot be parsed as
+                JSON.
+ KeyError: If expected keys are missing in the JSON data.
+ ValueError: If there are issues with data conversions, e.g., JSON
+ to integer.
+
+ Examples__
+ >>> system_state('damage_states.json', '/path/to/csv/files')
+ Missing files: nodes.csv
+ All required files are present.
+ # This will process 'damage_states.json', update it, and use CSV
+ files in '/path/to/csv/files'
+
+ >>> system_state('damage_states.json', '/path/to/nonexistent/dir')
+ Missing files: edges.csv, nodes.csv
+ # This will attempt to process 'damage_states.json' and handle
+ missing files in a non-existent directory
+ """
+
+ def files_exist(directory, filenames):
+ # Convert directory path to a Path object
+ dir_path = Path(directory)
+
+ # Get a set of files in the directory
+ files_in_directory = {f.name for f in dir_path.iterdir() if f.is_file()}
+
+ # Check if each file exists in the directory
+ missing_files = [
+ filename
+ for filename in filenames
+ if filename not in files_in_directory
+ ]
+
+ if missing_files:
+ print(f"Missing files: {', '.join(missing_files)}") # noqa: T201
+ out = False
+ else:
+ print('All required files are present.') # noqa: T201
+ out = True
+
+ return out
+
+ # Create link closures for network simulations:
+ fexists = files_exist(csv_file_dir, [self.csv_files['network_edges']])
+
+        if not fexists:
+            self.get_graph_network(csv_file_dir)
+        # If the edge file already exists, it could be read back here:
+        # graph_edge_file = os.path.join(csv_file_dir, self.csv_files['network_edges'])
+        # graph_edge_df = pd.read_csv(graph_edge_file)
+
+ # Read damage states for det file and determine element capacity ratios
+ # 1 denotes fully open and 0 denotes fully closed:
+ capacity_dict = {}
+ closed_edges = []
+ with Path.open(Path(initial_state), encoding='utf-8') as file:
+ temp = json.load(file)
+ data = temp['TransportationNetwork']
+ for asset_type in self.assets:
+ datadict = data[asset_type]
+ for aim_id in datadict:
+ item_id = datadict[aim_id]['GeneralInformation'][
+ self.no_identifier[asset_type]
+ ]
+ damage_state = int(
+ datadict[aim_id]['R2Dres'][
+ 'R2Dres_MostLikelyCriticalDamageState'
+ ]
+ )
+ capacity_ratio = self.capacity_map[damage_state]
+ capacity_dict[item_id] = capacity_ratio
+ datadict[aim_id]['GeneralInformation']['Open'] = capacity_ratio
+ if capacity_ratio == 0:
+ if asset_type == 'Roadway':
+ closed_edges.append(int(aim_id))
+ elif asset_type == 'Bridge' or asset_type == 'Tunnel': # noqa: PLR1714
+ carried_edges = datadict[aim_id]['GeneralInformation'][
+ 'RoadID'
+ ]
+ carried_edges = [int(x) for x in carried_edges]
+ closed_edges += carried_edges
+
+ # Update det file with closure information:
+ temp = os.path.basename(initial_state).split('.') # noqa: PTH119
+ detfile_updated = temp[0] + '_updated.' + temp[1]
+ detfile_updated = os.path.join(csv_file_dir, detfile_updated) # noqa: PTH118
+
+ with Path.open(Path(detfile_updated), 'w', encoding='utf-8') as file:
+ json.dump(data, file, indent=2)
+
+ # Write closed edges:
+ edge_closure_file = os.path.join( # noqa: PTH118
+ csv_file_dir, self.csv_files['edge_closures']
+ )
+ with open(edge_closure_file, 'w', encoding='utf-8') as file: # noqa: PTH123
+ # Write each item on a new line
+ file.write('uniqueid\n')
+ for item in closed_edges:
+ file.write(str(item) + '\n')
+
+
+    def get_graph_network(self, csv_file_dir) -> None:
+        """Write Pandana-formatted edge and node CSVs from the inventory."""
+        # Get edges and nodes from the network inventory
+ edges_file = self.network_inventory['edges']
+ nodes_file = self.network_inventory['nodes']
+ edges_gpd = gpd.read_file(edges_file)
+ nodes_gpd = gpd.read_file(nodes_file)
+ # Format edges and nodes for Pandana
+ ## Edges
+ edges_gpd['id'] = edges_gpd['id'].astype(int)
+ edges_gpd = edges_gpd.rename(
+ columns={
+ 'id': 'uniqueid',
+ 'StartNode': 'start_nid',
+ 'EndNode': 'end_nid',
+ 'RoadType': 'type',
+ 'NumOfLanes': 'lanes',
+ 'MaxMPH': 'maxspeed',
+ }
+ )
+ edges_gpd['capacity'] = edges_gpd['lanes'].map(calculate_road_capacity)
+ edges_gpd = edges_gpd.to_crs('EPSG:6500')
+ edges_gpd['length'] = edges_gpd['geometry'].apply(lambda x: x.length)
+ edges_gpd = edges_gpd.to_crs('EPSG:4326')
+        edges_file = os.path.join(csv_file_dir, self.csv_files['network_edges'])  # noqa: PTH118
+        edges_gpd.to_csv(edges_file, index=False)
+ ## Nodes
+ nodes_gpd = nodes_gpd.rename(columns={'nodeID': 'node_id'})
+ nodes_gpd['lon'] = nodes_gpd['geometry'].apply(lambda x: x.x)
+ nodes_gpd['lat'] = nodes_gpd['geometry'].apply(lambda x: x.y)
+ nodes_file = os.path.join(csv_file_dir, self.csv_files['network_nodes']) # noqa: PTH118
+ nodes_gpd.to_csv(nodes_file, index=False)
+ return # noqa: PLR1711
+
+ # @abstractmethod
+ def system_performance( # noqa: C901, D102, PLR0915
+ self, state # noqa: ARG002
+ ) -> None: # Move the CSV creation here
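+        # Residual-demand traffic assignment: read the network edge/node and
+        # OD CSVs prepared by system_state, apply link closures, and run the
+        # hourly quarter/substep assignment loop, writing edge volumes and
+        # trip info under the simulation outputs directory.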
+ def substep_assignment(
+ nodes_df=None,
+ weighted_edges_df=None,
+ od_ss=None,
+ quarter_demand=None,
+ assigned_demand=None,
+ quarter_counts=4,
+ trip_info=None,
+ agent_time_limit=0,
+ sample_interval=1,
+ agents_path=None,
+ hour=None,
+ quarter=None,
+ ss_id=None,
+ alpha_f=0.3,
+ beta_f=3,
+ ):
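+            # One assignment substep: build a pandana network over the
+            # currently open edges, route each agent along its shortest path
+            # for the remaining time budget, accumulate edge volumes, and
+            # return the updated edge table plus the residual OD list that
+            # carries unfinished trips into the next substep.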
+ open_edges_df = weighted_edges_df.loc[weighted_edges_df['fft'] < 36000] # noqa: PLR2004
+
+ net = pdna.Network(
+ nodes_df['x'],
+ nodes_df['y'],
+ open_edges_df['start_nid'],
+ open_edges_df['end_nid'],
+ open_edges_df[['weight']],
+ twoway=False,
+ )
+
+ print('network') # noqa: T201
+ net.set(pd.Series(net.node_ids))
+ print('net') # noqa: T201
+
+ nodes_origin = od_ss['origin_nid'].to_numpy()
+ nodes_destin = od_ss['destin_nid'].to_numpy()
+ nodes_current = od_ss['current_nid'].to_numpy()
+ agent_ids = od_ss['agent_id'].to_numpy()
+ agent_current_links = od_ss['current_link'].to_numpy()
+ agent_current_link_times = od_ss['current_link_time'].to_numpy()
+ paths = net.shortest_paths(nodes_current, nodes_destin)
+
+ # check agent time limit
+ path_lengths = net.shortest_path_lengths(nodes_current, nodes_destin)
+ remove_agent_list = []
+            if agent_time_limit is not None:
+                for agent_idx, agent_id in enumerate(agent_ids):
+ planned_trip_length = path_lengths[agent_idx]
+ # agent_time_limit[agent_id]
+ trip_length_limit = agent_time_limit
+                    if planned_trip_length > trip_length_limit:
+ remove_agent_list.append(agent_id)
+
+ edge_travel_time_dict = weighted_edges_df['t_avg'].T.to_dict()
+ edge_current_vehicles = weighted_edges_df['veh_current'].T.to_dict()
+ edge_quarter_vol = weighted_edges_df['vol_true'].T.to_dict()
+ # edge_length_dict = weighted_edges_df['length'].T.to_dict()
+ od_residual_ss_list = []
+ # all_paths = []
+ path_i = 0
+ for path in paths:
+ trip_origin = nodes_origin[path_i]
+ trip_destin = nodes_destin[path_i]
+ agent_id = agent_ids[path_i]
+ # remove some agent (path too long)
+ if agent_id in remove_agent_list:
+ path_i += 1
+ # no need to update trip info
+ continue
+ remaining_time = (
+ 3600 / quarter_counts + agent_current_link_times[path_i]
+ )
+ used_time = 0
+ for edge_s, edge_e in zip(path, path[1:]):
+ edge_str = f'{edge_s}-{edge_e}'
+ edge_travel_time = edge_travel_time_dict[edge_str]
+
+ if (remaining_time > edge_travel_time) and (
+ edge_travel_time < 36000 # noqa: PLR2004
+ ):
+ # all_paths.append(edge_str)
+ # p_dist += edge_travel_time
+ remaining_time -= edge_travel_time
+ used_time += edge_travel_time
+ edge_quarter_vol[edge_str] += 1 * sample_interval
+ trip_stop = edge_e
+
+ if edge_str == agent_current_links[path_i]:
+ edge_current_vehicles[edge_str] -= 1 * sample_interval
+ else:
+ if edge_str != agent_current_links[path_i]:
+ edge_current_vehicles[edge_str] += 1 * sample_interval
+ new_current_link = edge_str
+ new_current_link_time = remaining_time
+ trip_stop = edge_s
+ od_residual_ss_list.append(
+ [
+ agent_id,
+ trip_origin,
+ trip_destin,
+ trip_stop,
+ new_current_link,
+ new_current_link_time,
+ ]
+ )
+ break
+ trip_info[(agent_id, trip_origin, trip_destin)][0] += (
+ 3600 / quarter_counts
+ )
+ trip_info[(agent_id, trip_origin, trip_destin)][1] += used_time
+ trip_info[(agent_id, trip_origin, trip_destin)][2] = trip_stop
+ trip_info[(agent_id, trip_origin, trip_destin)][3] = hour
+ trip_info[(agent_id, trip_origin, trip_destin)][4] = quarter
+ trip_info[(agent_id, trip_origin, trip_destin)][5] = ss_id
+ path_i += 1
+
+ new_edges_df = weighted_edges_df[
+ [
+ 'uniqueid',
+ 'u',
+ 'v',
+ 'start_nid',
+ 'end_nid',
+ 'fft',
+ 'capacity',
+ 'normal_fft',
+ 'normal_capacity',
+ 'length',
+ 'vol_true',
+ 'vol_tot',
+ 'veh_current',
+ 'geometry',
+ ]
+ ].copy()
+ # new_edges_df = new_edges_df.join(edge_volume, how='left')
+ # new_edges_df['vol_ss'] = new_edges_df['vol_ss'].fillna(0)
+ # new_edges_df['vol_true'] += new_edges_df['vol_ss']
+ new_edges_df['vol_true'] = new_edges_df.index.map(edge_quarter_vol)
+ new_edges_df['veh_current'] = new_edges_df.index.map(
+ edge_current_vehicles
+ )
+ # new_edges_df['vol_tot'] += new_edges_df['vol_ss']
+ new_edges_df['flow'] = (
+ new_edges_df['vol_true'] * quarter_demand / assigned_demand
+ ) * quarter_counts
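+            # Volume-delay update in BPR (Bureau of Public Roads) style:
+            # t_avg = fft * (1 + alpha_f * (flow / capacity) ** beta_f),
+            # capped at 36000 s just below.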
+ new_edges_df['t_avg'] = new_edges_df['fft'] * (
+ 1
+ + alpha_f
+ * (new_edges_df['flow'] / new_edges_df['capacity']) ** beta_f
+ )
+ new_edges_df['t_avg'] = np.where(
+ new_edges_df['t_avg'] > 36000, 36000, new_edges_df['t_avg'] # noqa: PLR2004
+ )
+ new_edges_df['t_avg'] = new_edges_df['t_avg'].round(2)
+
+ return new_edges_df, od_residual_ss_list, trip_info, agents_path
+
+ def write_edge_vol(
+ edges_df=None,
+ simulation_outputs=None,
+ quarter=None,
+ hour=None,
+ scen_nm=None,
+ ):
+ if 'flow' in edges_df.columns:
+ if edges_df.shape[0] < 10: # noqa: PLR2004
+ edges_df[
+ [
+ 'uniqueid',
+ 'start_nid',
+ 'end_nid',
+ 'capacity',
+ 'veh_current',
+ 'vol_true',
+ 'vol_tot',
+ 'flow',
+ 't_avg',
+ 'geometry',
+ ]
+ ].to_csv(
+ f'{simulation_outputs}/edge_vol/edge_vol_hr{hour}_'
+ f'qt{quarter}_{scen_nm}.csv',
+ index=False,
+ )
+
+ else:
+ edges_df.loc[
+ edges_df['vol_true'] > 0,
+ [
+ 'uniqueid',
+ 'start_nid',
+ 'end_nid',
+ 'capacity',
+ 'veh_current',
+ 'vol_true',
+ 'vol_tot',
+ 'flow',
+ 't_avg',
+ 'geometry',
+ ],
+ ].to_csv(
+ f'{simulation_outputs}/edge_vol/edge_vol_hr{hour}_'
+ f'qt{quarter}_{scen_nm}.csv',
+ index=False,
+ )
+
+ def write_final_vol(
+ edges_df=None,
+ simulation_outputs=None,
+ quarter=None,
+ hour=None,
+ scen_nm=None,
+ ):
+ edges_df.loc[
+ edges_df['vol_tot'] > 0,
+ ['uniqueid', 'start_nid', 'end_nid', 'vol_tot', 'geometry'],
+ ].to_csv(
+ f'{simulation_outputs}/edge_vol/final_edge_vol_hr{hour}_qt'
+ f'{quarter}_{scen_nm}.csv',
+ index=False,
+ )
+
+ def assignment( # noqa: C901, PLR0913
+ quarter_counts=6,
+ substep_counts=15,
+ substep_size=30000,
+ edges_df=None,
+ nodes_df=None,
+ od_all=None,
+ simulation_outputs=None,
+ scen_nm=None,
+ hour_list=None,
+ quarter_list=None,
+ cost_factor=None, # noqa: ARG001
+ closure_hours=None,
+ closed_links=None,
+ agent_time_limit=None,
+ sample_interval=1,
+ agents_path=None,
+ alpha_f=0.3,
+ beta_f=4,
+ ):
+ if closure_hours is None:
+ closure_hours = []
+
+            # Check if all OD pairs have a path; if not, set the pathless
+            # trips aside so they can be reported separately.
+ orig = od_all['origin_nid'].to_numpy()
+ dest = od_all['destin_nid'].to_numpy()
+ open_edges_df = edges_df[
+ ~edges_df['uniqueid'].isin(closed_links['uniqueid'].to_numpy())
+ ]
+ net = pdna.Network(
+ nodes_df['x'],
+ nodes_df['y'],
+ open_edges_df['start_nid'],
+ open_edges_df['end_nid'],
+ open_edges_df[['fft']],
+ twoway=False,
+ )
+ paths = net.shortest_paths(orig, dest)
+ no_path_ind = [i for i in range(len(paths)) if len(paths[i]) == 0]
+ od_no_path = od_all.iloc[no_path_ind].copy()
+ od_all = od_all.drop(no_path_ind)
+
+ od_all['current_nid'] = od_all['origin_nid']
+ trip_info = {
+ (od.agent_id, od.origin_nid, od.destin_nid): [
+ 0,
+ 0,
+ od.origin_nid,
+ 0,
+ od.hour,
+ od.quarter,
+ 0,
+ 0,
+ ]
+ for od in od_all.itertuples()
+ }
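+            # Assumed record layout, inferred from the columns written out
+            # later: [elapsed time, travel time used, stop node, stop hour,
+            # stop quarter, substep id, ...], keyed by
+            # (agent_id, origin_nid, destin_nid).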
+
+ # Quarters and substeps
+ # probability of being in each division of hour
+ if quarter_list is None:
+ quarter_counts = 4
+ else:
+ quarter_counts = len(quarter_list)
+ quarter_ps = [1 / quarter_counts for i in range(quarter_counts)]
+ quarter_ids = list(range(quarter_counts))
+
+ # initial setup
+ od_residual_list = []
+ # accumulator
+ edges_df['vol_tot'] = 0
+ edges_df['veh_current'] = 0
+
+ # Loop through days and hours
+ for _day in ['na']:
+ for hour in hour_list:
+ gc.collect()
+ if hour in closure_hours:
+ for row in closed_links.itertuples():
+ edges_df.loc[
+ (edges_df['uniqueid'] == row.uniqueid), 'capacity'
+ ] = 1
+ edges_df.loc[
+ (edges_df['uniqueid'] == row.uniqueid), 'fft'
+ ] = 36000
+ else:
+ edges_df['capacity'] = edges_df['normal_capacity']
+ edges_df['fft'] = edges_df['normal_fft']
+
+ # Read OD
+ od_hour = od_all[od_all['hour'] == hour].copy()
+ if od_hour.shape[0] == 0:
+ od_hour = pd.DataFrame([], columns=od_all.columns)
+ od_hour['current_link'] = None
+ od_hour['current_link_time'] = 0
+
+ # Divide into quarters
+                if 'quarter' not in od_all.columns:
+                    od_quarter_msk = np.random.choice(
+                        quarter_ids, size=od_hour.shape[0], p=quarter_ps
+                    )
+                    od_hour['quarter'] = od_quarter_msk
+
+ if quarter_list is None:
+ quarter_list = quarter_ids
+ for quarter in quarter_list:
+ # New OD in assignment period
+ od_quarter = od_hour.loc[
+ od_hour['quarter'] == quarter,
+ [
+ 'agent_id',
+ 'origin_nid',
+ 'destin_nid',
+ 'current_nid',
+ 'current_link',
+ 'current_link_time',
+ ],
+ ]
+                    # Add residual OD
+ od_residual = pd.DataFrame(
+ od_residual_list,
+ columns=[
+ 'agent_id',
+ 'origin_nid',
+ 'destin_nid',
+ 'current_nid',
+ 'current_link',
+ 'current_link_time',
+ ],
+ )
+ od_residual['quarter'] = quarter
+ # Total OD in each assignment period is the combined
+ # of new and residual OD:
+ od_quarter = pd.concat(
+ [od_quarter, od_residual], sort=False, ignore_index=True
+ )
+ # Residual OD is no longer residual after it has been
+ # merged to the quarterly OD:
+ od_residual_list = []
+ od_quarter = od_quarter[
+ od_quarter['current_nid'] != od_quarter['destin_nid']
+ ]
+
+ # total demand for this quarter, including total and
+ # residual demand:
+ quarter_demand = od_quarter.shape[0]
+ # how many among the OD pairs to be assigned in this
+ # quarter are actually residual from previous quarters
+ residual_demand = od_residual.shape[0]
+ assigned_demand = 0
+
+ substep_counts = max(1, (quarter_demand // substep_size) + 1)
+ logging.info(
+ f'HR {hour} QT {quarter} has '
+ f'{quarter_demand}/{residual_demand} od'
+ f's/residuals {substep_counts} substeps'
+ )
+ substep_ps = [
+ 1 / substep_counts for i in range(substep_counts)
+ ]
+ substep_ids = list(range(substep_counts))
+ od_substep_msk = np.random.choice(
+ substep_ids, size=quarter_demand, p=substep_ps
+ )
+ od_quarter['ss_id'] = od_substep_msk
+
+ # reset volume at each quarter
+ edges_df['vol_true'] = 0
+
+ for ss_id in substep_ids:
+ gc.collect()
+
+ time_ss_0 = time.time()
+ logging.info(
+                            f'Hour: {hour}, Quarter: {quarter}, '
+                            f'SS ID: {ss_id}'
+ )
+ od_ss = od_quarter[od_quarter['ss_id'] == ss_id]
+ assigned_demand += od_ss.shape[0]
+ if assigned_demand == 0:
+ continue
+ # calculate weight
+ weighted_edges_df = edges_df.copy()
+ # weight by travel distance
+ # weighted_edges_df['weight'] = edges_df['length']
+ # weight by travel time
+ # weighted_edges_df['weight'] = edges_df['t_avg']
+ # weight by travel time
+ # weighted_edges_df['weight'] = (edges_df['t_avg']
+ # - edges_df['fft']) * 0.5 + edges_df['length']*0.1
+ # + cost_factor*edges_df['length']*0.1*(
+ # edges_df['is_highway'])
+ # 10 yen per 100 m --> 0.1 yen per m
+ weighted_edges_df['weight'] = edges_df['t_avg']
+ # weighted_edges_df['weight'] = np.where(
+ # weighted_edges_df['weight']<0.1, 0.1,
+ # weighted_edges_df['weight'])
+
+ # traffic assignment with truncated path
+ (
+ edges_df,
+ od_residual_ss_list,
+ trip_info,
+ agents_path,
+ ) = substep_assignment(
+ nodes_df=nodes_df,
+ weighted_edges_df=weighted_edges_df,
+ od_ss=od_ss,
+ quarter_demand=quarter_demand,
+ assigned_demand=assigned_demand,
+ quarter_counts=quarter_counts,
+ trip_info=trip_info,
+ agent_time_limit=agent_time_limit,
+ sample_interval=sample_interval,
+ agents_path=agents_path,
+ hour=hour,
+ quarter=quarter,
+ ss_id=ss_id,
+ alpha_f=alpha_f,
+ beta_f=beta_f,
+ )
+
+ od_residual_list += od_residual_ss_list
+ # write_edge_vol(edges_df=edges_df,
+ # simulation_outputs=simulation_outputs,
+ # quarter=quarter,
+ # hour=hour,
+ # scen_nm='ss{}_{}'.format(ss_id, scen_nm))
+ logging.info(
+ f'HR {hour} QT {quarter} SS {ss_id}'
+ ' finished, max vol '
+ f'{np.max(edges_df["vol_true"])}, '
+ f'time {time.time() - time_ss_0}'
+ )
+
+ # write quarterly results
+ edges_df['vol_tot'] += edges_df['vol_true']
+ if True: # hour >=16 or (hour==15 and quarter==3):
+ write_edge_vol(
+ edges_df=edges_df,
+ simulation_outputs=simulation_outputs,
+ quarter=quarter,
+ hour=hour,
+ scen_nm=scen_nm,
+ )
+
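+            # Periodic checkpoint: write the accumulated trip info to CSV
+            # when hour % 3 == 0.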
+ if hour % 3 == 0:
+ trip_info_df = pd.DataFrame(
+ [
+ [
+ trip_key[0],
+ trip_key[1],
+ trip_key[2],
+ trip_value[0],
+ trip_value[1],
+ trip_value[2],
+ trip_value[3],
+ trip_value[4],
+ trip_value[5],
+ ]
+ for trip_key, trip_value in trip_info.items()
+ ],
+ columns=[
+ 'agent_id',
+ 'origin_nid',
+ 'destin_nid',
+ 'travel_time',
+ 'travel_time_used',
+ 'stop_nid',
+ 'stop_hour',
+ 'stop_quarter',
+ 'stop_ssid',
+ ],
+ )
+ trip_info_df.to_csv(
+ simulation_outputs + '/trip_info'
+ f'/trip_info_{scen_nm}_hr{hour}'
+ '.csv',
+ index=False,
+ )
+
+ # output individual trip travel time and stop location
+
+ trip_info_df = pd.DataFrame(
+ [
+ [
+ trip_key[0],
+ trip_key[1],
+ trip_key[2],
+ trip_value[0],
+ trip_value[1],
+ trip_value[2],
+ trip_value[3],
+ trip_value[4],
+ trip_value[5],
+ ]
+ for trip_key, trip_value in trip_info.items()
+ ],
+ columns=[
+ 'agent_id',
+ 'origin_nid',
+ 'destin_nid',
+ 'travel_time',
+ 'travel_time_used',
+ 'stop_nid',
+ 'stop_hour',
+ 'stop_quarter',
+ 'stop_ssid',
+ ],
+ )
+ # Add the no path OD to the trip info
+ trip_info_no_path = od_no_path.drop(
+ columns=[
+ col
+ for col in od_no_path.columns
+ if col not in ['agent_id', 'origin_nid', 'destin_nid']
+ ]
+ )
+ trip_info_no_path['travel_time'] = 360000
+ trip_info_no_path['travel_time_used'] = np.nan
+ trip_info_no_path['stop_nid'] = np.nan
+ trip_info_no_path['stop_hour'] = np.nan
+ trip_info_no_path['stop_quarter'] = np.nan
+ trip_info_no_path['stop_ssid'] = np.nan
+ trip_info_df = pd.concat(
+ [trip_info_df, trip_info_no_path], ignore_index=True
+ )
+
+ trip_info_df.to_csv(
+ simulation_outputs + f'/trip_info/trip_info_{scen_nm}.csv',
+ index=False,
+ )
+
+ write_final_vol(
+ edges_df=edges_df,
+ simulation_outputs=simulation_outputs,
+ quarter=quarter,
+ hour=hour,
+ scen_nm=scen_nm,
+ )
+
+        network_edges = self.csv_files['network_edges']
+        network_nodes = self.csv_files['network_nodes']
+        closed_edges_file = self.csv_files['edge_closures']
+        demand_file = self.csv_files['od_pairs']
+ simulation_outputs = self.simulation_outputs
+ scen_nm = 'simulation_out'
+
+ hour_list = list(range(6, 9))
+ quarter_list = [0, 1, 2, 3, 4, 5]
+ closure_hours = hour_list
+
+ edges_df = pd.read_csv(network_edges)
+ # edges_df = edges_df[["uniqueid", "geometry", "osmid", "length", "type",
+ # "lanes", "maxspeed", "fft", "capacity",
+ # "start_nid", "end_nid"]]
+ edges_df = edges_df[
+ [
+ 'uniqueid',
+ 'geometry',
+ 'length',
+ 'type',
+ 'lanes',
+ 'maxspeed',
+ 'capacity',
+ 'start_nid',
+ 'end_nid',
+ ]
+ ]
+ edges_df = gpd.GeoDataFrame(
+ edges_df, crs='epsg:4326', geometry=edges_df['geometry'].map(loads)
+ )
+        # Pay attention to the unit conversion: length is in meters, maxspeed
+        # is in mph, and fft (free-flow travel time) is in seconds.
+ edges_df['fft'] = edges_df['length'] / edges_df['maxspeed'] * 2.23694
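+        # (1 m/s = 2.23694 mph, so dividing length [m] by maxspeed [mph] and
+        # multiplying by 2.23694 yields free-flow travel time in seconds.)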
+ edges_df = edges_df.sort_values(by='fft', ascending=False).drop_duplicates(
+ subset=['start_nid', 'end_nid'], keep='first'
+ )
+ edges_df['edge_str'] = (
+ edges_df['start_nid'].astype('str')
+ + '-'
+ + edges_df['end_nid'].astype('str')
+ )
+ edges_df['capacity'] = np.where(
+ edges_df['capacity'] < 1, 950, edges_df['capacity']
+ )
+ edges_df['normal_capacity'] = edges_df['capacity']
+ edges_df['normal_fft'] = edges_df['fft']
+ edges_df['t_avg'] = edges_df['fft']
+ edges_df['u'] = edges_df['start_nid']
+ edges_df['v'] = edges_df['end_nid']
+ edges_df = edges_df.set_index('edge_str')
+ # closure locations
+ closed_links = pd.read_csv(closed_edges_file)
+ for row in closed_links.itertuples():
+ edges_df.loc[(edges_df['uniqueid'] == row.uniqueid), 'capacity'] = 1
+ edges_df.loc[(edges_df['uniqueid'] == row.uniqueid), 'fft'] = 36000
+ # output closed file for visualization
+ # edges_df.loc[edges_df['fft'] == 36000, ['uniqueid',
+ # 'start_nid',
+ # 'end_nid',
+ # 'capacity',
+ # 'fft',
+ # 'geometry']].to_csv(
+ # simulation_outputs +
+ # '/closed_links_'
+ # f'{scen_nm}.csv')
+
+ # nodes processing
+ nodes_df = pd.read_csv(network_nodes)
+
+ nodes_df['x'] = nodes_df['lon']
+ nodes_df['y'] = nodes_df['lat']
+ nodes_df = nodes_df.set_index('node_id')
+
+ # demand processing
+ t_od_0 = time.time()
+ od_all = pd.read_csv(demand_file)
+ t_od_1 = time.time()
+ logging.info('%d sec to read %d OD pairs', t_od_1 - t_od_0, od_all.shape[0])
+ # run residual_demand_assignment
+ assignment(
+ edges_df=edges_df,
+ nodes_df=nodes_df,
+ od_all=od_all,
+ simulation_outputs=simulation_outputs,
+ scen_nm=scen_nm,
+ hour_list=hour_list,
+ quarter_list=quarter_list,
+ closure_hours=closure_hours,
+ closed_links=closed_links,
+ )
+
+ # @abstractmethod
+ def update_od_file(
+ self,
+ old_nodes: str,
+ old_det: str,
+ new_nodes: str,
+ new_det: str,
+ od: str,
+ origin_ids: list[int],
+ ) -> pd.DataFrame:
+ """
+ Update origin-destination (OD) file from changes in population data.
+
+ This function updates an OD file by calculating the population changes
+ at each node between two time steps and generates trips originating
+ from specified origins and ending at nodes where the population has
+ increased. The updated OD data is saved to a new file.
+
+ Args__
+ old_nodes (str): Path to the CSV file containing the node
+ information at the previous time step.
+ old_det (str): Path to the JSON file containing the building
+ information at the previous time step.
+ new_nodes (str): Path to the CSV file containing the node
+ information at the current time step.
+ new_det (str): Path to the JSON file containing the building
+ information at the current time step.
+ od (str): Path to the existing OD file to be updated.
+ origin_ids (List[int]): List of IDs representing possible origins
+ for generating trips.
+
+ Returns__
+ pd.DataFrame: The updated OD DataFrame with new trips based on
+ population changes.
+
+ Raises__
+ FileNotFoundError: If any of the provided file paths are incorrect
+ or the files do not exist.
+ json.JSONDecodeError: If the JSON files cannot be read or parsed
+ correctly.
+ KeyError: If expected keys are missing in the JSON data.
+ ValueError: If there are issues with data conversions or
+ calculations.
+
+ Examples__
+ >>> update_od_file(
+ 'old_nodes.csv',
+ 'old_building_data.json',
+ 'new_nodes.csv',
+ 'new_building_data.json',
+ 'existing_od.csv',
+ [1, 2, 3, 4]
+ )
+ The function will process the provided files, calculate population
+ changes, and update the OD file with new trips. The result will be
+ saved to 'updated_od.csv'.
+
+ Notes__
+ - Ensure that the columns `lat`, `lon`, and `node_id` are present
+ in the nodes CSV files.
+ - The `det` files should contain the `Buildings` and
+ `GeneralInformation` sections with appropriate keys.
+ - The OD file should have the columns `agent_id`, `origin_nid`,
+ `destin_nid`, `hour`, and `quarter`.
+ """
+
+ # Extract the building information from the det file and convert it to
+ # a pandas dataframe
+ def extract_building_from_det(det):
+ # Open the det file
+ with Path.open(det, encoding='utf-8') as file:
+ # Return the JSON object as a dictionary
+ json_data = json.load(file)
+
+ # Extract the required information and convert it to a pandas
+ # dataframe
+ extracted_data = []
+
+ for aim_id, info in json_data['Buildings']['Building'].items():
+ general_info = info.get('GeneralInformation', {})
+ extracted_data.append(
+ {
+ 'AIM_id': aim_id,
+ 'Latitude': general_info.get('Latitude'),
+ 'Longitude': general_info.get('Longitude'),
+ 'Population': general_info.get('Population'),
+ }
+ )
+
+ return pd.DataFrame(extracted_data)
+
+ # Aggregate the population in buildings to the closest road network
+ # node
+ def closest_neighbour(building_df, nodes_df):
+ # Find the nearest road network node to each building
+ nodes_xy = np.array(
+ [nodes_df['lat'].to_numpy(), nodes_df['lon'].to_numpy()]
+ ).transpose()
+ building_df['closest_node'] = building_df.apply(
+ lambda x: cdist(
+ [(x['Latitude'], x['Longitude'])], nodes_xy
+ ).argmin(),
+ axis=1,
+ )
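+            # Note: cdist measures Euclidean distance in degree space here, a
+            # nearest-neighbor approximation rather than a geodesic distance;
+            # adequate at city scale, where degree distortion is small.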
+
+ # Merge the road network and building dataframes
+ merged_df = nodes_df.merge(
+ building_df, left_on='node_id', right_on='closest_node', how='left'
+ )
+ merged_df = merged_df.drop(
+ columns=['AIM_id', 'Latitude', 'Longitude', 'closest_node']
+ )
+ merged_df = merged_df.fillna(0)
+
+            # Aggregate the population of the nearest buildings onto each
+            # road network node
+ updated_nodes_df = (
+ merged_df.groupby('node_id')
+ .agg(
+ {
+ 'lon': 'first',
+ 'lat': 'first',
+ 'geometry': 'first',
+ 'Population': 'sum',
+ }
+ )
+ .reset_index()
+ )
+
+ return updated_nodes_df # noqa: RET504
+
+ # Function to add the population information to the nodes file
+ def update_nodes_file(nodes, det):
+ # Read the nodes file
+ nodes_df = pd.read_csv(nodes)
+ # Extract the building information from the det file and convert it
+ # to a pandas dataframe
+ building_df = extract_building_from_det(det)
+ # Aggregate the population in buildings to the closest road network
+ # node
+ updated_nodes_df = closest_neighbour(building_df, nodes_df)
+
+ return updated_nodes_df # noqa: RET504
+
+ # Read the od file
+ od_df = pd.read_csv(od)
+ # Add the population information to nodes dataframes at the last and
+ # current time steps
+ old_nodes_df = update_nodes_file(old_nodes, old_det)
+ new_nodes_df = update_nodes_file(new_nodes, new_det)
+ # Calculate the population changes at each node (assuming that
+ # population will only increase at each node)
+ population_change_df = old_nodes_df.copy()
+        population_change_df['Population Change'] = (
+            new_nodes_df['Population'] - old_nodes_df['Population']
+        ).astype(int)
+ # Randomly generate the trips that start at one of the connections
+ # between Alameda Island and Oakland, and end at the nodes where
+ # population increases and append them to the od file
+ # Generate OD data for each node with increased population and append
+ # it to the od file
+ for _, row in population_change_df.iterrows():
+ # Only process rows with positive population difference
+            if row['Population Change'] > 0:
+                for _ in range(row['Population Change']):
+ # Generate random origin_nid
+ origin_nid = np.random.choice(origin_ids)
+ # Set the destin_nid to the node_id of the increased
+ # population
+ destin_nid = row['node_id']
+ # Generate random hour and quarter
+ hour = np.random.randint(5, 24)
+ quarter = np.random.randint(0, 5)
+                # Append to the od dataframe (DataFrame.append was removed in
+                # pandas 2.x, so use pd.concat instead)
+                od_df = pd.concat(
+                    [
+                        od_df,
+                        pd.DataFrame(
+                            [
+                                {
+                                    'agent_id': 0,
+                                    'origin_nid': origin_nid,
+                                    'destin_nid': destin_nid,
+                                    'hour': hour,
+                                    'quarter': quarter,
+                                }
+                            ]
+                        ),
+                    ],
+                    ignore_index=True,
+                )
+        od_df.to_csv('updated_od.csv', index=False)
+ return od_df
+
+ # def get_graph_network(self,
+ # det_file_path: str,
+ # output_files: Optional[List[str]] = None,
+ # save_additional_attributes:
+ # Literal['', 'residual_demand'] = ''
+ # ) -> Tuple[Dict, Dict]:
+ # """
+ # Create a graph network from inventory data .
+
+ # This function processes inventory files containing road and structural
+ # data, constructs a graph network with nodes and edges, and optionally
+ # saves additional attributes such as residual demand. The node and edge
+ # features are saved to specified output files.
+
+ # Args__
+ # det_file_path (str): The path to the deterministic result JSON
+ # output_files (Optional[List[str]]): A list of file paths where the
+ # node and edge features will be saved. The first file in the
+ # list is used for edges and the second for nodes.
+ # save_additional_attributes (Literal['', 'residual_demand']):
+ # A flag indicating whether to save additional attributes.
+ # The only supported additional attribute is 'residual_demand'.
+
+ # Returns__
+ # Tuple[Dict, Dict]: A tuple containing two dictionaries:
+ # - The first dictionary contains the edge features.
+ # - The second dictionary contains the node features.
+
+ # Example__
+ # >>> det_file_path = 'path/to/Results_det.json'
+ # >>> output_files = ['edges.csv', 'nodes.csv']
+ # >>> save_additional_attributes = 'residual_demand'
+ # >>> edges, nodes = get_graph_network(det_file_path,
+ # output_files,
+ # save_additional_attributes)
+ # >>> print(edges)
+ # >>> print(nodes)
+ # """
+ # # if output_files is None:
+ # # self.graph_network['output_files'] = ['edges.csv', 'nodes.csv']
+
+ # def create_circle_from_lat_lon(lat, lon, radius_ft, num_points=100):
+ # """
+ # Create a circle polygon from latitude and longitude.
+
+ # Args__
+ # lat (float): Latitude of the center.
+ # lon (float): Longitude of the center.
+ # radius_ft (float): Radius of the circle in feet.
+ # num_points (int): Number of points to approximate the circle.
+
+ # Returns__
+ # Polygon: A Shapely polygon representing the circle.
+ # """
+    #             # Earth's radius in feet
+    #             R_EARTH_FT = 20925721.784777
+
+    #             # Convert radius from feet to radians
+    #             radius_rad = radius_ft / R_EARTH_FT
+
+ # # Convert latitude and longitude to radians
+ # lat_rad = np.radians(lat)
+ # lon_rad = np.radians(lon)
+
+ # # Generate points around the circle
+ # angle = np.linspace(0, 2 * np.pi, num_points)
+ # lat_points = lat_rad + radius_rad * np.cos(angle)
+ # lon_points = lon_rad + radius_rad * np.sin(angle)
+
+ # # Convert radians back to degrees
+ # lat_points_deg = np.degrees(lat_points)
+ # lon_points_deg = np.degrees(lon_points)
+
+ # # Create a polygon from the points
+ # points = list(zip(lon_points_deg, lat_points_deg))
+ # return Polygon(points)
+
+ # def parse_element_geometries_from_geojson(
+ # det_file: str,
+ # save_additional_attributes: str
+ # ) -> Tuple[List[Dict], Tuple[List[LineString], List[Dict]]]:
+ # """
+ # Parse geometries from R2D deterministic result json.
+
+ # This function reads json files specified in `output_files`. It
+ # extracts and parses `LineString` geometries from files that contain
+ # "road" in their name. For other files, it accumulates point
+ # features.
+
+ # Args__
+ # det_file (str): Path to the R2D deterministic result Json.
+
+ # Returns__
+ # tuple: A tuple containing two elements:
+ # - ptdata (list of dict): List of point features parsed from
+ # GeoJSON files for bridges and tunnels
+ # - road_data (tuple): A tuple containing:
+ # - road_polys (list of LineString): List of `LineString`
+ # objects created from the road geometries in GeoJSON
+ # files that contain "road" in their name.
+ # - road_features (list of dict): List of road features
+ # as dictionaries from GeoJSON files that contain
+ # "roads" in their name.
+
+ # Raises__
+ # FileNotFoundError: If any of the specified files in
+ # `output_files` do not exist.
+ # json.JSONDecodeError: If a file cannot be parsed as valid JSON.
+ # """
+ # ptdata = {}
+ # with open(det_file, "r", encoding="utf-8") as file:
+ # # Read the JSON file:
+ # temp = json.load(file)
+ # if 'TransportationNetwork' not in temp:
+ # raise KeyError(
+ # 'The deterministic result JSON file does not contain TransportationNetwork')
+ # temp = temp['TransportationNetwork']
+ # # If the file contains road information:
+ # if 'Roadway' in temp:
+ # # Extract road features:
+ # road_features = temp['Roadway']
+ # # Read road polygons, add asset type information to
+ # # road features and, if required, parse existing road
+ # # attributes to infer information required for network
+ # # analysis:
+ # road_polys = {}
+ # for AIM_ID, item in road_features.items():
+ # road_polys.update({AIM_ID: loads(item["GeneralInformation"]["geometry"])})
+ # # road_features[ind]['properties']['asset_type'] = \
+ # # 'road'
+ # if save_additional_attributes:
+ # # Access properties for element at index ind:
+ # properties = item['GeneralInformation']
+ # # Get MTFCC value:
+ # mtfcc = properties['MTFCC']
+ # # Update road features for the indexed element
+ # # with number of lanes, maximum speed, and
+ # # road capacity:
+ # properties.update({
+ # 'lanes':
+ # self.attribute_maps['lanes'][mtfcc],
+ # 'maxspeed':
+ # self.attribute_maps['speed'][mtfcc],
+ # 'capacity':
+ # self.attribute_maps['capacity'][
+ # properties['lanes']]
+ # })
+ # if 'Bridge' in temp:
+ # ptdata['Bridge'] = temp['Bridge']
+ # # for feature in temp["features"]:
+ # # feature['properties']['asset_type'] = asset_type
+ # # ptdata += temp["features"]
+ # if 'Tunnel' in temp:
+ # ptdata['Tunnel'] = temp['Tunnel']
+
+ # return ptdata, (road_polys, road_features)
+
+ # def find_intersections(lines: List[LineString]) -> Set[Point]:
+ # """
+ # Find intersection points between pairs of LineString geometries.
+
+ # This function takes a list of `LineString` objects and identifies
+ # points where any two lines intersect. The intersections are
+ # returned as a set of `Point` objects.
+
+ # Args__
+ # lines (List[LineString]): A list of `LineString` objects. The
+ # function computes intersections between each pair of
+ # `LineString` objects in this list.
+
+ # Returns__
+ # Set[Point]: A set of `Point` objects representing the
+ # intersection points between the `LineString` objects. If
+ # multiple intersections occur at the same location, it will
+ # only be included once in the set.
+
+ # Example__
+ # >>> from shapely.geometry import LineString
+ # >>> line1 = LineString([(0, 0), (1, 1)])
+ # >>> line2 = LineString([(0, 1), (1, 0)])
+ # >>> find_intersections([line1, line2])
+ # {}
+
+ # Notes__
+ # - The function assumes that all input geometries are valid
+ # `LineString`
+ # objects.
+ # - The resulting set may be empty if no intersections are found.
+
+ # Raises__
+ # TypeError: If any element in `lines` is not a `LineString`
+ # object.
+ # """
+ # intersections = set()
+ # for i, line1 in enumerate(lines):
+ # for line2 in lines[i + 1:]:
+ # if line1.intersects(line2):
+ # inter_points = line1.intersection(line2)
+ # if inter_points.geom_type == "Point":
+ # intersections.add(inter_points)
+ # elif inter_points.geom_type == "MultiPoint":
+ # intersections.update(inter_points.geoms)
+ # return intersections
+
+ # def cut_lines_at_intersections(lines: List[LineString],
+ # line_features: List[Dict],
+ # intersections: List[Point]
+ # ) -> List[LineString]:
+ # """
+ # Cut LineStrings at intersection points & return resulting segments.
+
+ # This function takes a list of `LineString` objects and a list of
+ # `Point` objects representing intersection points. For each
+ # `LineString`, it splits the line at each intersection point. The
+ # resulting segments are collected and returned.
+
+ # Args__
+ # lines (List[LineString]): A list of `LineString` objects to be
+ # cut at the intersection points.
+ # line_features (List[Dict]): List of features for the
+ # `LineString` objects in lines.
+ # intersections (List[Point]): A list of `Point` objects where
+ # the lines are intersected and split.
+
+ # Returns__
+ # List[LineString]: A list of `LineString` objects resulting from
+ # cutting the original lines at the
+ # intersection points.
+
+ # Notes__
+ # - The `split` function from `shapely.ops` is used to perform
+ # the cutting of lines at intersection points.
+ # - The function handles cases where splitting results in a
+ # `GeometryCollection` by extracting only `LineString`
+ # geometries.
+
+ # Example__
+ # >>> from shapely.geometry import LineString, Point
+ # >>> from shapely.ops import split
+ # >>> lines = [
+ # ... LineString([(0, 0), (2, 2)]),
+ # ... LineString([(2, 0), (0, 2)])
+ # ... ]
+ # >>> intersections = [
+ # ... Point(1, 1)
+ # ... ]
+ # >>> cut_lines_at_intersections(lines, intersections)
+ # [,
+ # ]
+ # """
+ # new_lines = []
+ # new_line_features = []
+
+ # for ind_line, line in enumerate(lines):
+ # segments = [line] # Start with the original line
+ # for point in intersections:
+ # new_segments = []
+ # features = []
+ # for segment in segments:
+ # if segment.intersects(point):
+ # split_result = split(segment, point)
+ # if split_result.geom_type == "GeometryCollection":
+ # new_segments.extend(
+ # geom
+ # for geom in split_result.geoms
+ # if geom.geom_type == "LineString"
+ # )
+ # features.extend([copy.deepcopy(
+ # line_features[ind_line])
+ # for _ in range(
+ # len(
+ # split_result.geoms
+ # ))])
+ # elif split_result.geom_type == "LineString":
+ # segments.append(split_result)
+ # features.append(line_features[ind_line])
+ # else:
+ # new_segments.append(segment)
+ # features.append(line_features[ind_line])
+ # segments = new_segments
+
+ # # Add remaining segments that were not intersected by any
+ # # points:
+ # new_lines.extend(segments)
+ # new_line_features.extend(features)
+
+ # return (new_lines, new_line_features)
+
+ # def save_cut_lines_and_intersections(lines: List[LineString],
+ # points: List[Point]) -> None:
+ # """
+ # Save LineString and Point objects to separate GeoJSON files.
+
+ # This function converts lists of `LineString` and `Point` objects to
+ # GeoJSON format and saves them to separate files. The `LineString`
+ # objects are saved to "lines.geojson", and the `Point` objects are
+ # saved to "points.geojson".
+
+ # Args__
+ # lines (List[LineString]): A list of `LineString` objects to be
+ # saved in GeoJSON format.
+ # intersections (List[Point]): A list of `Point` objects to be
+ # saved in GeoJSON format.
+
+ # Returns__
+ # None: This function does not return any value. It writes
+ # GeoJSON data to files.
+
+ # Notes__
+ # - The function uses the `mapping` function from
+ # `shapely.geometry` to convert geometries to GeoJSON format.
+ # - Two separate GeoJSON files are created: one for lines and one
+ # for points.
+ # - The output files are named "lines.geojson" and
+ # "points.geojson" respectively.
+
+ # Example__
+ # >>> from shapely.geometry import LineString, Point
+ # >>> lines = [
+ # ... LineString([(0, 0), (1, 1)]),
+ # ... LineString([(1, 1), (2, 2)])
+ # ... ]
+ # >>> points = [
+ # ... Point(0.5, 0.5),
+ # ... Point(1.5, 1.5)
+ # ... ]
+ # >>> save_cut_lines_and_intersections(lines, points)
+ # # This will create "lines.geojson" and "points.geojson" files
+ # with the corresponding GeoJSON data.
+ # """
+ # # Convert LineString objects to GeoJSON format
+ # features = []
+ # for line in lines:
+ # features.append(
+ # {"type": "Feature", "geometry": mapping(
+ # line), "properties": {}}
+ # )
+
+ # # Create a GeoJSON FeatureCollection
+ # geojson = {"type": "FeatureCollection", "features": features}
+
+ # # Save the GeoJSON to a file
+ # with open("lines.geojson", "w", encoding="utf-8") as file:
+ # json.dump(geojson, file, indent=2)
+
+ # # Convert Point objects to GeoJSON format
+ # features = []
+ # for point in points:
+ # features.append(
+ # {"type": "Feature", "geometry": mapping(
+ # point), "properties": {}}
+ # )
+
+ # # Create a GeoJSON FeatureCollection
+ # geojson = {"type": "FeatureCollection", "features": features}
+
+ # # Save the GeoJSON to a file
+ # with open("points.geojson", "w", encoding="utf-8") as file:
+ # json.dump(geojson, file, indent=2)
+
+ # def find_repeated_line_pairs(lines: List[LineString]) -> Set[Tuple]:
+ # """
+ # Find and groups indices of repeated LineString objects from a list.
+
+ # This function processes a list of `LineString` objects to identify
+ # and group all unique index pairs where LineString objects are
+ # repeated. The function converts each `LineString` to its
+ # Well-Known Text (WKT) representation to identify duplicates.
+
+ # Args__
+ # lines (List[LineString]): A list of `LineString` objects to be
+ # analyzed for duplicates.
+
+ # Returns__
+ # Set[Tuple]: A set of tuples, where each tuple contains indices
+ # for LineString objects that are repeated.
+
+ # Raises__
+ # TypeError: If any element in the `lines` list is not an
+ # instance of `LineString`.
+
+ # Example__
+ # >>> from shapely.geometry import LineString
+ # >>> lines = [
+ # ... LineString([(0, 0), (1, 1)]),
+ # ... LineString([(0, 0), (1, 1)]),
+ # ... LineString([(1, 1), (2, 2)]),
+ # ... LineString([(0, 0), (1, 1)]),
+ # ... LineString([(1, 1), (2, 2)])
+ # ... ]
+ # >>> find_repeated_line_pairs(lines)
+ # [{0, 1, 3}, {2, 4}]
+ # """
+ # line_indices = defaultdict(list)
+
+ # for id, line in lines.items():
+ # if not isinstance(line, LineString):
+ # raise TypeError(
+ # 'All elements in the input list must be LineString'
+ # ' objects.')
+
+ # # Convert LineString to its WKT representation to use as a
+ # # unique identifier:
+ # line_wkt = line.wkt
+ # line_indices[line_wkt].append(id)
+
+ # repeated_pairs = set()
+ # for indices in line_indices.values():
+ # if len(indices) > 1:
+ # # Create pairs of all indices for the repeated LineString
+ # for i, _ in enumerate(indices):
+ # for j in range(i + 1, len(indices)):
+ # repeated_pairs.add((indices[i], indices[j]))
+
+ # repeated_pairs = list(repeated_pairs)
+ # ind_matched = []
+ # repeated_polys = []
+ # for index_p1, pair1 in enumerate(repeated_pairs):
+ # pt1 = set(pair1)
+ # for index_p2, pair2 in enumerate(repeated_pairs[index_p1+1:]):
+ # if (index_p1 + index_p2 + 1) not in ind_matched:
+ # pt2 = set(pair2)
+ # if bool(pt1 & pt2):
+ # pt1 |= pt2
+ # ind_matched.append(index_p1 + index_p2 + 1)
+ # if pt1 not in repeated_polys and index_p1 not in ind_matched:
+ # repeated_polys.append(pt1)
+
+ # return repeated_polys
+
+ # def match_edges_to_points(ptdata: List[Dict],
+ # road_polys: List[LineString],
+ # road_features: List[Dict]
+ # ) -> List[List[int]]:
+ # """
+ # Match points to closest road polylines based on name similarity.
+
+ # This function takes a list of points and a list of road polylines.
+ # For each point, it searches for intersecting road polylines within
+ # a specified radius and calculates the similarity between the
+ # point's associated facility name and the road's name. It returns a
+ # list of lists where each sublist contains indices of the road
+ # polylines that best match the point based on the similarity score.
+
+ # Args__
+ # ptdata (Dict[str, Dict]): A mapping from asset type
+ # ('Tunnel' or 'Bridge') to a mapping of AIM IDs to point
+ # data. Each point's 'GeneralInformation' should contain a
+ # 'location' (with 'latitude' and 'longitude') and a
+ # 'tunnel_name' or 'facility_carried' entry.
+ # road_polys (Dict[int, LineString]): A mapping from AIM IDs
+ # to `LineString` objects representing road polylines.
+ # road_features (Dict[int, Dict]): A mapping from AIM IDs to
+ # road feature data, with the road name stored under
+ # 'GeneralInformation' -> 'NAME'.
+
+ # Returns__
+ # List[List[int]]: A list of lists where each sublist contains
+ # the indices of road polylines that have the highest textual
+ # similarity to the point's facility name. If no similarity
+ # is found, the sublist is empty.
+
+ # Notes__
+ # - The function uses a search radius of 100 feet to find
+ # intersecting road polylines.
+ # - TF-IDF vectors are used to compute the textual similarity
+ # between the facility names and road names.
+ # - Cosine similarity is used to determine the best matches.
+
+ # Example__
+ # >>> from shapely.geometry import LineString
+ # >>> ptdata = {'Bridge': {0: {'GeneralInformation': {
+ # ... 'location': {'latitude': 1.0, 'longitude': 1.0},
+ # ... 'facility_carried': 'main street'}}}}
+ # >>> road_polys = {0: LineString([(0, 0), (2, 2)]),
+ # ... 1: LineString([(1, 1), (3, 3)])}
+ # >>> road_features = {
+ # ... 0: {'GeneralInformation': {'NAME': 'Main Street'}},
+ # ... 1: {'GeneralInformation': {'NAME': 'Oak Avenue'}}}
+ # >>> match_edges_to_points(ptdata, road_polys, road_features)
+ # [[0]]
+ # """
+ # edge_matches = []
+ # for ptdata_type, type_data in ptdata.items():
+ # for AIM_ID, point in type_data.items():
+ # lon = point["GeneralInformation"]["location"]["longitude"]
+ # lat = point["GeneralInformation"]["location"]["latitude"]
+ # search_radius = create_circle_from_lat_lon(lat, lon, 100)
+ # # Check for intersections:
+ # intersecting_polylines = [
+ # poly_id
+ # for (poly_id, poly) in road_polys.items()
+ # if poly.intersects(search_radius)
+ # ]
+ # properties = point["GeneralInformation"]
+ # if ptdata_type == "Tunnel":
+ # facility = properties.get("tunnel_name", "")
+ # elif ptdata_type == "Bridge":
+ # facility = properties.get("facility_carried", "")
+ # else:
+ # facility = ""
+ # max_similarity = 0
+ # similarities = {}
+ # for poly_id in intersecting_polylines:
+ # roadway = road_features[poly_id]["GeneralInformation"].get("NAME", None)
+ # if roadway:
+ # # Create TF-IDF vectors:
+ # vectorizer = TfidfVectorizer()
+ # tfidf_matrix = vectorizer.fit_transform(
+ # [facility, roadway.lower()]
+ # )
+
+ # # Calculate cosine similarity (take the scalar from the
+ # # 1x1 result matrix so the comparisons below are
+ # # well-defined):
+ # similarity = cosine_similarity(
+ # tfidf_matrix[0:1], tfidf_matrix[1:2]
+ # )[0, 0]
+ # else:
+ # similarity = -1
+ # similarities.update({poly_id: similarity})
+ # if similarity > max_similarity:
+ # max_similarity = similarity
+ # if max_similarity == 0:
+ # max_similarity = -1
+ # indices_of_max = [
+ # poly_id
+ # for poly_id, value in similarities.items()
+ # if value == max_similarity
+ # ]
+ # edge_matches.append(indices_of_max)
+
+ # return edge_matches
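+
+ # A minimal, self-contained sketch of the name-similarity score
+ # computed above (the sklearn calls are the ones the function uses;
+ # the example strings are hypothetical):
+
+ # from sklearn.feature_extraction.text import TfidfVectorizer
+ # from sklearn.metrics.pairwise import cosine_similarity
+ # tfidf = TfidfVectorizer().fit_transform(
+ # ['main street', 'main street bridge'])
+ # score = cosine_similarity(tfidf[0:1], tfidf[1:2])[0, 0]
+ # # shared tokens give 0 < score < 1; identical names give 1.0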
+
+ # def merge_brtn_features(ptdata: List[Dict],
+ # road_features_brtn: List[Dict],
+ # edge_matches: List[List],
+ # save_additional_attributes: str) -> List[Dict]:
+ # """
+ # Merge bridge/tunnel features to road features using edge matches.
+
+ # This function updates road features with additional attributes
+ # derived from bridge or tunnel point data. It uses edge matches to
+ # determine how to distribute lane and capacity information among
+ # road features.
+
+ # Args__
+ # ptdata (List[Dict]): A list of dictionaries where each
+ # dictionary contains properties of bridge or tunnel
+ # features. Each dictionary should have 'asset_type',
+ # 'traffic_lanes_on', 'structure_number',
+ # 'total_number_of_lanes', and 'tunnel_number' as keys.
+ # road_features_brtn (List[Dict]): A list of dictionaries
+ # representing road features that will be updated. Each
+ # dictionary should have a 'properties' key where attributes
+ # are stored.
+ # edge_matches (List[List[int]]): A list of lists, where each
+ # sublist contains indices that correspond to
+ # `road_features_brtn` and represent which features should be
+ # updated together.
+ # save_additional_attributes (str): A flag indicating whether to
+ # save additional attributes like 'lanes' and 'capacity'. If
+ # non-empty, additional attributes will be saved.
+
+ # Returns__
+ # List[Dict]: The updated list of road features with merged
+ # attributes.
+
+ # Example__
+ # >>> ptdata = [
+ # ... {'properties': {'asset_type': 'bridge',
+ # 'traffic_lanes_on': 4,
+ # 'structure_number': '12345'}},
+ # ... {'properties': {'asset_type': 'tunnel',
+ # 'total_number_of_lanes': 6,
+ # 'tunnel_number': '67890'}}
+ # ... ]
+ # >>> # Empty feature dicts for demonstration:
+ # >>> road_features_brtn = [{'properties': {}} for _ in range(4)]
+ # >>> edge_matches = [[0, 1], [2, 3]]
+ # >>> save_additional_attributes = 'yes'
+ # >>> updated_features = merge_brtn_features(
+ # ptdata,
+ # road_features_brtn,
+ # edge_matches,
+ # save_additional_attributes)
+ # >>> print(updated_features)
+ # """
+ # poly_index = 0
+ # for item_index, edge_indices in enumerate(edge_matches):
+ # nitems = len(edge_indices)
+ # if nitems == 0:
+ # continue # point matched no road edges
+ # features = ptdata[item_index]['properties']
+ # asset_type = features['asset_type']
+ # if asset_type == 'bridge':
+ # total_nlanes = features['traffic_lanes_on']
+ # struct_no = features['structure_number']
+ # else:
+ # total_nlanes = features['total_number_of_lanes']
+ # struct_no = features['tunnel_number']
+
+ # lanes_per_item = round(int(total_nlanes) / nitems)
+
+ # for index in range(poly_index, poly_index + nitems):
+ # properties = road_features_brtn[index]['properties']
+ # properties['asset_type'] = asset_type
+ # properties['OID'] = struct_no
+ # if save_additional_attributes:
+ # properties['lanes'] = lanes_per_item
+ # properties['capacity'] = calculate_road_capacity(
+ # lanes_per_item)
+
+ # poly_index += nitems
+ # return road_features_brtn
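+
+ # For instance, a 4-lane bridge matched to two road edges is recorded
+ # as round(4 / 2) = 2 lanes on each edge, and each edge then receives
+ # the capacity calculate_road_capacity() assigns to a 2-lane segment.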
+
+ # def get_nodes_edges(lines: List[LineString],
+ # length_unit: Literal['ft', 'm'] = 'ft'
+ # ) -> Tuple[Dict, Dict]:
+ # """
+ # Extract nodes and edges from a list of LineString objects.
+
+ # This function processes a list of `LineString` geometries to
+ # generate nodes and edges. Nodes are defined by their unique
+ # coordinates, and edges are represented by their start and end
+ # nodes, length, and geometry.
+
+ # Args__
+ # lines (List[LineString]): A list of `LineString` objects
+ # representing road segments.
+ # length_unit (Literal['ft', 'm']): The unit of length for the
+ # edge distances. Defaults to 'ft'. Acceptable values are
+ # 'ft' for feet and 'm' for meters.
+
+ # Returns__
+ # Tuple[Dict[int, Dict[str, float]], Dict[int, Dict[str, Any]]]:
+ # - A dictionary where keys are node IDs and values are
+ # dictionaries with node attributes:
+ # - 'lon': Longitude of the node.
+ # - 'lat': Latitude of the node.
+ # - 'geometry': WKT representation of the node.
+ # - A dictionary where keys are edge IDs and values are
+ # dictionaries with edge attributes:
+ # - 'start_nid': ID of the start node.
+ # - 'end_nid': ID of the end node.
+ # - 'length': Length of the edge in the specified unit.
+ # - 'geometry': WKT representation of the edge.
+
+ # Raises__
+ # TypeError: If any element in the `lines` list is not an
+ # instance of `LineString`.
+
+ # Example__
+ # >>> from shapely.geometry import LineString
+ # >>> lines = [
+ # ... LineString([(0, 0), (1, 1)]),
+ # ... LineString([(1, 1), (2, 2)])
+ # ... ]
+ # >>> nodes, edges = get_nodes_edges(lines, length_unit='m')
+ # >>> print(nodes)
+ # >>> print(edges)
+ # """
+ # node_list = []
+ # edges = {}
+ # node_counter = 0
+ # for line_counter, line in enumerate(lines):
+ # # Ensure the object is a LineString
+ # if not isinstance(line, LineString):
+ # raise TypeError(
+ # "All elements in the list must be LineString objects.")
+
+ # # Extract coordinates
+ # coords = list(line.coords)
+ # ncoord_pairs = len(coords)
+ # if ncoord_pairs > 0:
+ # start_node_coord = coords[0]
+ # end_node_coord = coords[-1]
+
+ # if start_node_coord not in node_list:
+ # node_list.append(start_node_coord)
+ # start_nid = node_counter
+ # node_counter += 1
+ # else:
+ # start_nid = node_list.index(start_node_coord)
+
+ # if end_node_coord not in node_list:
+ # node_list.append(end_node_coord)
+ # end_nid = node_counter
+ # node_counter += 1
+ # else:
+ # end_nid = node_list.index(end_node_coord)
+
+ # length = 0
+ # (lon, lat) = line.coords.xy
+ # # haversine_dist returns the great-circle distance in feet:
+ # for pair_no in range(ncoord_pairs - 1):
+ # length += haversine_dist([lat[pair_no], lon[pair_no]],
+ # [lat[pair_no + 1],
+ # lon[pair_no + 1]])
+ # if length_unit == 'm':
+ # length = 0.3048 * length
+
+ # edges[line_counter] = {'start_nid': start_nid,
+ # 'end_nid': end_nid,
+ # 'length': length,
+ # 'geometry': line.wkt}
+
+ # nodes = {}
+ # for node_id, node_coords in enumerate(node_list):
+ # nodes[node_id] = {'lon': node_coords[0],
+ # 'lat': node_coords[1],
+ # 'geometry': 'POINT ('
+ # f'{node_coords[0]:.7f} '
+ # f'{node_coords[1]:.7f})'}
+
+ # return (nodes, edges)
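+
+ # haversine_dist is defined elsewhere in this module; a minimal
+ # stand-in consistent with the 0.3048 ft-to-m factor above (i.e.,
+ # returning feet) would be the following sketch:
+
+ # import math
+
+ # def haversine_dist_sketch(p1, p2): # p1, p2 are [lat, lon] degrees
+ # lat1, lon1, lat2, lon2 = map(math.radians, [*p1, *p2])
+ # a = (math.sin((lat2 - lat1) / 2) ** 2
+ # + math.cos(lat1) * math.cos(lat2)
+ # * math.sin((lon2 - lon1) / 2) ** 2)
+ # earth_radius_ft = 20_902_231 # ~6371 km
+ # return 2 * earth_radius_ft * math.asin(math.sqrt(a))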
+
+ # def get_node_edge_features(updated_road_polys: List[LineString],
+ # updated_road_features: List[Dict],
+ # output_files: List[str]
+ # ) -> Tuple[Dict, Dict]:
+ # """
+ # Extract/write node and edge features from updated road polygons.
+
+ # This function processes road polygon data to generate nodes and
+ # edges, then writes the extracted features to specified output
+ # files.
+
+ # Args__
+ # updated_road_polys (List[LineString]): A list of LineString
+ # objects representing updated road polygons.
+ # updated_road_features (List[Dict]): A list of dictionaries
+ # containing feature properties for each road segment.
+ # output_files (List[str]): A list of two file paths where the
+ # first path is for edge data and the second for node data.
+
+ # Returns__
+ # Tuple[Dict, Dict]: A tuple containing two dictionaries:
+ # - The first dictionary contains edge data.
+ # - The second dictionary contains node data.
+
+ # Raises__
+ # TypeError: If any input is not of the expected type.
+
+ # Example__
+ # >>> from shapely.geometry import LineString
+ # >>> road_polys = [LineString([(0, 0), (1, 1)]),
+ # LineString([(1, 1), (2, 2)])]
+ # >>> road_features = [{'properties': {'OID': 1,
+ # 'asset_type': 'road',
+ # 'lanes': 2,
+ # 'capacity': 2000,
+ # 'maxspeed': 30}}] * 2
+ # >>> output_files = ['edges.csv', 'nodes.csv']
+ # >>> get_node_edge_features(road_polys,
+ # road_features,
+ # output_files)
+ # """
+ # (nodes, edges) = get_nodes_edges(
+ # updated_road_polys, length_unit='m')
+
+ # with open(output_files[1], 'w', encoding="utf-8") as nodes_file:
+ # nodes_file.write('node_id, lon, lat, geometry\n')
+ # for key in nodes:
+ # nodes_file.write(f'{key}, {nodes[key]["lon"]}, '
+ # f'{nodes[key]["lat"]}, '
+ # f'{nodes[key]["geometry"]}\n')
+
+ # with open(output_files[0], 'w', encoding="utf-8") as edge_file:
+ # edge_file.write('uniqueid, start_nid, end_nid, osmid, length, '
+ # 'type, lanes, maxspeed, capacity, fft, '
+ # 'geometry\n')
+
+ # for key in edges:
+ # features = updated_road_features[key]['properties']
+ # edges[key]['osmid'] = features['OID']
+ # edges[key]['type'] = features['asset_type']
+ # edges[key]['lanes'] = features['lanes']
+ # edges[key]['capacity'] = features['capacity']
+ # maxspeed = features['maxspeed']
+ # edges[key]['maxspeed'] = maxspeed
+ # # maxspeed (mph) * 1609.34 / 3600 converts to m/s, so fft
+ # # is in seconds for lengths in meters:
+ # free_flow_time = edges[key]['length'] / (
+ # maxspeed * 1609.34 / 3600)
+ # edges[key]['fft'] = free_flow_time
+ # edge_file.write(f'{key}, {edges[key]["start_nid"]}, '
+ # f'{edges[key]["end_nid"]}, '
+ # f'{edges[key]["osmid"]}, '
+ # f'{edges[key]["length"]}, '
+ # f'{edges[key]["type"]}, '
+ # f'{edges[key]["lanes"]}, {maxspeed}, '
+ # f'{edges[key]["capacity"]}, '
+ # f'{free_flow_time}, '
+ # f'{edges[key]["geometry"]}\n')
+
+ # return (edges, nodes)
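+
+ # Worked example of the free-flow time: a 100 m edge with a 30 mph
+ # maxspeed gives fft = 100 / (30 * 1609.34 / 3600) = 100 / 13.4 m/s,
+ # or about 7.5 s.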
+
+ # print('Getting graph network for elements in '
+ # f'{det_file_path}...')
+
+ # # Read inventory data:
+ # ptdata, (road_polys, road_features) = \
+ # parse_element_geometries_from_geojson(det_file_path,
+ # save_additional_attributes=save_additional_attributes)
+
+ # # Find edges that match bridges and tunnels:
+ # edge_matches = match_edges_to_points(ptdata, road_polys, road_features)
+
+ # # Get the indices for bridges and tunnels:
+ # brtn_poly_idx = [item for sublist in edge_matches for item in sublist]
+
+ # # Detect repeated edges and save their indices:
+ # repeated_edges = find_repeated_line_pairs(road_polys)
+
+ # edges_remove = []
+ # for edge_set in repeated_edges:
+ # bridge_poly = set(brtn_poly_idx)
+ # if edge_set & bridge_poly:
+ # remove = list(edge_set - bridge_poly)
+ # else:
+ # temp = list(edge_set)
+ # remove = temp[1:].copy()
+ # edges_remove.extend(remove)
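+
+ # For example, for a duplicate set {2, 4, 7} in which index 4 is a
+ # bridge/tunnel edge, indices 2 and 7 are removed; for a duplicate
+ # set with no bridge/tunnel member, all but one of its indices are
+ # removed.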
+
+ # # Save polygons that are not bridge or tunnel edges or marked for
+ # # removal in road polygons:
+ # road_polys_local = [poly for (ind, poly) in road_polys.items() if
+ # ind not in brtn_poly_idx + edges_remove]
+ # road_features_local = [feature for (ind, feature) in
+ # road_features.items()
+ # if ind not in brtn_poly_idx + edges_remove]
+
+ # # Save polygons that are bridge or tunnel edges:
+ # road_polys_brtn = [poly for (ind, poly) in road_polys.items()
+ # if ind in brtn_poly_idx]
+ # road_features_brtn = [feature for (ind, feature)
+ # in road_features.items()
+ # if ind in brtn_poly_idx]
+ # road_features_brtn = merge_brtn_features(ptdata,
+ # road_features_brtn,
+ # edge_matches,
+ # save_additional_attributes)
+
+ # # Compute the intersections of road polygons:
+ # intersections = find_intersections(road_polys_local)
+
+ # # Cut road polygons at intersection points:
+ # cut_lines, cut_features = cut_lines_at_intersections(
+ # road_polys_local,
+ # road_features_local,
+ # intersections)
+ # # TODO: come back and update cut_lines_at_intersections so it
+ # # does not cut lines within a certain diameter of a bridge point
+
+ # # Combine all polygons and their features:
+ # updated_road_polys = cut_lines + road_polys_brtn
+ # updated_road_features = cut_features + road_features_brtn
+
+ # # Save created polygons (for debugging only)
+ # # save_cut_lines_and_intersections(updated_road_polys, intersections)
+
+ # # Get nodes and edges of the final set of road polygons:
+ # (edges, nodes) = get_node_edge_features(updated_road_polys,
+ # updated_road_features,
+ # output_files)
+ # self.graph_network['edges'] = edges
+ # self.graph_network['nodes'] = nodes
+
+ # print('Edges and nodes of the graph network are saved in '
+ # f'{", ".join(output_files)}')
diff --git a/modules/performRegionalMapping/GISSpecifiedEvents/GISSpecifiedEvent.py b/modules/performRegionalMapping/GISSpecifiedEvents/GISSpecifiedEvent.py
index c23ad992d..e9cdacfd2 100644
--- a/modules/performRegionalMapping/GISSpecifiedEvents/GISSpecifiedEvent.py
+++ b/modules/performRegionalMapping/GISSpecifiedEvents/GISSpecifiedEvent.py
@@ -37,14 +37,14 @@
# Stevan Gavrilovic
#
-import argparse
+import argparse # noqa: I001
from pathlib import Path
import xml.etree.ElementTree as ET
from RasterEvent import create_event as create_raster_event
-def is_raster_file(filename):
+def is_raster_file(filename): # noqa: D103
# Define a set of common raster file extensions
raster_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff', '.tif'}
@@ -55,29 +55,29 @@ def is_raster_file(filename):
return file_path.suffix.lower() in raster_extensions
-def is_xml_file(filename):
+def is_xml_file(filename): # noqa: D103
# Check if the file has an .xml extension
if not filename.lower().endswith('.xml'):
return False
# Try to parse the file as XML
try:
- ET.parse(filename)
- return True
+ ET.parse(filename) # noqa: S314
+ return True # noqa: TRY300
except ET.ParseError:
return False
-def create_event(asset_file: str, event_grid_file: str): # noqa: C901, N803, D103
+def create_event(asset_file: str, event_grid_file: str): # noqa: C901, D103, N803, RUF100
if is_raster_file(event_grid_file):
return create_raster_event(asset_file, event_grid_file)
- elif is_xml_file(event_grid_file):
+ elif is_xml_file(event_grid_file): # noqa: RET505
# Here you would call a function to handle XML files
# For now, we'll just raise a NotImplementedError
- raise NotImplementedError('XML file handling is not yet implemented.')
+ raise NotImplementedError('XML file handling is not yet implemented.') # noqa: EM101
else:
- raise ValueError(
- f'{event_grid_file} is not a raster. Only rasters are currently supported.'
+ raise ValueError( # noqa: TRY003
+ f'{event_grid_file} is not a raster. Only rasters are currently supported.' # noqa: EM102
)
diff --git a/modules/performRegionalMapping/GISSpecifiedEvents/RasterEvent.py b/modules/performRegionalMapping/GISSpecifiedEvents/RasterEvent.py
index eb2ecad25..3c7e1de37 100644
--- a/modules/performRegionalMapping/GISSpecifiedEvents/RasterEvent.py
+++ b/modules/performRegionalMapping/GISSpecifiedEvents/RasterEvent.py
@@ -37,29 +37,29 @@
# Stevan Gavrilovic
#
-import argparse
-import json, csv
+import argparse # noqa: I001
+import json, csv # noqa: E401
from pathlib import Path
import rasterio
import pyproj
from rasterio.transform import rowcol
-def sample_raster_at_latlon(src, lat, lon):
+def sample_raster_at_latlon(src, lat, lon): # noqa: D103
# Get the row and column indices in the raster
row, col = rowcol(src.transform, lon, lat) # Note the order: lon, lat
# Ensure the indices are within the bounds of the raster
if row < 0 or row >= src.height or col < 0 or col >= src.width:
- raise IndexError('Transformed coordinates are out of raster bounds')
+ raise IndexError('Transformed coordinates are out of raster bounds') # noqa: EM101, TRY003
# Read the raster value at the given row and column
raster_value = src.read(1)[row, col]
- return raster_value
+ return raster_value # noqa: RET504
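+
+# Usage sketch (hypothetical file name; lat/lon must already be in the
+# raster's CRS):
+# with rasterio.open('depth.tif') as src:
+#     value = sample_raster_at_latlon(src, 37.87, -122.26)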
-def create_event(asset_file, event_grid_file): # noqa: C901, N803, D103
+def create_event(asset_file, event_grid_file): # noqa: C901, D103, N803, RUF100
# read the event grid data file
event_grid_path = Path(event_grid_file).resolve()
event_dir = event_grid_path.parent
@@ -90,7 +90,7 @@ def create_event(asset_file, event_grid_file): # noqa: C901, N803, D103
asset_file_path = asset['file']
# Load the corresponding file for each asset
- with open(asset_file_path, encoding='utf-8') as asset_file:
+ with open(asset_file_path, encoding='utf-8') as asset_file: # noqa: PTH123, PLR1704
# Load the asset data
asset_data = json.load(asset_file)
@@ -122,7 +122,7 @@ def create_event(asset_file, event_grid_file): # noqa: C901, N803, D103
data_final.append([file_name, lat, lon])
csv_save_path = event_dir / f'Site_{asset_id}.csv'
- with open(csv_save_path, 'w', newline='') as file:
+ with open(csv_save_path, 'w', newline='') as file: # noqa: PTH123
# Create a CSV writer object
writer = csv.writer(file)
@@ -143,9 +143,9 @@ def create_event(asset_file, event_grid_file): # noqa: C901, N803, D103
json.dump(asset_data, f, indent=2)
except IndexError as e:
- print(f'Error for asset ID {asset_id}: {e}')
+ print(f'Error for asset ID {asset_id}: {e}') # noqa: T201
else:
- print(f'Asset ID: {asset_id} is outside the raster bounds')
+ print(f'Asset ID: {asset_id} is outside the raster bounds') # noqa: T201
# # save the event dictionary to the BIM
# asset_data['Events'] = [{}]
@@ -157,12 +157,12 @@ def create_event(asset_file, event_grid_file): # noqa: C901, N803, D103
# # "type": "SimCenterEvents"
# }
- # with open(asset_file, 'w', encoding='utf-8') as f: # noqa: PTH123
+ # with open(asset_file, 'w', encoding='utf-8') as f: # noqa: PTH123, RUF100
# json.dump(asset_data, f, indent=2)
# Save the final event grid
csv_save_path = event_dir / 'EventGrid.csv'
- with open(csv_save_path, 'w', newline='') as file:
+ with open(csv_save_path, 'w', newline='') as file: # noqa: PTH123
# Create a CSV writer object
writer = csv.writer(file)
diff --git a/modules/performRegionalMapping/NearestNeighborEvents/NNE.py b/modules/performRegionalMapping/NearestNeighborEvents/NNE.py
index 2d2a0cb26..f423bcb97 100644
--- a/modules/performRegionalMapping/NearestNeighborEvents/NNE.py
+++ b/modules/performRegionalMapping/NearestNeighborEvents/NNE.py
@@ -38,7 +38,7 @@
# Tamika Bassman
#
-import argparse
+import argparse # noqa: I001
import importlib
import json
from pathlib import Path
diff --git a/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py b/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py
index 4668dc612..2d948e883 100644
--- a/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py
+++ b/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py
@@ -46,7 +46,7 @@
import numpy as np
-errFileName = os.path.join(os.getcwd(),'workflow.err') # noqa: N816
+errFileName = os.path.join(os.getcwd(), 'workflow.err') # noqa: N816, PTH109, PTH118
sys.stderr = open(errFileName, 'a') # noqa: SIM115, PTH123
# from simcenter_common import *
@@ -111,7 +111,7 @@ def run_surrogateGP(AIM_input_path, EDP_input_path): # noqa: ARG001, N802, N803
surrogate_meta_name,
surrogate_name,
]
- # subprocess.run(command, check=True) # noqa: S603
+ # subprocess.run(command, check=True) # noqa: RUF100, S603
try:
result = subprocess.check_output( # noqa: S603
@@ -122,11 +122,11 @@ def run_surrogateGP(AIM_input_path, EDP_input_path): # noqa: ARG001, N802, N803
result = e.output
returncode = e.returncode
- if not returncode == 0:
- print(
+ if not returncode == 0: # noqa: SIM201
+ print( # noqa: T201
result,
file=sys.stderr,
- ) # noqa: T201
+ ) # noqa: RUF100, T201
# os.system( # noqa: RUF100, S605
# f'{pythonEXE} {surrogatePredictionPath} {params_name} {surrogate_meta_name} {surrogate_name}'
@@ -148,10 +148,10 @@ def run_surrogateGP(AIM_input_path, EDP_input_path): # noqa: ARG001, N802, N803
# 'Do not select [None] in the FEM tab. [None] is used only when using pre-trained surrogate, i.e. when [Surrogate] is selected in the SIM Tab.'
# )
# exit(-1) # noqa: PLR1722, RUF100
- print(
+ print( # noqa: T201
'Do not select [None] in the FEM tab. [None] is used only when using pre-trained surrogate, i.e. when [Surrogate] is selected in the SIM Tab.',
file=sys.stderr,
- ) # noqa: T201
+ ) # noqa: RUF100, T201
exit(-1) # noqa: PLR1722
diff --git a/modules/performUQ/SimCenterUQ/UQengine.py b/modules/performUQ/SimCenterUQ/UQengine.py
index 9662105a6..b56e39d51 100644
--- a/modules/performUQ/SimCenterUQ/UQengine.py
+++ b/modules/performUQ/SimCenterUQ/UQengine.py
@@ -41,7 +41,7 @@ def __init__(self, inputArgs): # noqa: N803
# if self.os_type.lower().startswith('win'):
# self.workflowDriver = "workflow_driver.bat"
- def cleanup_workdir(self): # noqa: C901, D102
+ def cleanup_workdir(self): # noqa: C901, D102, RUF100
# if template dir already contains results.out, give an error
# Cleanup working directory if needed
@@ -79,11 +79,11 @@ def cleanup_workdir(self): # noqa: C901, D102
for del_pkl in del_pkls:
os.remove(del_pkl) # noqa: PTH107
- #try:
- # del_errs = glob.glob(os.path.join(self.work_dir, '*err')) # noqa: PTH118, PTH207
+ # try:
+ # del_errs = glob.glob(os.path.join(self.work_dir, '*err')) # noqa: PTH118, PTH207, RUF100
# for del_err in del_errs:
- # os.remove(del_err) # noqa: PTH107
- #except: # noqa: S110, E722
+ # os.remove(del_err) # noqa: PTH107, RUF100
+ # except: # noqa: E722, RUF100, S110
# pass
if glob.glob(os.path.join(self.work_dir, 'templatedir', 'results.out')): # noqa: PTH118, PTH207
@@ -126,7 +126,7 @@ def run_FEM_batch(self, X, id_sim, runIdx=0, alterInput=[]): # noqa: B006, C901
runIdx,
)
if Y_tmp.shape[0] != self.y_dim:
- msg = f'model output in sample {ns} contains {Y_tmp.shape[0]} value(s) while the number of QoIs specified is {y_dim}' # noqa: F821
+ msg = f'model output in sample {ns} contains {Y_tmp.shape[0]} value(s) while the number of QoIs specified is {y_dim}' # type: ignore # noqa: F821
self.exit(msg)
Y[ns, :] = Y_tmp
@@ -270,16 +270,15 @@ def make_pool( # noqa: D102
if self.run_type.lower() == 'runninglocal':
from multiprocessing import Pool
-
n_processor = os.cpu_count()
- if n_processor > 32:
+ if n_processor > 32: # noqa: PLR2004
n_processor = 8
- pool = Pool(n_processor, initializer=initfn, initargs=(seed_val,))
+ pool = Pool(n_processor, initializer=initfn, initargs=(seed_val,))
else:
- from mpi4py import MPI
- from mpi4py.futures import MPIPoolExecutor
+ from mpi4py import MPI # type: ignore # noqa: I001
+ from mpi4py.futures import MPIPoolExecutor # type: ignore
self.world = MPI.COMM_WORLD
n_processor = self.world.Get_size()
@@ -439,9 +438,12 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver, runIdx=0): # noqa: C9
# def makePool(self):
# pass
+
# for creating pool
-def initfn(seed_val):
+def initfn(seed_val): # noqa: D103
np.random.seed(seed_val) # enforcing seeds
+
+
#
# When sampled X is different from surrogate input X. e.g. we sample ground motion parameters or indices, but we use IM as input of GP
#
diff --git a/modules/performUQ/SimCenterUQ/runPLoM.py b/modules/performUQ/SimCenterUQ/runPLoM.py
index 7aec2faf3..fd27bd83a 100644
--- a/modules/performUQ/SimCenterUQ/runPLoM.py
+++ b/modules/performUQ/SimCenterUQ/runPLoM.py
@@ -576,7 +576,7 @@ def _set_up_parallel(self):
if self.run_type.lower() == 'runninglocal':
self.n_processor = os.cpu_count()
# curtailing n_processor for docker containers running at TACC HPC
- if self.n_processor > 32:
+ if self.n_processor > 32: # noqa: PLR2004
self.n_processor = 8
from multiprocessing import Pool
diff --git a/modules/performUQ/SimCenterUQ/surrogateBuild.py b/modules/performUQ/SimCenterUQ/surrogateBuild.py
index a4853a3e0..2e4a08089 100644
--- a/modules/performUQ/SimCenterUQ/surrogateBuild.py
+++ b/modules/performUQ/SimCenterUQ/surrogateBuild.py
@@ -41,7 +41,7 @@
# Jan 31, 2023: let's not use GPy calibration parallel for now, because it seems to give local maxima
-import copy
+import copy # noqa: I001
import json
import os
import pickle
@@ -50,7 +50,7 @@
import time
import warnings
import numpy as np
-import random
+import random # noqa: F811
warnings.filterwarnings('ignore')
@@ -71,7 +71,7 @@
import GPy as GPy # noqa: PLC0414
moduleName = 'scipy.stats' # noqa: N816
- from scipy.stats import cramervonmises, lognorm, norm, qmc
+ from scipy.stats import cramervonmises, lognorm, norm, qmc # noqa: I001
import scipy
moduleName = 'sklearn.linear_model' # noqa: N816
@@ -83,28 +83,27 @@
error_tag = False # global variable
except: # noqa: E722
error_tag = True
- print('Failed to import module:' + moduleName) # noqa: T201
-
+ print('Failed to import module:' + moduleName) # type: ignore # noqa: T201
-print("Initializing error log file..")
-print(f"Current working dir (getcwd): {os.getcwd()}")
+print('Initializing error log file..') # noqa: T201
+print(f'Current working dir (getcwd): {os.getcwd()}') # noqa: T201, PTH109
work_dir_tmp = sys.argv[1].replace(os.sep, '/')
-errFileName = os.path.join(work_dir_tmp,'dakota.err') # noqa: N816
+errFileName = os.path.join(work_dir_tmp, 'dakota.err') # noqa: N816, PTH118
-develop_mode = (len(sys.argv)==8) # a flag for develeopmode
+develop_mode = len(sys.argv) == 8 # a flag for develop mode # noqa: PLR2004
if develop_mode:
# import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
- print("Developer mode")
+
+ print('Developer mode') # noqa: T201
else:
- with open(errFileName, 'w') as f:
- f.write("")
+ with open(errFileName, 'w') as f: # noqa: PTH123
+ f.write('')
sys.stderr = open(errFileName, 'w') # noqa: SIM115, PTH123
- print(f"Error file created at: {errFileName}")
-
+ print(f'Error file created at: {errFileName}') # noqa: T201
#
@@ -240,7 +239,7 @@ def readJson(self): # noqa: C901, N802, D102, PLR0912, PLR0915
self.global_seed = 42
random.seed(self.global_seed)
- np.random.seed( self.global_seed)
+ np.random.seed(self.global_seed)
#
# EE-UQ
#
@@ -323,7 +322,7 @@ def readJson(self): # noqa: C901, N802, D102, PLR0912, PLR0915
self.do_parallel = True
if self.do_parallel:
- self.n_processor, self.pool = self.make_pool( self.global_seed)
+ self.n_processor, self.pool = self.make_pool(self.global_seed)
self.cal_interval = self.n_processor
else:
self.n_processor = 1
@@ -419,13 +418,15 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None): # noqa: N802, N803
self.set_XY(X, Y)
@monkeypatch_method(GPy.core.GP)
- def subsample_XY(self, idx): # noqa: N802, N803
+ def subsample_XY(self, idx): # noqa: N802, N803, RUF100
if self.Y_metadata is not None:
new_meta = self.Y_metadata
- new_meta['variance_structure'] = self.Y_metadata['variance_structure'][idx]
+ new_meta['variance_structure'] = self.Y_metadata[
+ 'variance_structure'
+ ][idx]
self.Y_metadata.update(new_meta)
print('metadata_updated') # noqa: T201
- self.set_XY(self.X[idx,:], self.Y[idx,:])
+ self.set_XY(self.X[idx, :], self.Y[idx, :])
# Save model information
if (surrogateJson['method'] == 'Sampling and Simulation') or (
@@ -593,11 +594,11 @@ def create_kernel(self, x_dim): # noqa: D102
msg = f'Error running SimCenterUQ - Kernel name <{kernel}> not supported'
self.exit(msg)
- #if self.do_linear:
+ # if self.do_linear:
# kr = kr + GPy.kern.Linear(input_dim=x_dim, ARD=True)
if self.do_mf:
- kr = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()]) # noqa: F821
+ kr = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()]) # type: ignore # noqa: F821
return kr
@@ -622,13 +623,13 @@ def create_gpy_model(self, X_dummy, Y_dummy, kr): # noqa: N803, D102
# for multi fidelity case
else:
- X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: N806, F821
+ X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: N806, F821 # type: ignore
[X_dummy, X_dummy], [Y_dummy, Y_dummy]
)
- for i in range(y_dim): # noqa: B007, F821
- m_tmp = GPyMultiOutputWrapper( # noqa: F821
- emf.models.GPyLinearMultiFidelityModel( # noqa: F821
+ for i in range(y_dim): # type: ignore # noqa: B007, F821
+ m_tmp = GPyMultiOutputWrapper( # noqa: F821 # type: ignore
+ emf.models.GPyLinearMultiFidelityModel( # noqa: F821 # type: ignore
X_list, Y_list, kernel=kr.copy(), n_fidelities=2
),
2,
@@ -655,16 +656,14 @@ def create_gp_model(self): # noqa: D102
self.m_list = [0] * self.y_dim
if self.do_linear:
self.lin_list = [0] * self.y_dim
-
self.m_var_list, self.var_str, self.Y_mean = (
[0] * self.y_dim,
[1] * self.y_dim,
[0] * self.y_dim,
)
-
# below to facilitate better optimization bounds
- self.init_noise_var = [None]*y_dim
- self.init_process_var= [None]*y_dim
+ self.init_noise_var = [None] * y_dim
+ self.init_process_var = [None] * y_dim
for i in range(y_dim):
self.m_list[i] = self.create_gpy_model(X_dummy, Y_dummy, kr)
@@ -692,7 +691,7 @@ def predict(self, m_tmp, X, noise=0): # noqa: ARG002, N803, D102
0,
) # if high-fidelity response is constant - just return constant
else: # noqa: RET505
- X_list = convert_x_list_to_array([X, X]) # noqa: N806, F821
+ X_list = convert_x_list_to_array([X, X]) # type: ignore # noqa: N806, F821
X_list_h = X_list[X.shape[0] :] # noqa: N806
return m_tmp.predict(X_list_h)
@@ -740,16 +739,17 @@ def set_XY( # noqa: C901, N802, D102
else:
Y_lfs = Y_lf # noqa: N806
-
if self.do_linear:
- self.linear_list = [True] * X_hf.shape[1] # SY - TEMPORARY
+ self.linear_list = [True] * X_hf.shape[1] # SY - TEMPORARY
else:
self.linear_list = [False] * X_hf.shape[1] # SY - TEMPORARY
- if sum(self.linear_list)>0:
- self.lin_list[ny] = LinearRegression().fit(X_hf[:, self.linear_list], Y_hfs)
+ if sum(self.linear_list) > 0:
+ self.lin_list[ny] = LinearRegression().fit(
+ X_hf[:, self.linear_list], Y_hfs
+ )
y_pred = self.lin_list[ny].predict(X_hf[:, self.linear_list])
- Y_hfs = Y_hfs - y_pred
+ Y_hfs = Y_hfs - y_pred # noqa: N806
# # below is dummy
# if np.all(np.isnan(X_lf)) and np.all(np.isnan(Y_lf)):
@@ -787,7 +787,12 @@ def set_XY( # noqa: C901, N802, D102
elif n_unique == X_hf.shape[0]: # no repl
# Y_mean=Y_hfs[X_idx]
# Y_mean1, nugget_mean1 = self.predictStoMeans(X_new, Y_mean)
- Y_mean1, nugget_mean1, initial_noise_variance, initial_process_variance = self.predictStoMeans(X_hf, Y_hfs) # noqa: N806
+ (
+ Y_mean1, # noqa: N806
+ nugget_mean1,
+ initial_noise_variance,
+ initial_process_variance,
+ ) = self.predictStoMeans(X_hf, Y_hfs)
if np.max(nugget_mean1) < 1.0e-10: # noqa: PLR2004
self.set_XY(m_tmp, ny, X_hf, Y_hfs, enforce_hom=True)
@@ -806,7 +811,6 @@ def set_XY( # noqa: C901, N802, D102
self.init_noise_var[ny] = initial_noise_variance
self.init_process_var[ny] = initial_process_variance
else:
-
# nonunique set - check if nugget is zero
Y_mean, Y_var = np.zeros((n_unique, 1)), np.zeros((n_unique, 1)) # noqa: N806
@@ -831,9 +835,15 @@ def set_XY( # noqa: C901, N802, D102
return m_tmp
elif self.nugget_opt == 'Heteroscedastic': # noqa: RET505
-
- dummy, dummy, initial_noise_variance, initial_process_variance = self.predictStoMeans(X_hf,
- Y_hfs) # noqa: N806
+ (
+ dummy,
+ dummy, # noqa: PLW0128
+ initial_noise_variance,
+ initial_process_variance,
+ ) = self.predictStoMeans(
+ X_hf,
+ Y_hfs,
+ ) # noqa: N806, RUF100
#
# Constructing secondary GP model - can we make use of the "variance of sample variance"
#
@@ -891,7 +901,7 @@ def set_XY( # noqa: C901, N802, D102
(
X_list_tmp, # noqa: N806
Y_list_tmp, # noqa: N806
- ) = emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821
+ ) = emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821 # type: ignore
[X_hf, X_lf], [Y_hfs, Y_lfs]
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
@@ -907,28 +917,29 @@ def set_XY( # noqa: C901, N802, D102
return m_tmp
- def predictStoVars(self, X_repl, Y_var_repl, X_new, Y_mean, counts): # noqa: N802, N803, D102
+ def predictStoVars(self, X_repl, Y_var_repl, X_new, Y_mean, counts): # noqa: ARG002, D102, N802, N803
my_x_dim = X_repl.shape[1]
- #kernel_var = GPy.kern.Matern52(
+ # kernel_var = GPy.kern.Matern52(
# input_dim=my_x_dim, ARD=True
- #) + GPy.kern.Linear(input_dim=my_x_dim, ARD=True)
+ # ) + GPy.kern.Linear(input_dim=my_x_dim, ARD=True)
- kernel_var = GPy.kern.Matern52( input_dim=my_x_dim, ARD=True)
+ kernel_var = GPy.kern.Matern52(input_dim=my_x_dim, ARD=True)
log_vars = np.log(Y_var_repl)
m_var = GPy.models.GPRegression(
X_repl, log_vars, kernel_var, normalizer=True, Y_metadata=None
)
- #m_var.Gaussian_noise.constrain_bounded(0.2, 2.0, warning=False)
+ # m_var.Gaussian_noise.constrain_bounded(0.2, 2.0, warning=False)
m_var.Gaussian_noise.constrain_bounded(0.5, 2.0, warning=False)
- m_var.Gaussian_noise = 1 # initial points
+ m_var.Gaussian_noise = 1 # initial points
for parname in m_var.parameter_names():
if parname.endswith('lengthscale'):
for nx in range(X_repl.shape[1]):
myrange = np.max(X_repl, axis=0) - np.min(X_repl, axis=0)
-
- m_var.Mat52.lengthscale[[nx]].constrain_bounded( myrange[nx]/X_repl.shape[0], myrange[nx]*5,warning=False)
- m_var.Mat52.lengthscale[[nx]] = myrange[nx] # initial points
+ m_var.Mat52.lengthscale[[nx]].constrain_bounded(
+ myrange[nx] / X_repl.shape[0], myrange[nx] * 5, warning=False
+ )
+ m_var.Mat52.lengthscale[[nx]] = myrange[nx] # initial points
# m_var.Gaussian_noise.value = 0.05
# m_var.Gaussian_noise.constrain_bounded(0.1/np.var(log_vars), 0.8/np.var(log_vars), warning=False)
# m_var.Mat52.lengthscale[[nx]].constrain_bounded(
@@ -941,75 +952,75 @@ def predictStoVars(self, X_repl, Y_var_repl, X_new, Y_mean, counts): # noqa: N8
# myrange[nx] * 100,
# warning=False,
# )
- #TODO change the kernel # noqa: TD002, TD004
- #m_var.optimize(max_f_eval=1000)
- #m_var.optimize_restarts(
+ # TODO change the kernel # noqa: TD002, TD004
+ # m_var.optimize(max_f_eval=1000)
+ # m_var.optimize_restarts(
# self.nopt, parallel=self.is_paralle_opt_safe, num_processes=self.n_processor, verbose=False
- #)
- print("Calibrating Secondary surrogate")
- m_var = my_optimize_restart(m_var,self.nopt)
+ # )
+ print('Calibrating secondary surrogate') # noqa: T201
+ m_var = my_optimize_restart(m_var, self.nopt)
- #print(m_var) # noqa: T201
+ # print(m_var) # noqa: RUF100, T201
log_var_pred, dum = m_var.predict(X_new)
var_pred = np.exp(log_var_pred)
- norm_var_str = (var_pred.T[0]/counts) / max(var_pred.T[0]/counts)
+ norm_var_str = (var_pred.T[0] / counts) / max(var_pred.T[0] / counts)
self.var_norm = 1
if self.set_normalizer:
self.var_norm = np.mean(var_pred.T[0])
- #norm_var_str = (var_pred.T[0]) / np.var(Y_mean)
- norm_var_str = var_pred.T[0] / self.var_norm
+ # norm_var_str = (var_pred.T[0]) / np.var(Y_mean)
+ norm_var_str = var_pred.T[0] / self.var_norm
# if normalization was used..
else:
norm_var_str = var_pred.T[0] # if normalization was not used..
-
# norm_var_str = (X_new+2)**2/max((X_new+2)**2)
Y_metadata = {'variance_structure': norm_var_str / counts} # noqa: N806
if develop_mode:
-
plt.figure(3)
- nx=0
- plt.scatter(X_repl[:, nx], log_vars, alpha=0.1);
- plt.scatter(X_new[:, nx], log_var_pred, alpha=0.1);
+ nx = 0
+ plt.scatter(X_repl[:, nx], log_vars, alpha=0.1)
+ plt.scatter(X_new[:, nx], log_var_pred, alpha=0.1)
plt.show()
-
plt.figure(1)
- dum1, dum2 = m_var.predict(X_repl)
-
- plt.title("Sto Log-var QoI")
- plt.scatter(log_vars, dum1,alpha=0.5);
- plt.scatter(log_vars, log_vars,alpha=0.5);
- plt.xlabel("exact"); plt.ylabel("pred"); plt.grid()
- print(m_var)
- print(m_var.Mat52.lengthscale)
- plt.show();
-
+ dum1, dum2 = m_var.predict(X_repl)
+
+ plt.title('Sto Log-var QoI')
+ plt.scatter(log_vars, dum1, alpha=0.5)
+ plt.scatter(log_vars, log_vars, alpha=0.5)
+ plt.xlabel('exact')
+ plt.ylabel('pred')
+ plt.grid()
+ print(m_var) # noqa: T201
+ print(m_var.Mat52.lengthscale) # noqa: T201
+ plt.show()
return Y_metadata, m_var, norm_var_str
def predictStoMeans(self, X, Y): # noqa: N802, N803, D102
# under homoscedasticity
my_x_dim = X.shape[1]
myrange = np.max(X, axis=0) - np.min(X, axis=0)
- kernel_mean = GPy.kern.Matern52(input_dim=my_x_dim, ARD=True, lengthscale=myrange)
+ kernel_mean = GPy.kern.Matern52(
+ input_dim=my_x_dim, ARD=True, lengthscale=myrange
+ )
# kernel_mean = GPy.kern.Matern52(input_dim=my_x_dim, ARD=True) + GPy.kern.Linear(input_dim=my_x_dim, ARD=True)
- #if self.do_linear and not (self.isEEUQ or self.isWEUQ):
+ # if self.do_linear and not (self.isEEUQ or self.isWEUQ):
# kernel_mean = kernel_mean + GPy.kern.Linear(input_dim=my_x_dim, ARD=True)
- #
+ #
# if sum(self.linear_list)>0:
- #
+ #
# lin_tmp = LinearRegression().fit(X[:, self.linear_list], Y)
# y_pred = lin_tmp.predict(X[:, self.linear_list])
- #
+ #
# # Set GP
- #
+ #
# m_mean = GPy.models.GPRegression(
# X, Y - y_pred, kernel_mean, normalizer=True, Y_metadata=None
# )
- #
+ #
# else:
# m_mean = GPy.models.GPRegression(
# X, Y, kernel_mean, normalizer=True, Y_metadata=None
@@ -1018,7 +1029,7 @@ def predictStoMeans(self, X, Y): # noqa: N802, N803, D102
X, Y, kernel_mean, normalizer=True, Y_metadata=None
)
- '''
+ """
for parname in m_mean.parameter_names():
if parname.endswith('lengthscale'):
for nx in range(X.shape[1]):
@@ -1053,17 +1064,17 @@ def predictStoMeans(self, X, Y): # noqa: N802, N803, D102
myrange[nx] * 1,
warning=False
)
- '''
+ """
# m_mean.optimize(messages=True, max_f_eval=1000)
# # m_mean.Gaussian_noise.variance = np.var(Y) # First calibrate parameters
# m_mean.optimize_restarts(
# self.nopt, parallel=self.is_paralle_opt_safe, num_processes=self.n_processor, verbose=True
# ) # First calibrate parameters
- print("calibrating tertiary surrogate")
- m_mean = my_optimize_restart(m_mean,self.nopt)
+ print('Calibrating tertiary surrogate') # noqa: T201
+ m_mean = my_optimize_restart(m_mean, self.nopt)
- #m_mean.optimize(messages=True, max_f_eval=1000)
+ # m_mean.optimize(messages=True, max_f_eval=1000)
# if self.do_linear:
@@ -1077,26 +1088,31 @@ def predictStoMeans(self, X, Y): # noqa: N802, N803, D102
initial_noise_variance = float(m_mean.Gaussian_noise.variance)
initial_process_variance = float(m_mean.Mat52.variance)
- #if np.sum(self.linear_list) > 0:
+ # if np.sum(self.linear_list) > 0:
# mean_pred = mean_pred + lin_tmp.predict(X[:, self.linear_list])
if develop_mode:
- print(m_mean)
- plt.scatter(X[:, 0], Y,alpha=0.2)
+ print(m_mean) # noqa: T201
+ plt.scatter(X[:, 0], Y, alpha=0.2)
plt.plot(X[:, 0], mean_pred, 'rx')
- plt.errorbar(X[:, 0],mean_pred.T[0],yerr=np.sqrt(mean_var.T)[0],fmt='rx',alpha=0.1);
- plt.title("Sto Log-mean QoI (initial)")
+ plt.errorbar(
+ X[:, 0],
+ mean_pred.T[0],
+ yerr=np.sqrt(mean_var.T)[0],
+ fmt='rx',
+ alpha=0.1,
+ )
+ plt.title('Sto Log-mean QoI (initial)')
plt.show()
plt.plot(m_mean.Y, mean_pred, 'rx')
- plt.scatter(m_mean.Y, m_mean.Y,alpha=0.2)
- plt.title("Sto Log-mean QoI (initial)")
+ plt.scatter(m_mean.Y, m_mean.Y, alpha=0.2)
+ plt.title('Sto Log-mean QoI (initial)')
plt.show()
-
return mean_pred, mean_var, initial_noise_variance, initial_process_variance
- def calibrate(self): # noqa: C901, D102
+ def calibrate(self): # noqa: C901, D102, RUF100
print('Calibrating in parallel', flush=True) # noqa: T201
warnings.filterwarnings('ignore')
t_opt = time.time()
@@ -1120,7 +1136,7 @@ def calibrate(self): # noqa: C901, D102
self.n_processor,
self.is_paralle_opt_safe,
self.init_noise_var[ny],
- self.init_process_var[ny]
+ self.init_process_var[ny],
)
for ny in range(self.y_dim)
)
@@ -1132,26 +1148,26 @@ def calibrate(self): # noqa: C901, D102
# TODO: terminate it gracefully.... # noqa: TD002
# see https://stackoverflow.com/questions/21104997/keyboard-interrupt-with-pythons-multiprocessing
-
if develop_mode:
for ny in range(self.y_dim):
- print(self.m_list[ny])
+ print(self.m_list[ny]) # noqa: T201
# print(m_tmp.rbf.lengthscale)
- tmp = self.m_list[ny].predict(self.m_list[ny].X)[0]
-
+ tmp = self.m_list[ny].predict(self.m_list[ny].X)[0]
# this one has a noise
- plt.title("Original Mean QoI")
+ plt.title('Original Mean QoI')
plt.scatter(self.m_list[ny].Y, tmp, alpha=0.1)
plt.scatter(self.m_list[ny].Y, self.m_list[ny].Y, alpha=0.1)
- plt.xlabel("exact")
- plt.ylabel("pred")
+ plt.xlabel('exact')
+ plt.ylabel('pred')
plt.show()
- plt.title("Original Mean QoI")
- plt.scatter(self.m_list[ny].X[:,0], tmp, alpha=0.1)
- plt.scatter(self.m_list[ny].X[:,0], self.m_list[ny].Y, alpha=0.1)
- plt.xlabel("exact")
- plt.ylabel("pred")
+ plt.title('Original Mean QoI')
+ plt.scatter(self.m_list[ny].X[:, 0], tmp, alpha=0.1)
+ plt.scatter(
+ self.m_list[ny].X[:, 0], self.m_list[ny].Y, alpha=0.1
+ )
+ plt.xlabel('exact')
+ plt.ylabel('pred')
plt.show()
else:
for ny in range(self.y_dim):
@@ -1167,23 +1183,22 @@ def calibrate(self): # noqa: C901, D102
self.n_processor,
self.is_paralle_opt_safe,
self.init_noise_var[ny],
- self.init_process_var[ny]
+ self.init_process_var[ny],
)
if msg != '':
self.exit(msg)
- ####
+ ####
if develop_mode:
- print(self.m_list[ny])
+ print(self.m_list[ny]) # noqa: T201
# print(m_tmp.rbf.lengthscale)
tmp = self.m_list[ny].predict(self.m_list[ny].X)
- plt.title("Final Mean QoI")
+ plt.title('Final Mean QoI')
plt.scatter(self.m_list[ny].Y, tmp[0], alpha=0.1)
plt.scatter(self.m_list[ny].Y, self.m_list[ny].Y, alpha=0.1)
- plt.xlabel("exact")
- plt.ylabel("pred")
- plt.show()
+ plt.xlabel('exact')
+ plt.ylabel('pred')
+ plt.show()
-
# because EE-UQ results are more likely to have huge nugget.
# if False:
# if self.isEEUQ:
@@ -1195,14 +1210,14 @@ def calibrate(self): # noqa: C901, D102
# for ny in range(self.y_dim):
# for parname in self.m_list[ny].parameter_names():
# if parname.endswith('variance') and ('Gauss' not in parname):
- # exec( # noqa: S102
+ # exec( # noqa: RUF100, S102
# 'my_new_var = max(self.m_list[ny].'
# + variance_keyword
# + ', 10*self.m_list[ny].'
# + parname
# + ')'
# )
- # exec('self.m_list[ny].' + variance_keyword + '= my_new_var') # noqa: S102
+ # exec('self.m_list[ny].' + variance_keyword + '= my_new_var') # noqa: RUF100, S102
#
# self.m_list[ny].optimize()
@@ -1215,9 +1230,6 @@ def calibrate(self): # noqa: C901, D102
return Y_preds, Y_pred_vars, Y_pred_vars_w_measures, e2
def train_surrogate(self, t_init): # noqa: C901, D102, PLR0915
-
-
-
self.seed = 43
np.random.seed(43)
@@ -1301,7 +1313,6 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
X_hf_tmp = model_hf.sampling(max([model_hf.n_init - model_hf.n_existing, 0])) # noqa: N806
-
#
# if X is from a data file & Y is from simulation
#
@@ -1356,9 +1367,9 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
msg = f'Error importing input data: dimension inconsistent: high fidelity model have {self.Y_hf.shape[1]} QoI(s) but low fidelity model have {self.Y_lf.shape[1]}.'
self.exit(msg)
- stoch_idx = []
+ stoch_idx = [] # noqa: F841
for i in range(y_dim):
- print("Setting up QoI {} among {}".format(i+1,y_dim))
+ print('Setting up QoI {} among {}'.format(i + 1, y_dim)) # noqa: T201, UP032
self.m_list[i] = self.set_XY(
self.m_list[i],
i,
@@ -1368,11 +1379,10 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
self.Y_lf[:, i][np.newaxis].transpose(),
) # log-transform is inside set_XY
-
# check stochastic ?
# if self.stochastic[i] and not self.do_mf:
# # see if we can run it parallel
- # X_new, X_idx, indices, counts = np.unique( # noqa: N806
+ # X_new, X_idx, indices, counts = np.unique( # noqa: N806, RUF100
# self.X_hf,
# axis=0,
# return_index=True,
@@ -1415,7 +1425,7 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
# for i in stoch_idx
# )
# result_objs = list(self.pool.starmap(set_XY_indi, iterables))
- # for ny, m_tmp_, Y_mean_, normMeans_, normVars_, m_var_list_, var_str_, indices_unique_, n_unique_hf_ in result_objs: # noqa: N806
+ # for ny, m_tmp_, Y_mean_, normMeans_, normVars_, m_var_list_, var_str_, indices_unique_, n_unique_hf_ in result_objs: # noqa: N806, RUF100
# self.m_tmp[ny] = m_tmp_
# self.Y_mean[ny] = Y_mean_
# self.normMeans[ny] = normMeans_
@@ -1425,7 +1435,6 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
# self.indices_unique = indices_unique_
# self.n_unique_hf[ny] = n_unique_hf_
-
#
# Verification measures
#
@@ -1447,7 +1456,9 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
# Initial calibration
# Calibrate self.m_list
- self.Y_cvs, self.Y_cv_vars, self.Y_cv_var_w_measures, e2 = self.calibrate()
+ self.Y_cvs, self.Y_cv_vars, self.Y_cv_var_w_measures, e2 = (
+ self.calibrate()
+ )
if self.do_logtransform:
# self.Y_cv = np.exp(2*self.Y_cvs+self.Y_cv_vars)*(np.exp(self.Y_cv_vars)-1) # in linear space
@@ -1587,79 +1598,129 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
print(f'3. time = {self.sim_time:.2f} s', flush=True) # noqa: T201
if develop_mode:
- print("CV: inbound50")
- print(self.inbound50)
- print("CV: max coverage")
- print(self.max_coverage)
- print("CV: quantile values")
- print(self.quantile_reconst_list)
+ print('CV: inbound50') # noqa: T201
+ print(self.inbound50) # noqa: T201
+ print('CV: max coverage') # noqa: T201
+ print(self.max_coverage) # noqa: T201
+ print('CV: quantile values') # noqa: T201
+ print(self.quantile_reconst_list) # noqa: T201
-
- ny = 0;
- nx =0;
+ ny = 0
+ nx = 0
sorted_y_std = np.sqrt(self.Y_cv_var_w_measure[:, ny])
sorted_y_std0 = np.sqrt(self.Y_cv_var[:, ny])
- plt.errorbar(self.X_hf[:, nx], (self.Y_cv[:, ny]), yerr=sorted_y_std, fmt='x',alpha=50/self.X_hf.shape[0]);
- plt.errorbar(self.X_hf[:, nx], (self.Y_cv[:, ny]), yerr=sorted_y_std0, fmt='x',alpha=50/self.X_hf.shape[0]);
- plt.scatter(self.X_hf[:, nx], (self.Y_hf[:, ny]), c='r',alpha=0.1);
- plt.title("RV={}, QoI={}".format(nx + 1, ny + 1))
+ plt.errorbar(
+ self.X_hf[:, nx],
+ (self.Y_cv[:, ny]),
+ yerr=sorted_y_std,
+ fmt='x',
+ alpha=50 / self.X_hf.shape[0],
+ )
+ plt.errorbar(
+ self.X_hf[:, nx],
+ (self.Y_cv[:, ny]),
+ yerr=sorted_y_std0,
+ fmt='x',
+ alpha=50 / self.X_hf.shape[0],
+ )
+ plt.scatter(self.X_hf[:, nx], (self.Y_hf[:, ny]), c='r', alpha=0.1)
+ plt.title('RV={}, QoI={}'.format(nx + 1, ny + 1)) # noqa: UP032
plt.show()
- log_Y_cv_sample = np.random.normal(self.Y_cvs[:, ny],np.sqrt(self.Y_cv_var_w_measures[:, ny]))
+ log_Y_cv_sample = np.random.normal( # noqa: F841, N806
+ self.Y_cvs[:, ny], np.sqrt(self.Y_cv_var_w_measures[:, ny])
+ )
ny = 0
- plt.scatter(self.Y_hf[:, ny], self.Y_cv[:, ny],alpha=0.1);
- plt.errorbar(self.Y_hf[:, ny], self.Y_cv[:, ny], yerr=sorted_y_std, fmt='x',alpha=0.1);
- plt.scatter(self.Y_hf[:, ny], self.Y_hf[:, ny],alpha=0.1);
- plt.title("QoI = {}".format(ny+1))
+ plt.scatter(self.Y_hf[:, ny], self.Y_cv[:, ny], alpha=0.1)
+ plt.errorbar(
+ self.Y_hf[:, ny],
+ self.Y_cv[:, ny],
+ yerr=sorted_y_std,
+ fmt='x',
+ alpha=0.1,
+ )
+ plt.scatter(self.Y_hf[:, ny], self.Y_hf[:, ny], alpha=0.1)
+ plt.title('QoI = {}'.format(ny + 1)) # noqa: UP032
plt.show()
- plt.scatter(np.log10(self.Y_hf[:, ny]),np.log10((self.Y_cv[:, ny])), alpha=1,marker='x');
- plt.plot(np.log10(self.Y_hf[:, ny]),np.log10(self.Y_hf[:, ny]),alpha=1,color='r');
- mycor_CV = np.corrcoef(self.Y_hf[:, nx], self.Y_cv[:, ny])[1,0]
- mycor_log_CV = np.corrcoef(np.log(self.Y_hf[:, nx]), np.log(self.Y_cv[:, ny]))[1,0]
- plt.title(f"train CV rho={round(mycor_CV*100)/100} rho_log={round(mycor_log_CV*100)/100}")
- plt.xlabel("QoI exact");
- plt.ylabel("QoI pred median");
+ plt.scatter(
+ np.log10(self.Y_hf[:, ny]),
+ np.log10((self.Y_cv[:, ny])), # noqa: UP034
+ alpha=1,
+ marker='x',
+ )
+ plt.plot(
+ np.log10(self.Y_hf[:, ny]),
+ np.log10(self.Y_hf[:, ny]),
+ alpha=1,
+ color='r',
+ )
+ mycor_CV = np.corrcoef(self.Y_hf[:, nx], self.Y_cv[:, ny])[1, 0] # noqa: N806
+ mycor_log_CV = np.corrcoef( # noqa: N806
+ np.log(self.Y_hf[:, nx]), np.log(self.Y_cv[:, ny])
+ )[1, 0]
+ plt.title(
+ f'train CV rho={round(mycor_CV*100)/100} rho_log={round(mycor_log_CV*100)/100}'
+ )
+ plt.xlabel('QoI exact')
+ plt.ylabel('QoI pred median')
plt.grid()
plt.show()
- [a,b] = self.m_list[0].predict(self.m_list[0].X)
+ [a, b] = self.m_list[0].predict(self.m_list[0].X)
if sum(self.linear_list) > 0:
- a = a + self.lin_list[ny].predict(self.X_hf[:, self.linear_list])[:, 0:1]
+ a = (
+ a
+ + self.lin_list[ny].predict(self.X_hf[:, self.linear_list])[
+ :, 0:1
+ ]
+ )
# Don't use b
- plt.scatter(np.log10(self.Y_hf[:, ny]),np.log10(np.exp(a[:, ny])),alpha=1,marker='x');
- plt.plot(np.log10(self.Y_hf[:, ny]),np.log10(self.Y_hf[:, ny]),alpha=1,color='r');
- mycor = np.corrcoef(self.Y_hf[:, ny], np.exp(a[:, ny]))[1,0]
- mycor_log = np.corrcoef(np.log(self.Y_hf[:, ny]), a[:, ny])[1,0]
- plt.title(f"train rho={round(mycor*100)/100} rho_log={round(mycor_log*100)/100}")
- plt.xlabel("QoI exact");
- plt.ylabel("QoI pred median");
+ plt.scatter(
+ np.log10(self.Y_hf[:, ny]),
+ np.log10(np.exp(a[:, ny])),
+ alpha=1,
+ marker='x',
+ )
+ plt.plot(
+ np.log10(self.Y_hf[:, ny]),
+ np.log10(self.Y_hf[:, ny]),
+ alpha=1,
+ color='r',
+ )
+ mycor = np.corrcoef(self.Y_hf[:, ny], np.exp(a[:, ny]))[1, 0]
+ mycor_log = np.corrcoef(np.log(self.Y_hf[:, ny]), a[:, ny])[1, 0]
+ plt.title(
+ f'train rho={round(mycor*100)/100} rho_log={round(mycor_log*100)/100}'
+ )
+ plt.xlabel('QoI exact')
+ plt.ylabel('QoI pred median')
plt.grid()
plt.show()
- plt.scatter((self.Y_hf[:, ny]),(np.exp(a[:, ny])),alpha=1,marker='x');
- plt.plot((self.Y_hf[:, ny]),(self.Y_hf[:, ny]),alpha=1,color='r');
- mycor = np.corrcoef(self.Y_hf[:, ny], np.exp(a[:, ny]))[1,0]
- mycor_log = np.corrcoef(np.log(self.Y_hf[:, ny]), a[:, ny])[1,0]
- plt.title(f"train rho={round(mycor*100)/100} rho_log={round(mycor_log*100)/100}")
- plt.xlabel("QoI exact");
- plt.ylabel("QoI pred median");
+ plt.scatter((self.Y_hf[:, ny]), (np.exp(a[:, ny])), alpha=1, marker='x')
+ plt.plot((self.Y_hf[:, ny]), (self.Y_hf[:, ny]), alpha=1, color='r')
+ mycor = np.corrcoef(self.Y_hf[:, ny], np.exp(a[:, ny]))[1, 0]
+ mycor_log = np.corrcoef(np.log(self.Y_hf[:, ny]), a[:, ny])[1, 0]
+ plt.title(
+ f'train rho={round(mycor*100)/100} rho_log={round(mycor_log*100)/100}'
+ )
+ plt.xlabel('QoI exact')
+ plt.ylabel('QoI pred median')
plt.grid()
plt.show()
-
- plt.scatter((self.X_hf[:, nx]), (self.Y_hf[:, ny]), alpha=1, color='r');
- plt.scatter((self.X_hf[:, nx]), (np.exp(a[:, ny])), alpha=1, marker='x');
- plt.xlabel("X");
- plt.ylabel("QoI pred median");
+ plt.scatter((self.X_hf[:, nx]), (self.Y_hf[:, ny]), alpha=1, color='r')
+ plt.scatter((self.X_hf[:, nx]), (np.exp(a[:, ny])), alpha=1, marker='x')
+ plt.xlabel('X')
+ plt.ylabel('QoI pred median')
plt.grid()
plt.show()
-
# plt.scatter(np.log10(self.Y_hf[:, ny]),np.log10(np.exp(log_Y_cv_sample)), alpha=1,marker='x');
# plt.plot(np.log10(self.Y_hf[:, ny]),np.log10(self.Y_hf[:, ny]),alpha=1,color='r');
# mycor = np.corrcoef(self.Y_hf[:, ny], np.exp(log_Y_cv_sample))[1,0]
@@ -1669,45 +1730,64 @@ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
# plt.grid()
# plt.show()
- X_test = np.genfromtxt(r'C:\Users\SimCenter\Dropbox\SimCenterPC\Stochastic_GP_validation\input_test.csv', delimiter=',')
- Y_test = np.genfromtxt(r'C:\Users\SimCenter\Dropbox\SimCenterPC\Stochastic_GP_validation\output_test.csv', delimiter=',')
- [a,b] = self.m_list[0].predict(X_test)
+ X_test = np.genfromtxt( # noqa: N806
+ r'C:\Users\SimCenter\Dropbox\SimCenterPC\Stochastic_GP_validation\input_test.csv',
+ delimiter=',',
+ )
+ Y_test = np.genfromtxt( # noqa: N806
+ r'C:\Users\SimCenter\Dropbox\SimCenterPC\Stochastic_GP_validation\output_test.csv',
+ delimiter=',',
+ )
+ [a, b] = self.m_list[0].predict(X_test)
if sum(self.linear_list) > 0:
- a = a + self.lin_list[ny].predict(X_test[:, self.linear_list])[:, 0:1]
+ a = (
+ a
+ + self.lin_list[ny].predict(X_test[:, self.linear_list])[:, 0:1]
+ )
# Don't use b
-
- plt.scatter(np.log10(Y_test),np.log10(np.exp(a[:, ny])),alpha=1,marker='x');
- plt.plot(np.log10(Y_test),np.log10(Y_test),alpha=1,color='r');
- mycor_Test = np.corrcoef(Y_test, np.exp(a[:, ny]))[1,0]
- mycor_log_Test = np.corrcoef(np.log(Y_test), a[:, ny])[1,0]
- plt.title(f"test rho={round(mycor_Test*100)/100} rho_log={round(mycor_log_Test*100)/100}")
- plt.xlabel("QoI exact");
- plt.ylabel("QoI pred median");
+ plt.scatter(
+ np.log10(Y_test), np.log10(np.exp(a[:, ny])), alpha=1, marker='x'
+ )
+ plt.plot(np.log10(Y_test), np.log10(Y_test), alpha=1, color='r')
+ mycor_Test = np.corrcoef(Y_test, np.exp(a[:, ny]))[1, 0] # noqa: N806
+ mycor_log_Test = np.corrcoef(np.log(Y_test), a[:, ny])[1, 0] # noqa: N806
+ plt.title(
+ f'test rho={round(mycor_Test*100)/100} rho_log={round(mycor_log_Test*100)/100}'
+ )
+ plt.xlabel('QoI exact')
+ plt.ylabel('QoI pred median')
plt.grid()
plt.show()
-
#
# Predict variance
#
log_var_pred, dum = self.m_var_list[0].predict(X_test)
- log_Y_var_pred_w_measure = b + np.exp(log_var_pred) * self.m_list[ny].Gaussian_noise.parameters
+ log_Y_var_pred_w_measure = ( # noqa: N806
+ b + np.exp(log_var_pred) * self.m_list[ny].Gaussian_noise.parameters
+ )
qualtile_vals = np.arange(0.1, 1, 0.1)
qualtile_reconst = np.zeros([len(qualtile_vals)])
for nqu in range(len(qualtile_vals)):
- Q_b = norm.ppf(qualtile_vals[nqu], loc=a, scale=np.sqrt(log_Y_var_pred_w_measure))
- qualtile_reconst[nqu] = np.sum((np.log(Y_test) < Q_b[:,0])) / Y_test.shape[0]
+ Q_b = norm.ppf( # noqa: N806
+ qualtile_vals[nqu],
+ loc=a,
+ scale=np.sqrt(log_Y_var_pred_w_measure),
+ )
+ qualtile_reconst[nqu] = (
+ np.sum((np.log(Y_test) < Q_b[:, 0])) / Y_test.shape[0] # noqa: UP034
+ )
quant_err = abs(qualtile_reconst - qualtile_vals)
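+ # For example, if 12% of the test responses fall below the predicted
+ # 10% quantile, the coverage error for that quantile is 0.02.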
- print(f"Test: max coverage err: {np.max(quant_err)}")
- print(f"Test: mean coverage err: {np.mean(quant_err)}")
- print("Test: quantile range")
- print(qualtile_reconst)
- print(f"Corr(log) for CV: {round(mycor_log_CV*100)/100}")
- print(f"Corr(log) for Test: {round(mycor_log_Test*100)/100}")
- print("")
+ print(f'Test: max coverage err: {np.max(quant_err)}') # noqa: T201
+ print(f'Test: mean coverage err: {np.mean(quant_err)}') # noqa: T201
+ print('Test: quantile range') # noqa: T201
+ print(qualtile_reconst) # noqa: T201
+ print(f'Corr(log) for CV: {round(mycor_log_CV*100)/100}') # noqa: T201
+ print(f'Corr(log) for Test: {round(mycor_log_Test*100)/100}') # noqa: T201
+ print('') # noqa: T201, FURB105
def verify(self): # noqa: D102
Y_cv = self.Y_cv # noqa: N806
@@ -1753,7 +1833,7 @@ def verify(self): # noqa: D102
# y_pred_var[ns, ny] = y_pred_vars
error_ratio2_Pr = y_pred_var / y_data_var # noqa: N806
- # print(np.max(error_ratio2_Pr, axis=0), flush=True) # noqa: T201
+ # print(np.max(error_ratio2_Pr, axis=0), flush=True) # noqa: RUF100, T201
perc_thr_tmp = np.hstack(
[np.array([1]), np.arange(10, 1000, 50), np.array([999])]
@@ -1789,7 +1869,7 @@ def verify_nugget(self): # noqa: D102
Y = self.Y_hf # noqa: N806
model_hf = self.modelInfoHF # noqa: F841
- self.quantile_reconst_list = np.zeros((self.y_dim,9))
+ self.quantile_reconst_list = np.zeros((self.y_dim, 9))
self.max_coverage = np.zeros((self.y_dim,))
self.mean_coverage = np.zeros((self.y_dim,))
self.inbound50 = np.zeros((self.y_dim,))
@@ -1798,8 +1878,6 @@ def verify_nugget(self): # noqa: D102
if not self.do_mf:
for ny in range(self.y_dim):
if not self.do_logtransform:
-
-
#
# Interquarltile range
#
@@ -1816,24 +1894,27 @@ def verify_nugget(self): # noqa: D102
)
num_in_bound = np.sum((Y[:, ny] > PI_lb) * (Y[:, ny] < PI_ub))
-
#
# coverage range
#
- qualtile_vals = np.arange(0.1,1,0.1)
+ qualtile_vals = np.arange(0.1, 1, 0.1)
qualtile_reconst = np.zeros([len(qualtile_vals)])
for nqu in range(len(qualtile_vals)):
- Q_b = np.squeeze(norm.ppf(qualtile_vals[nqu], loc=Y_cv[:, ny], scale=np.sqrt(Y_cv_var_w_measure[:, ny])))
+ Q_b = np.squeeze( # noqa: N806
+ norm.ppf(
+ qualtile_vals[nqu],
+ loc=Y_cv[:, ny],
+ scale=np.sqrt(Y_cv_var_w_measure[:, ny]),
+ )
+ )
- #print(Y[:, ny])
- #print(Q_b)
- qualtile_reconst[nqu] = np.sum(Y[:, ny] < Q_b)/Y.shape[0]
+ # print(Y[:, ny])
+ # print(Q_b)
+ qualtile_reconst[nqu] = np.sum(Y[:, ny] < Q_b) / Y.shape[0]
quant_err = abs(qualtile_reconst - qualtile_vals)
-
-
norm_residual = (Y[:, ny] - Y_cv[:, ny]) / np.sqrt(
Y_cv_var_w_measure[:, ny]
)
@@ -1865,11 +1946,17 @@ def verify_nugget(self): # noqa: D102
# coverage range
#
- qualtile_vals = np.arange(0.1,1,0.1)
+ qualtile_vals = np.arange(0.1, 1, 0.1)
qualtile_reconst = np.zeros([len(qualtile_vals)])
for nqu in range(len(qualtile_vals)):
- Q_b = norm.ppf(qualtile_vals[nqu], loc=log_Y_cv, scale=np.sqrt(log_Y_cv_var_w_measure)).tolist()
- qualtile_reconst[nqu] = np.sum((np.log(Y[:, ny]) < Q_b))/Y.shape[0]
+ Q_b = norm.ppf( # noqa: N806
+ qualtile_vals[nqu],
+ loc=log_Y_cv,
+ scale=np.sqrt(log_Y_cv_var_w_measure),
+ ).tolist()
+ qualtile_reconst[nqu] = (
+ np.sum((np.log(Y[:, ny]) < Q_b)) / Y.shape[0] # noqa: UP034
+ )
quant_err = abs(qualtile_reconst - qualtile_vals)
@@ -1882,8 +1969,7 @@ def verify_nugget(self): # noqa: D102
)
stats = cramervonmises(norm_residual, 'norm')
-
- self.quantile_reconst_list[ny,:] = qualtile_reconst
+ self.quantile_reconst_list[ny, :] = qualtile_reconst
self.max_coverage[ny] = np.max(quant_err)
self.mean_coverage[ny] = np.mean(quant_err)
self.inbound50[ny] = num_in_bound / Y.shape[0]
@@ -2030,7 +2116,6 @@ def save_model(self, filename): # noqa: C901, D102, PLR0915
#
# Save surrogateinfo
#
-
results = {}
hfJson = {} # noqa: N806
@@ -2171,7 +2256,9 @@ def save_model(self, filename): # noqa: C901, D102, PLR0915
results['valCorrCoeff'][self.g_name[ny]] = self.corr_val[ny]
results['valIQratio'][self.g_name[ny]] = self.inbound50[ny]
results['valPval'][self.g_name[ny]] = self.Gausspvalue[ny]
- results['valCoverageVals'][self.g_name[ny]] = self.quantile_reconst_list[ny].tolist()
+ results['valCoverageVals'][self.g_name[ny]] = self.quantile_reconst_list[
+ ny
+ ].tolist()
if np.isnan(self.NRMSE_val[ny]) or np.isinf(self.NRMSE_val[ny]):
results['valNRMSE'][self.g_name[ny]] = 'null'
@@ -2212,11 +2299,10 @@ def save_model(self, filename): # noqa: C901, D102, PLR0915
] = self.m_var_list[ny].Y.flatten().tolist()
else:
results['modelInfo'][self.g_name[ny] + '_Var'] = 0
-
#
# Save the main model
#
- if not self.do_mf:
+ if not self.do_mf:
results['modelInfo'][self.g_name[ny]] = {}
for parname in self.m_list[ny].parameter_names():
results['modelInfo'][self.g_name[ny]][parname] = list(
@@ -2226,13 +2312,18 @@ def save_model(self, filename): # noqa: C901, D102, PLR0915
#
# Save the linear
#
- if self.do_linear>0:
+ if self.do_linear > 0:
results['modelInfo'][self.g_name[ny] + '_Lin'] = {}
- results['modelInfo'][self.g_name[ny] + '_Lin']['predictorList'] = [] # TBA
- results['modelInfo'][self.g_name[ny] + '_Lin']['coef'] = np.squeeze(self.lin_list[ny].coef_).tolist() #
- results['modelInfo'][self.g_name[ny] + '_Lin']['intercept'] = float(self.lin_list[ny].intercept_) #
+ results['modelInfo'][self.g_name[ny] + '_Lin'][
+ 'predictorList'
+ ] = [] # TBA
+ results['modelInfo'][self.g_name[ny] + '_Lin']['coef'] = np.squeeze(
+ self.lin_list[ny].coef_
+ ).tolist()
+ results['modelInfo'][self.g_name[ny] + '_Lin']['intercept'] = float(
+ self.lin_list[ny].intercept_
+ )
-
if self.isEEUQ or self.isWEUQ:
# read SAM.json
SAMpath = self.work_dir + '/templatedir/SAM.json' # noqa: N806
@@ -2551,11 +2642,11 @@ def sampling(N): # noqa: N803
try:
while True:
time.sleep(0.5)
- if all([r.ready() for r in result]): # noqa: C419, F821
+ if all([r.ready() for r in result]): # type: ignore # noqa: C419, F821
break
except KeyboardInterrupt:
- pool.terminate() # noqa: F821
- pool.join() # noqa: F821
+ pool.terminate() # type: ignore # noqa: F821
+ pool.join() # type: ignore # noqa: F821
else:
IMSEc1 = np.zeros(nc1) # noqa: N806
@@ -2824,14 +2915,14 @@ def normalized_mean_sq_error(self, yp, ye): # noqa: D102
def get_cross_validation_err(self): # noqa: D102
print('Calculating cross validation errors', flush=True) # noqa: T201
time_tmp = time.time()
- X_hf = self.X_hf # contains separate samples # noqa: N806
+ X_hf = self.X_hf # contains separate samples # noqa: N806
nsamp = self.X_hf.shape[0]
ydim = self.Y_hf.shape[1]
- e2 = np.zeros((nsamp,ydim)) # only for unique...
- Y_pred = np.zeros((nsamp,ydim)) # noqa: N806
- Y_pred_var = np.zeros((nsamp,ydim)) # noqa: N806
- Y_pred_var_w_measure = np.zeros((nsamp,ydim)) # noqa: N806
+ e2 = np.zeros((nsamp, ydim)) # only for unique...
+ Y_pred = np.zeros((nsamp, ydim)) # noqa: N806
+ Y_pred_var = np.zeros((nsamp, ydim)) # noqa: N806
+ Y_pred_var_w_measure = np.zeros((nsamp, ydim)) # noqa: N806
#
# Efficient cross validation TODO: check if it works for heteroskedastic
#
@@ -2852,7 +2943,9 @@ def get_cross_validation_err(self): # noqa: D102
# works both for stochastic/stochastic
nugget_mat = (
np.diag(np.squeeze(self.var_str[ny]))
- * self.m_list[ny].Gaussian_noise.parameters # TODO
+ * self.m_list[
+ ny
+ ].Gaussian_noise.parameters # TODO # noqa: TD002, TD004
)
Rmat = self.m_list[ny].kern.K(Xm) # noqa: N806
@@ -2879,9 +2972,11 @@ def get_cross_validation_err(self): # noqa: D102
* self.normVars[ny],
)
- if sum(self.linear_list)>0:
- Y_pred[:, ny] = Y_pred[:, ny] + self.lin_list[ny].predict(X_hf[:,self.linear_list])[:,0]
-
+ if sum(self.linear_list) > 0:
+ Y_pred[:, ny] = (
+ Y_pred[:, ny]
+ + self.lin_list[ny].predict(X_hf[:, self.linear_list])[:, 0]
+ )
else:
Y_hf = self.Y_hf # noqa: N806
@@ -2989,12 +3084,12 @@ def imse(m_tmp, xcandi, xq, phiqr, i, y_idx, doeIdx='HF'): # noqa: ARG001, N803
Y_tmp = np.vstack([Y_hf, np.zeros((1, Y_hf.shape[1]))]) # noqa: N806
# self.set_XY(m_tmp, X_tmp, Y_tmp, X_lf, Y_lf)
X_list_tmp, Y_list_tmp = ( # noqa: N806
- emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821
+ emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821 # type: ignore
[X_tmp, X_lf], [Y_tmp, Y_lf]
)
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
- xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))]) # noqa: F821
+ xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))]) # type: ignore # noqa: F821
dummy, Yq_var = m_tmp.predict(xq_list) # noqa: N806
elif doeIdx.startswith('LF'):
@@ -3009,12 +3104,12 @@ def imse(m_tmp, xcandi, xq, phiqr, i, y_idx, doeIdx='HF'): # noqa: ARG001, N803
Y_tmp = np.vstack([Y_lf, np.zeros((1, Y_lf.shape[1]))]) # noqa: N806
# self.set_XY(m_tmp, X_hf, Y_hf, X_tmp, Y_tmp)
X_list_tmp, Y_list_tmp = ( # noqa: N806
- emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821
+ emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821 # type: ignore
[X_hf, X_tmp], [Y_hf, Y_tmp]
)
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
- xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))]) # noqa: F821
+ xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))]) # type: ignore # noqa: F821
dummy, Yq_var = m_tmp.predict(xq_list) # noqa: N806
else:
print(f'doe method <{doeIdx}> is not supported', flush=True) # noqa: T201
@@ -3215,7 +3310,7 @@ def sampling(self, n): # noqa: D102
if n > 0:
X_samples = np.zeros((n, self.x_dim)) # noqa: N806
# LHS
- sampler = qmc.LatinHypercube(d=self.x_dim, seed = self.global_seed)
+ sampler = qmc.LatinHypercube(d=self.x_dim, seed=self.global_seed)
U = sampler.random(n=n) # noqa: N806
for nx in range(self.x_dim):
if self.xDistTypeArr[nx] == 'U':
@@ -3312,9 +3407,8 @@ def calibrating( # noqa: C901, D103
n_processor,
is_paralle_opt_safe,
init_noise_var,
- init_process_var
+ init_process_var,
): # nuggetVal = self.nuggetVal[ny]
-
np.random.seed(int(ny))
random.seed(int(ny))
@@ -3331,15 +3425,15 @@ def calibrating( # noqa: C901, D103
X = m_tmp.X # noqa: N806
# for parname in m_tmp.parameter_names():
# if parname.endswith('lengthscale'):
- # for nx in range(X.shape[1]): # noqa: B007
+ # for nx in range(X.shape[1]): # noqa: B007, RUF100
# myrange = np.max(X, axis=0) - np.min(X, axis=0)
- # exec('m_tmp.' + parname + '[[nx]] = myrange[nx]') # noqa: S102
+ # exec('m_tmp.' + parname + '[[nx]] = myrange[nx]') # noqa: RUF100, S102
- m_tmp[variance_keyword].constrain_bounded(0.05,2,warning=False)
+ m_tmp[variance_keyword].constrain_bounded(0.05, 2, warning=False)
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
for nx in range(X.shape[1]): # noqa: B007
- myrange = np.max(X, axis=0) - np.min(X, axis=0) # noqa: F841
+ myrange = np.max(X, axis=0) - np.min(X, axis=0) # noqa: F841, RUF100
exec( # noqa: S102
'm_tmp.'
+ parname
@@ -3363,13 +3457,11 @@ def calibrating( # noqa: C901, D103
myrange = np.max(X, axis=0) - np.min(X, axis=0)
exec('m_tmp.' + parname + '[[nx]] = myrange[nx]') # noqa: S102
elif nugget_opt_tmp == 'Heteroscedastic':
-
- if init_noise_var==None:
+ if init_noise_var is None:
init_noise_var = 1
- if init_process_var == None:
+ if init_process_var is None:
init_process_var = 1
-
X = m_tmp.X # noqa: N806
for parname in m_tmp.parameter_names():
@@ -3382,18 +3474,16 @@ def calibrating( # noqa: C901, D103
+ '[[nx]].constrain_bounded(myrange[nx] / X.shape[0]*10, myrange[nx],warning=False)'
)
exec( # noqa: S102
- 'm_tmp.'
- + parname
- + '[[nx]] = myrange[nx]*1'
+ 'm_tmp.' + parname + '[[nx]] = myrange[nx]*1'
)
- #m_tmp[parname][nx] = myrange[nx]*0.1
+ # m_tmp[parname][nx] = myrange[nx]*0.1
# m_tmp[parname][nx].constrain_bounded(myrange[nx] / X.shape[0], myrange[nx]*100)
# TODO change the kernel # noqa: TD002, TD004
-
- #m_tmp[variance_keyword].constrain_bounded(0.05/np.mean(m_tmp.Y_metadata['variance_structure']),2/np.mean(m_tmp.Y_metadata['variance_structure']),warning=False)
- m_tmp.Gaussian_noise.constrain_bounded(0.5*init_noise_var, 2*init_noise_var, warning=False)
-
+ # m_tmp[variance_keyword].constrain_bounded(0.05/np.mean(m_tmp.Y_metadata['variance_structure']),2/np.mean(m_tmp.Y_metadata['variance_structure']),warning=False)
+ m_tmp.Gaussian_noise.constrain_bounded(
+ 0.5 * init_noise_var, 2 * init_noise_var, warning=False
+ )
else:
msg = 'Nugget keyword not identified: ' + nugget_opt_tmp
@@ -3426,32 +3516,31 @@ def calibrating( # noqa: C901, D103
)
if msg == '':
- #m_tmp.optimize()
+ # m_tmp.optimize()
# n=0;
if not do_mf:
-
- #Here
-
- print("Calibrating final surrogate")
- m_tmp = my_optimize_restart(m_tmp,nopt)
-
- # if develop_mode:
- # print(m_tmp)
- # #print(m_tmp.rbf.lengthscale)
- # tmp = m_tmp.predict(m_tmp.X)
- # plt.title("Original Mean QoI")
- # plt.scatter(m_tmp.Y, tmp[0],alpha=0.1)
- # plt.scatter(m_tmp.Y, m_tmp.Y,alpha=0.1)
- # plt.xlabel("exact")
- # plt.ylabel("pred")
- # plt.show()
-
- # m_tmp.optimize_restarts(
- # num_restarts=nopt,
- # parallel=is_paralle_opt_safe,
- # num_processes=n_processor,
- # verbose=True,
- # )
+ # Here
+
+ print('Calibrating final surrogate') # noqa: T201
+ m_tmp = my_optimize_restart(m_tmp, nopt)
+
+ # if develop_mode:
+ # print(m_tmp)
+ # #print(m_tmp.rbf.lengthscale)
+ # tmp = m_tmp.predict(m_tmp.X)
+ # plt.title("Original Mean QoI")
+ # plt.scatter(m_tmp.Y, tmp[0],alpha=0.1)
+ # plt.scatter(m_tmp.Y, m_tmp.Y,alpha=0.1)
+ # plt.xlabel("exact")
+ # plt.ylabel("pred")
+ # plt.show()
+
+ # m_tmp.optimize_restarts(
+ # num_restarts=nopt,
+ # parallel=is_paralle_opt_safe,
+ # num_processes=n_processor,
+ # verbose=True,
+ # )
else:
m_tmp.gpy_model.optimize_restarts(
num_restarts=nopt,
@@ -3459,7 +3548,7 @@ def calibrating( # noqa: C901, D103
num_processes=n_processor,
verbose=False,
)
- #print(m_tmp) # noqa: T201
+ # print(m_tmp) # noqa: RUF100, T201
# while n+20 <= nopt:
# m_tmp.optimize_restarts(num_restarts=20)
# n = n+20
@@ -3470,31 +3559,36 @@ def calibrating( # noqa: C901, D103
return m_tmp, msg, ny
-def my_optimize_restart(m,n_opt):
+
+def my_optimize_restart(m, n_opt): # noqa: D103
init = time.time()
n_sample = len(m.Y)
idx = list(range(n_sample))
n_batch = 700
- n_cluster = int(np.ceil(len(m.Y)/n_batch))
- n_batch = np.ceil(n_sample/n_cluster)
+ n_cluster = int(np.ceil(len(m.Y) / n_batch))
+ n_batch = np.ceil(n_sample / n_cluster)
random.shuffle(idx)
- X_full = m.X
- Y_full = m.Y
+ X_full = m.X # noqa: N806
+ Y_full = m.Y # noqa: N806
log_likelihoods = np.zeros((n_cluster,))
errors = np.zeros((n_cluster,))
m_list = []
for nc in range(n_cluster):
- inside_cluster = idx[int(n_batch*nc):int(np.min([n_batch*(nc+1),n_sample]))]
+ inside_cluster = idx[
+ int(n_batch * nc) : int(np.min([n_batch * (nc + 1), n_sample]))
+ ]
#
# Testing if this works better for parallel run
#
@monkeypatch_method(GPy.core.GP)
- def subsample_XY(self, idx): # noqa: N802, N803
+ def subsample_XY(self, idx): # noqa: N802, N803, RUF100
if self.Y_metadata is not None:
new_meta = self.Y_metadata
- new_meta['variance_structure'] = self.Y_metadata['variance_structure'][idx]
+ new_meta['variance_structure'] = self.Y_metadata[
+ 'variance_structure'
+ ][idx]
self.Y_metadata.update(new_meta)
print('metadata_updated') # noqa: T201
self.set_XY(self.X[idx, :], self.Y[idx, :])
@@ -3510,25 +3604,28 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None): # noqa: N802, N803
self.set_XY(X, Y)
m_subset = m.copy()
- #m_subset.set_XY2(m_subset.X[inside_cluster,:],m_subset.Y[inside_cluster,:],{""})
+ # m_subset.set_XY2(m_subset.X[inside_cluster,:],m_subset.Y[inside_cluster,:],{""})
m_subset.subsample_XY(inside_cluster)
- #m_subset.optimize(max_f_eval=1000)
+ # m_subset.optimize(max_f_eval=1000)
m_subset.optimize_restarts(n_opt)
- variance1 = m_subset.normalizer.std**2
-
- #Option 1
+ variance1 = m_subset.normalizer.std**2
+ # Option 1
tmp_all = m_subset.predict(X_full)
errors[nc] = np.linalg.norm(tmp_all[0][:, 0] - Y_full[:, 0])
- #Option 2
+ # Option 2
m_subset.set_XY2(X_full, Y_full, m.Y_metadata)
variance2 = m_subset.normalizer.std**2
-
- m_subset.Gaussian_noise.variance = m_subset.Gaussian_noise.variance*variance1/variance2
+ m_subset.Gaussian_noise.variance = (
+ m_subset.Gaussian_noise.variance * variance1 / variance2
+ )
log_likelihoods[nc] = m_subset.log_likelihood()
-
m_list += [copy.deepcopy(m_subset)]
- print(" cluster {} among {} : logL {}".format(nc+1, n_cluster, log_likelihoods[nc]))
+ print( # noqa: T201
+ ' cluster {} among {} : logL {}'.format( # noqa: UP032
+ nc + 1, n_cluster, log_likelihoods[nc]
+ )
+ )
# import matplotlib.pyplot as plt
# tmp_all = m_subset.predict(X_full); plt.scatter(tmp_all[0][:, 0], Y_full[:, 0],alpha=0.1); plt.scatter(Y_full[:, 0], Y_full[:, 0],alpha=0.1); plt.show()
@@ -3536,17 +3633,17 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None): # noqa: N802, N803
# best_cluster = np.argmin(errors) # not capturing skedasticity
best_cluster = np.argmax(log_likelihoods)
-
m = m_list[best_cluster]
# m.kern.parameters = kernels[best_cluster][0]
# m.Gaussian_noise.parameters = kernels[best_cluster][1]
# m.parameters_changed()
# tmp = m.predict(X_full[inside_cluster]); plt.scatter(tmp[0][:, 0], Y_full[inside_cluster, 0]); plt.scatter(Y_full[inside_cluster, 0], Y_full[inside_cluster, 0]); plt.show()
- print("Elapsed time: {:.2f} s".format(time.time() - init))
+ print('Elapsed time: {:.2f} s'.format(time.time() - init)) # noqa: T201, UP032
return m
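
For review context: `my_optimize_restart` above shuffles the training set, fits an independent GP per batch of roughly 700 samples, re-evaluates each candidate on the full data, and keeps the one with the best log-likelihood. A stripped-down sketch of just the batching step, with illustrative names (`best_batch_indices` is not part of this module):

```python
import numpy as np

def best_batch_indices(n_sample: int, n_batch: int = 700, seed: int = 0):
    """Shuffle sample indices and split them into near-equal batches."""
    rng = np.random.default_rng(seed)
    idx = rng.permutation(n_sample)
    n_cluster = int(np.ceil(n_sample / n_batch))  # number of batches
    size = int(np.ceil(n_sample / n_cluster))     # rebalanced batch size
    return [idx[i * size : min((i + 1) * size, n_sample)] for i in range(n_cluster)]
```

Each index list would then drive one `optimize_restarts` call, as in the loop above.
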
-def set_XY_indi(
+
+def set_XY_indi( # noqa: N802, D103
m_tmp,
ny,
x_dim,
@@ -3555,8 +3652,8 @@ def set_XY_indi(
create_kernel,
create_gpy_model,
do_logtransform,
- predictStoMeans,
- set_normalizer
+ predictStoMeans, # noqa: N803
+ set_normalizer, # noqa: ARG001
):
#
# check if X dimension has changed...
@@ -3576,16 +3673,18 @@ def set_XY_indi(
if do_logtransform:
if np.min(Y_hf) < 0:
- raise 'Error running SimCenterUQ - Response contains negative values. Please uncheck the log-transform option in the UQ tab'
+ msg = 'Error running SimCenterUQ - Response contains negative values. Please uncheck the log-transform option in the UQ tab'
+ raise ValueError(msg)
Y_hfs = np.log(Y_hf) # noqa: N806
else:
Y_hfs = Y_hf # noqa: N806
# Y_mean=Y_hfs[X_idx]
# Y_mean1, nugget_mean1 = self.predictStoMeans(X_new, Y_mean)
- Y_mean1, nugget_mean1, initial_noise_variance, initial_process_variance = predictStoMeans(X_hf, Y_hfs) # noqa: N806
+ Y_mean1, nugget_mean1, initial_noise_variance, initial_process_variance = ( # noqa: N806
+ predictStoMeans(X_hf, Y_hfs)
+ )
- Y_metadata, m_var, norm_var_str = self.predictStoVars( # noqa: N806
+ Y_metadata, m_var, norm_var_str = self.predictStoVars( # noqa: F821, N806 # type: ignore
X_hf, (Y_hfs - Y_mean1) ** 2, X_hf, Y_hfs, X_hf.shape[0]
)
m_tmp.set_XY2(X_hf, Y_hfs, Y_metadata=Y_metadata)
@@ -3594,12 +3693,23 @@ def set_XY_indi(
var_str = norm_var_str
indices_unique = range(Y_hfs.shape[0])
n_unique_hf = X_hf.shape[0]
- Y_mean = Y_hfs
+ Y_mean = Y_hfs # noqa: N806
- normMeans = 0
- normVars = 1
+ normMeans = 0 # noqa: N806
+ normVars = 1 # noqa: N806
+
+ return (
+ ny,
+ m_tmp,
+ Y_mean,
+ normMeans,
+ normVars,
+ m_var_list,
+ var_str,
+ indices_unique,
+ n_unique_hf,
+ )
- return ny, m_tmp, Y_mean, normMeans, normVars, m_var_list, var_str, indices_unique, n_unique_hf
def closest_node(x, X, ll): # noqa: N803, D103
X = np.asarray(X) # noqa: N806
@@ -3654,8 +3764,6 @@ def read_txt(text_dir, exit_fun): # noqa: D103
return X
-
-
if __name__ == '__main__':
main(sys.argv)
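
Several hunks in this file reformat the same quantile-coverage diagnostic, so here is a minimal, self-contained sketch of what that check computes (a sketch only; `mu` and `var` are placeholder names, not the class's attributes). For each nominal level q, the predictive q-quantile comes from `norm.ppf`, and the empirical fraction of observations falling below it is compared against q:

```python
import numpy as np
from scipy.stats import norm

def coverage_error(y_true, mu, var):
    """Max/mean gap between nominal and empirical predictive coverage."""
    quantile_vals = np.arange(0.1, 1.0, 0.1)
    reconst = np.zeros(len(quantile_vals))
    for i, q in enumerate(quantile_vals):
        q_b = norm.ppf(q, loc=mu, scale=np.sqrt(var))  # predictive q-quantile
        reconst[i] = np.mean(y_true < q_b)             # empirical coverage
    err = np.abs(reconst - quantile_vals)
    return np.max(err), np.mean(err)
```

A well-calibrated surrogate keeps both numbers near zero; that is what the `max coverage err` / `mean coverage err` prints above report.
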
diff --git a/modules/performUQ/UCSD_UQ/runTMCMC.py b/modules/performUQ/UCSD_UQ/runTMCMC.py
index 7911a2b71..a3ecd866d 100644
--- a/modules/performUQ/UCSD_UQ/runTMCMC.py
+++ b/modules/performUQ/UCSD_UQ/runTMCMC.py
@@ -265,7 +265,8 @@ def run_TMCMC( # noqa: C901, N802, PLR0913
# Evaluate log-likelihood at current samples Sm
if run_type == 'runningLocal':
processor_count = mp.cpu_count()
- max_num_processes = 32 # max number of processes to use for multiprocessing when running locally
+
+ max_num_processes = 32 # max number of processes to use for multiprocessing when running locally # noqa: PLR2004
if processor_count > max_num_processes:
processor_count = 8
@@ -309,7 +310,7 @@ def run_TMCMC( # noqa: C901, N802, PLR0913
log_likelihoods_list.append(output[0])
predictions_list.append(output[1])
else:
- from mpi4py.futures import MPIPoolExecutor
+ from mpi4py.futures import MPIPoolExecutor # type: ignore # noqa: I001
executor = MPIPoolExecutor(max_workers=MPI_size)
write_eval_data_to_logfile(
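
The hunk above only caps local multiprocessing; as a sketch (thresholds taken from the hunk, helper name illustrative), the decision reduces to:

```python
import multiprocessing as mp

def pick_worker_count(max_num_processes: int = 32, fallback: int = 8) -> int:
    """Use all local cores, but fall back on very wide machines."""
    processor_count = mp.cpu_count()
    if processor_count > max_num_processes:
        processor_count = fallback  # avoid oversubscription and memory pressure
    return processor_count
```
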
diff --git a/modules/performUQ/common/uq_utilities.py b/modules/performUQ/common/uq_utilities.py
index 938944a53..58ce870de 100644
--- a/modules/performUQ/common/uq_utilities.py
+++ b/modules/performUQ/common/uq_utilities.py
@@ -226,9 +226,12 @@ def get_num_processors(self) -> int: # noqa: D102
if num_processors is None:
num_processors = 1
elif num_processors < 1:
- msg = f'Number of processes must be at least 1. Got {num_processors}.'
- raise ValueError(msg)
- elif num_processors > max_num_processors:
+ raise ValueError( # noqa: TRY003
+ f'Number of processes must be at least 1. Got {num_processors}.' # noqa: EM102
+ )
+ elif num_processors > max_num_processors:
# this is to get past memory problems when running large number processors in a container
num_processors = 8
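
For comparison, a lint-clean shape of the validation above (assumed names; binding the message to a variable first is what removes the need for the TRY003/EM102 suppressions):

```python
def clamp_num_processors(num_processors, max_num_processors):
    """Validate a requested processor count, clamping very large requests."""
    if num_processors is None:
        return 1
    if num_processors < 1:
        msg = f'Number of processes must be at least 1. Got {num_processors}.'
        raise ValueError(msg)
    if num_processors > max_num_processors:
        return 8  # sidesteps container memory problems, per the comment above
    return num_processors
```
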
diff --git a/modules/performUQ/dakota/DakotaUQ.py b/modules/performUQ/dakota/DakotaUQ.py
index 956d29a5e..e98aff784 100644
--- a/modules/performUQ/dakota/DakotaUQ.py
+++ b/modules/performUQ/dakota/DakotaUQ.py
@@ -138,61 +138,66 @@ def main(args): # noqa: C901, D103
returncode = 0
except subprocess.CalledProcessError as e:
result = e.output
- #print('RUNNING DAKOTA ERROR: ', result) # noqa: RUF100, T201
+ # print('RUNNING DAKOTA ERROR: ', result) # noqa: RUF100, T201
returncode = e.returncode # noqa: F841
run_success = False
- dakotaErrFile = os.path.join(cwd, 'dakota.err') # noqa: PTH109, PTH118, N806
- dakotaOutFile = os.path.join(cwd, 'dakota.out') # noqa: PTH109, PTH118, N806
- dakotaTabFile = os.path.join(cwd, 'dakotaTab.out') # noqa: PTH109, PTH118, N806
+ dakotaErrFile = os.path.join(cwd, 'dakota.err') # noqa: N806, PTH109, PTH118, RUF100, W291
+ dakotaOutFile = os.path.join(cwd, 'dakota.out') # noqa: N806, PTH109, PTH118, RUF100
+ dakotaTabFile = os.path.join(cwd, 'dakotaTab.out') # noqa: N806, PTH109, PTH118, RUF100
checkErrFile = os.path.exists(dakotaErrFile) # noqa: PTH110, N806
checkOutFile = os.path.exists(dakotaOutFile) # noqa: PTH110, N806
checkTabFile = os.path.exists(dakotaTabFile) # noqa: F841, N806, PTH110
-
- checkErrSize=-1
- if checkErrFile>0:
- checkErrSize = os.path.getsize(dakotaErrFile) # noqa: PTH202, N806
-
+ checkErrSize = -1 # noqa: N806
+ if checkErrFile:
+ checkErrSize = os.path.getsize(dakotaErrFile) # noqa: F841, N806, PTH202
if checkOutFile == False and checkErrFile == 0: # noqa: E712
-
with open(dakotaErrFile, 'a') as file: # noqa: PTH123
file.write(result.decode('utf-8'))
else:
pass
-
if not run_success:
- # noqa: W293
- display_err = "\nERROR. Dakota did not run. dakota.err not created."
- # # noqa: PLR2044
+ display_err = '\nERROR. Dakota did not run. dakota.err not created.'
# First see if dakota.err is created
with open(dakotaErrFile, 'r') as file: # noqa: PTH123, UP015
dakota_err = file.read()
- display_err = "\nERROR. Workflow did not run: " + dakota_err
-
+ display_err = '\nERROR. Workflow did not run: ' + dakota_err
# Second, see if workflow.err is found
if 'workdir.' in dakota_err:
+ display_err = '\nERROR. Workflow did not run: ' + dakota_err
- display_err = "\nERROR. Workflow did not run: " + dakota_err
-
- start_index = dakota_err.find("workdir.") + len("workdir.")
- end_index = dakota_err.find("\\", start_index)
+ start_index = dakota_err.find('workdir.') + len('workdir.')
+ end_index = dakota_err.find('\\', start_index)
workdir_no = dakota_err[start_index:end_index]
- workflow_err_path = os.path.join(os.getcwd(),f'workdir.{workdir_no}/workflow.err') # noqa: PTH109, PTH118
+ workflow_err_path = os.path.join( # noqa: PTH118
+ os.getcwd(), f'workdir.{workdir_no}/workflow.err' # noqa: PTH109
+ )
if os.path.isfile(workflow_err_path): # noqa: N806, PTH110, PTH113, RUF100
with open(workflow_err_path, 'r') as file: # noqa: PTH123, UP015
workflow_err = file.read()
- if not workflow_err=="": # noqa: SIM201
- display_err = str("\nERROR running the workflow: \n" + workflow_err + "\n Check out more in " + str(os.path.dirname(workflow_err_path)).replace('\\','/'))
+ if workflow_err != '':
+ display_err = str(
+ '\nERROR running the workflow: \n'
+ + workflow_err
+ + '\n Check out more in '
+ + str(os.path.dirname(workflow_err_path)).replace( # noqa: PTH120
+ '\\', '/'
+ )
+ )
print(display_err) # noqa: T201
- exit(0) # sy - this could be -1 like any other tools. But if it is 0, quoFEM,EE,WE,Hydro will highlight the error messages in "red" by using the parser in UI. To use this parser in UI, we need to make UI believe that the analysis is successful. Something that needs improvment # noqa: PLR1722
+ exit( # noqa: PLR1722
+ 0
+ ) # sy - this could be -1 like any other tool. But if it is 0, quoFEM, EE, WE, and Hydro will highlight the error messages in "red" using the parser in the UI. To use this parser, the UI needs to believe the analysis was successful. Something that needs improvement.
if __name__ == '__main__':
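
One note on the `workdir.` parsing reformatted above: the find/slice pair assumes a literal backslash terminates the number, which is Windows-specific. A regex form (hypothetical helper, shown only for comparison) avoids that assumption:

```python
import re

def find_workdir_no(dakota_err: str):
    """Extract the workdir number mentioned in dakota.err, if any."""
    match = re.search(r'workdir\.(\d+)', dakota_err)
    return match.group(1) if match else None
```
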
diff --git a/modules/systemPerformance/CMakeLists.txt b/modules/systemPerformance/CMakeLists.txt
index c707f576b..dbbc4cdca 100644
--- a/modules/systemPerformance/CMakeLists.txt
+++ b/modules/systemPerformance/CMakeLists.txt
@@ -1,3 +1,4 @@
add_subdirectory(REWET)
+add_subdirectory(ResidualDemand)
diff --git a/modules/systemPerformance/ResidualDemand/CMakeLists.txt b/modules/systemPerformance/ResidualDemand/CMakeLists.txt
new file mode 100644
index 000000000..ea4721ab2
--- /dev/null
+++ b/modules/systemPerformance/ResidualDemand/CMakeLists.txt
@@ -0,0 +1,2 @@
+simcenter_add_python_script(SCRIPT run_residual_demand.py)
+simcenter_add_python_script(SCRIPT transportation.py)
\ No newline at end of file
diff --git a/modules/systemPerformance/ResidualDemand/PostProcessingTools/residual_demand_postprocessing.ipynb b/modules/systemPerformance/ResidualDemand/PostProcessingTools/residual_demand_postprocessing.ipynb
new file mode 100644
index 000000000..6cb5fb446
--- /dev/null
+++ b/modules/systemPerformance/ResidualDemand/PostProcessingTools/residual_demand_postprocessing.ipynb
@@ -0,0 +1,315 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Set the path to the trip info file, which is available in Documents/R2D/SystemPerformance/ResidualDemand\n",
+ "# or in Documents/R2D/tmp.SimCener/Results/ResidualDemand\n",
+ "path_to_trip_compare = './trip_info_compare.csv'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " agent_id | \n",
+ " origin_nid_undamaged | \n",
+ " destin_nid_undamaged | \n",
+ " travel_time_undamaged | \n",
+ " travel_time_used_undamaged | \n",
+ " stop_nid_undamaged | \n",
+ " stop_hour_undamaged | \n",
+ " stop_quarter_undamaged | \n",
+ " stop_ssid_undamaged | \n",
+ " origin_nid_damaged | \n",
+ " destin_nid_damaged | \n",
+ " travel_time_damaged | \n",
+ " travel_time_used_damaged | \n",
+ " stop_nid_damaged | \n",
+ " stop_hour_damaged | \n",
+ " stop_quarter_damaged | \n",
+ " stop_ssid_damaged | \n",
+ " delay_duration | \n",
+ " delay_ratio | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 2355 | \n",
+ " 195 | \n",
+ " 600 | \n",
+ " 60.639811 | \n",
+ " 195 | \n",
+ " 7 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 2355 | \n",
+ " 195 | \n",
+ " 600 | \n",
+ " 60.639811 | \n",
+ " 195 | \n",
+ " 7 | \n",
+ " 0 | \n",
+ " 0 | \n",
+ " 0.00 | \n",
+ " 0.000000 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1 | \n",
+ " 2355 | \n",
+ " 2260 | \n",
+ " 600 | \n",
+ " 69.590000 | \n",
+ " 2260 | \n",
+ " 7 | \n",
+ " 3 | \n",
+ " 0 | \n",
+ " 2355 | \n",
+ " 2260 | \n",
+ " 600 | \n",
+ " 99.440000 | \n",
+ " 2260 | \n",
+ " 7 | \n",
+ " 3 | \n",
+ " 0 | \n",
+ " 29.85 | \n",
+ " 0.428941 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 2 | \n",
+ " 2355 | \n",
+ " 1549 | \n",
+ " 600 | \n",
+ " 204.130000 | \n",
+ " 1549 | \n",
+ " 7 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 2355 | \n",
+ " 1549 | \n",
+ " 600 | \n",
+ " 211.690000 | \n",
+ " 1549 | \n",
+ " 7 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 7.56 | \n",
+ " 0.037035 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 3 | \n",
+ " 1098 | \n",
+ " 2354 | \n",
+ " 600 | \n",
+ " 262.320000 | \n",
+ " 2354 | \n",
+ " 7 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " 1098 | \n",
+ " 2354 | \n",
+ " 600 | \n",
+ " 262.140000 | \n",
+ " 2354 | \n",
+ " 7 | \n",
+ " 2 | \n",
+ " 0 | \n",
+ " -0.18 | \n",
+ " -0.000686 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 4 | \n",
+ " 2355 | \n",
+ " 1355 | \n",
+ " 600 | \n",
+ " 223.610000 | \n",
+ " 1355 | \n",
+ " 7 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 2355 | \n",
+ " 1355 | \n",
+ " 600 | \n",
+ " 223.610000 | \n",
+ " 1355 | \n",
+ " 7 | \n",
+ " 1 | \n",
+ " 0 | \n",
+ " 0.00 | \n",
+ " 0.000000 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " agent_id origin_nid_undamaged destin_nid_undamaged \\\n",
+ "0 0 2355 195 \n",
+ "1 1 2355 2260 \n",
+ "2 2 2355 1549 \n",
+ "3 3 1098 2354 \n",
+ "4 4 2355 1355 \n",
+ "\n",
+ " travel_time_undamaged travel_time_used_undamaged stop_nid_undamaged \\\n",
+ "0 600 60.639811 195 \n",
+ "1 600 69.590000 2260 \n",
+ "2 600 204.130000 1549 \n",
+ "3 600 262.320000 2354 \n",
+ "4 600 223.610000 1355 \n",
+ "\n",
+ " stop_hour_undamaged stop_quarter_undamaged stop_ssid_undamaged \\\n",
+ "0 7 0 0 \n",
+ "1 7 3 0 \n",
+ "2 7 2 0 \n",
+ "3 7 2 0 \n",
+ "4 7 1 0 \n",
+ "\n",
+ " origin_nid_damaged destin_nid_damaged travel_time_damaged \\\n",
+ "0 2355 195 600 \n",
+ "1 2355 2260 600 \n",
+ "2 2355 1549 600 \n",
+ "3 1098 2354 600 \n",
+ "4 2355 1355 600 \n",
+ "\n",
+ " travel_time_used_damaged stop_nid_damaged stop_hour_damaged \\\n",
+ "0 60.639811 195 7 \n",
+ "1 99.440000 2260 7 \n",
+ "2 211.690000 1549 7 \n",
+ "3 262.140000 2354 7 \n",
+ "4 223.610000 1355 7 \n",
+ "\n",
+ " stop_quarter_damaged stop_ssid_damaged delay_duration delay_ratio \n",
+ "0 0 0 0.00 0.000000 \n",
+ "1 3 0 29.85 0.428941 \n",
+ "2 2 0 7.56 0.037035 \n",
+ "3 2 0 -0.18 -0.000686 \n",
+ "4 1 0 0.00 0.000000 "
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "trip_compare = pd.read_csv(path_to_trip_compare)\n",
+ "trip_compare.head(5)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Mean delay ratio is: 16.5 %\n"
+ ]
+ }
+ ],
+ "source": [
+ "delay_ratio_pct = trip_compare['delay_ratio'] * 100\n",
+ "mean_delay_ratio_pct = delay_ratio_pct.mean()\n",
+ "print(f'Mean delay ratio is: {mean_delay_ratio_pct:.1f} %')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "(-50.0, 200.0)"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlEAAAGwCAYAAACJjDBkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAAA5P0lEQVR4nO3de1yUdd7/8feMnE1AQAELETt4ykNmGR3YTBJLS8u7O1dLtjyUoaW2tbqbh6xNs/WQ5mbeW1q7aYf73nbLysSzJpriWclOGq4KLCqichqY7++P1vk1gQqXgzPC6/l4zEPm+n7nw2fmy4xvrrnmwmaMMQIAAECN2L3dAAAAwKWIEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAs8PN2A5cCp9Opw4cPq1GjRrLZbN5uBwAAVIMxRidPnlSzZs1kt3t+vxEhqhoOHz6suLg4b7cBAAAsOHjwoK644gqP1yVEVUOjRo0kSfv371dERISXu4HD4dCyZcvUo0cP+fv7e7udeo218B2she9gLXzHsWPHlJCQ4Pp/3NMIUdVw5i28Ro0aKTQ01MvdwOFwKCQkRKGhobxAeRlr4TtYC9/BWvgOh8MhSbV2KA4HlgMAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAAC/y83QAgSdnZ2crPz6/WXKfTKUnasWOH7Paqfw+IiopS8+bNPdYfAAC/RIiC12VnZ6tV6zYqKS6q1vzg4GAtXrxYSUlJKi4urnJOUHCI9n2dRZACANQaQhS8Lj8/XyXFRYrs/bT8I+POOz/IzyZJih4wVSXlptK44+hBHV0yXfn5+YQoAECtIUTBZ/hHxikw5qrzzgtoYCRVKCC6pUyFrfYbAwCgChxYDgAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFXg1RFRUVGj9+vBISEhQcHKwrr7xSL7zwgowxrjnGGE2YMEGxsbEKDg5WcnKyvv32W7c6x44d08CBAxUaGqrw8HANHjxYp06dcpuzc+dO3XbbbQoKClJcXJymTZt2Ue4jAACom7waol5++WW9/vrreu2115SVlaWXX35Z06ZN05w5c1xzpk2bptmzZ2vevHnatGmTGjZsqJSUFJWUlLjmDBw4UHv27FF6erqWLFmitWvXatiwYa7xwsJC9ejRQ/Hx8crMzNQrr7yiSZMmaf78+Rf1/gIAgLrDz5vffMOGDerTp4969eolSWrRooUWL16sr776StJPe6FmzZql5557Tn369JEkvfPOO4qOjtY//vEP9e/fX1lZWVq6dKk2b96sLl26SJLmzJmju+++W3/605/UrFkzvfvuuyorK9Nbb72lgIAAtWvXTtu3b9eMGTPcwtYZpaWlKi0tdV0vLCyUJDkcDjkcjlp9TOojp9Op4OBgBfnZFNDAnHd+oN24/ftLNj+bgoOD5XQ6Wa9adubx5XH2PtbCd7AWvqO218CrIermm2/W/Pnz9c033+iaa67Rjh07tH79es2YMUOStH//fuXk5Cg5Odl1m7CwMHXt2lUZGRnq37+/MjIyFB4e7gpQkpScnCy73a5NmzbpvvvuU0ZGhpKSkhQQEOCak5KSopdfflnHjx9X48aN3fqaMmWKnn/++Ur9rlq1SiEhIZ5+GCBp8eLF//mqotq3eaGL8ywj8dI9i3Xo0CEdOnTognvD+aWnp3u7BfwHa+E7WAvvKyoqqtX6Xg1RY8eOVWFhoVq3bq0GDRqooqJCf/zjHzVw4EBJUk5OjiQpOjra7XbR0dGusZycHDVt2tRt3M/PTxEREW5zEhISKtU4M/bLEDVu3DiNGTPGdb2wsFBxcXHq1q2bIiMjL/Ru4xd27NihpKQkRQ+YqoDoluedH2g3eqGLU+O32FXqtFUaL8v9QbmLxmrt2rXq2LFjbbSM/3A4HEpPT9edd94pf39/b7dTr7EWvoO18B1Hjx6t1fpeDVEffPCB3n33XS1atMj1FtuoUaPUrFkzpaameq2vwMBABQYGVtru7+/PE6IW2O12FRcXq6TcyFRUDkVnU+q0qbSK+aXlRsXFxbLb7azXRcJzw3ewFr6DtfC+2n78vRqinnnmGY0dO1b9+/eXJLVv314//vijpkyZotTUVMXExEiScnNzFRsb67pdbm6uOnXqJEmKiYlRXl6eW93y8nIdO3bMdfuYmBjl5ua6zTlz/cwcAACAmvDqp/OKiopkt7u30KBBAzmdPx3rkpCQoJiYGK1YscI1XlhYqE2bNikxMVGSlJiYqIKCAmVmZrrmrFy5Uk6nU127dnXNWbt2rdsBZunp6WrVqlWlt/IAAACqw6sh6p577tEf//hHffrppzpw4IA++ugjzZgxQ/fdd58kyWazadSoUXrxxRf18ccfa9euXRo0aJCaNWumvn37SpLatGmjnj17aujQofrqq6/05ZdfasSIEerfv7+aNWsmSRowYIACAgI0ePBg7dmzR++//75effVVt+OeAAAAasKrb+fNmTNH48eP1xNPPKG8vDw1a9ZMjz32mCZMmOCa8+yzz+r06dMaNmyYCgoKdOutt2rp0qUKCgpyzXn33Xc1YsQIde/eXXa7Xf369dPs2bNd42FhYVq2bJnS0tJ0/fXXKyoqShMmTKjy9AYAAADV4dUQ1ahRI82aNUuzZs066xybzabJkydr8uTJZ50TERGhRYsWnfN7dejQQevWrbPaKgAAgBv+dh4AAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABggZ+3GwAuFdnZ2crPz/dozaioKDVv3tyjNQEAFwchCqiG7OxstWrdRiXFRR6tGxQcon1fZxGkAOASRIgCqiE/P18lxUWK7P20/CPjPFLTcfSgji6Zrvz8fEIUAFyCCFFADfhHxikw5ipvtwEA8AEcWA4AAGABIQoAAMACQhQAAIAFHBOFOisrK8snawEA6gZCFOqcilPHJZtNDz30kLdbAQDUYYQo1DnO0lOSMR49HUHxD1t0Yt3fPFI
LAFA3EKJQZ3nydASOowc9UgcAUHdwYDkAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACr4eoQ4cO6aGHHlJkZKSCg4PVvn17bdmyxTVujNGECRMUGxur4OBgJScn69tvv3WrcezYMQ0cOFChoaEKDw/X4MGDderUKbc5O3fu1G233aagoCDFxcVp2rRpF+X+AQCAusmrIer48eO65ZZb5O/vr88//1x79+7V9OnT1bhxY9ecadOmafbs2Zo3b542bdqkhg0bKiUlRSUlJa45AwcO1J49e5Senq4lS5Zo7dq1GjZsmGu8sLBQPXr0UHx8vDIzM/XKK69o0qRJmj9//kW9vwAAoO7w8+Y3f/nllxUXF6cFCxa4tiUkJLi+NsZo1qxZeu6559SnTx9J0jvvvKPo6Gj94x//UP/+/ZWVlaWlS5dq8+bN6tKliyRpzpw5uvvuu/WnP/1JzZo107vvvquysjK99dZbCggIULt27bR9+3bNmDHDLWwBAABUl1dD1Mcff6yUlBQ98MADWrNmjS6//HI98cQTGjp0qCRp//79ysnJUXJysus2YWFh6tq1qzIyMtS/f39lZGQoPDzcFaAkKTk5WXa7XZs2bdJ9992njIwMJSUlKSAgwDUnJSVFL7/8so4fP+6250uSSktLVVpa6rpeWFgoSXI4HHI4HLXyWNRnTqdTwcHBCvKzKaCBOe/8QLtx+/eXyv0b1KheddRGTZufTcHBwXI6nZfsz9WZvi/V/usS1sJ3sBa+o7bXwKsh6ocfftDrr7+uMWPG6Pe//702b96sJ598UgEBAUpNTVVOTo4kKTo62u120dHRrrGcnBw1bdrUbdzPz08RERFuc36+h+vnNXNyciqFqClTpuj555+v1O+qVasUEhJyAfcYZ7N48eL/fFVR7du80MVZ9cCNN0upN9e43jnVRk3FS/cs1qFDh3To0CEP1fSO9PR0b7eA/2AtfAdr4X1FRUW1Wt+rIcrpdKpLly566aWXJEnXXXeddu/erXnz5ik1NdVrfY0bN05jxoxxXS8sLFRcXJy6deumyMhIr/VVV+3YsUNJSUmKHjBVAdEtzzs/0G70Qhenxm+xq9RpqzR+Omudji2dU+161VEbNctyf1DuorFau3atOnbs6JGaF5vD4VB6erruvPNO+fv7e7udeo218B2she84evRordb3aoiKjY1V27Zt3ba1adNG//d//ydJiomJkSTl5uYqNjbWNSc3N1edOnVyzcnLy3OrUV5ermPHjrluHxMTo9zcXLc5Z66fmfNzgYGBCgwMrLTd39+fJ0QtsNvtKi4uVkm5kamoHIrOptRpU2kV80scFZbqnUtt1CwtNyouLpbdbr/kf654bvgO1sJ3sBbeV9uPv1c/nXfLLbdo3759btu++eYbxcfHS/rpIPOYmBitWLHCNV5YWKhNmzYpMTFRkpSYmKiCggJlZma65qxcuVJOp1Ndu3Z1zVm7dq3be6Pp6elq1apVpbfyAAAAqsOrIWr06NHauHGjXnrpJX333XdatGiR5s+fr7S0NEmSzWbTqFGj9OKLL+rjjz/Wrl27NGjQIDVr1kx9+/aV9NOeq549e2ro0KH66quv9OWXX2rEiBHq37+/mjVrJkkaMGCAAgICNHjwYO3Zs0fvv/++Xn31Vbe37AAAAGrCq2/n3XDDDfroo480btw4TZ48WQkJCZo1a5YGDhzomvPss8/q9OnTGjZsmAoKCnTrrbdq6dKlCgoKcs159913NWLECHXv3l12u139+vXT7NmzXeNhYWFatmyZ0tLSdP311ysqKkoTJkzg9AYAAMAyr4YoSerdu7d69+591nGbzabJkydr8uTJZ50TERGhRYsWnfP7dOjQQevWrbPcJwAAwM95/c++AAAAXIoIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJLIaply5Y6evRope0FBQVq2bLlBTcFAADg6yyFqAMHDqiioqLS9tLSUh06dOiCmwIAAPB1fjWZ/PHHH7u+/uKLLxQWFua6XlFRoRUrVqhFixYeaw4AAMBX1ShE9e3bV5Jks9mUmprqNubv768WLVpo+vTpHmsOAADAV9UoRDmdTklSQkKCNm/erKioqFppCgAAwNfVKESdsX//fk/3AQAAcEmxFKIkacWKFVqxYoXy8vJce6jOeOutty64MQAAAF9mKUQ9//zzmjx5srp06aLY2FjZbDZP9wUAAODTLIWoefPmaeHChXr44Yc93Q8AAMAlwdJ5osrKynTzzTd7uhcAAIBLhqUQNWTIEC1atMjTvQAAAFwyLL2dV1JSovnz52v58uXq0KGD/P393cZnzJjhkeYAAAB8laUQtXPnTnXq1EmStHv3brcxDjIHAAD1gaUQtWrVKk/3AQAAcEmxdEwUAABAfWdpT1S3bt3O+bbdypUrLTcEAABwKbAUos4cD3WGw+HQ9u3btXv37kp/mBgAAKAushSiZs6cWeX2SZMm6dSpUxfUEAAAwKXAo8dEPfTQQ/zdPAAAUC94NERlZGQoKCjIkyUBAAB8kqW38+6//36368YYHTlyRFu2bNH48eM90hgAAIAvsxSiwsLC3K7b7Xa1atVKkydPVo8ePTzSGAAAgC+zFKIWLFjg6T4AAAAuKZZC1BmZmZnKysqSJLVr107XXXedR5oCAADwdZZCVF5envr376/Vq1crPDxcklRQUKBu3brpvffeU5MmTTzZIwAAgM+x9Om8kSNH6uTJk9qzZ4+OHTumY8eOaffu3SosLNSTTz7p6R4BAAB8jqU9UUuXLtXy5cvVpk0b17a2bdtq7ty5HFgOAADqBUt7opxOp/z9/Stt9/f3l9PpvOCmAAAAfJ2lEHXHHXfoqaee0uHDh13bDh06pNGjR6t79+4eaw4AAMBXWQpRr732mgoLC9WiRQtdeeWVuvLKK5WQkKDCwkLNmTPH0z0CAAD4HEvHRMXFxWnr1q1avny5vv76a0lSmzZtlJyc7NHmAAAAfFWN9kStXLlSbdu2VWFhoWw2m+68806NHDlSI0eO1A033KB27dpp3bp1tdUrAACAz6hRiJo1a5aGDh2q0NDQSmNhYWF67LHHNGPGDI81BwAA4KtqFKJ27Nihnj17nnW8R48eyszMvOCmAAAAfF2NQlRubm6VpzY4w8/PT//+978vuCkAAABfV6MQdfnll2v37t1nHd+5c6diY2MvuCkAAABfV6MQdffdd2v8+PEqKSmpNFZcXKyJEyeqd+/eHm
sOAADAV9XoFAfPPfec/v73v+uaa67RiBEj1KpVK0nS119/rblz56qiokJ/+MMfaqVRAAAAX1KjEBUdHa0NGzZo+PDhGjdunIwxkiSbzaaUlBTNnTtX0dHRtdIoAACAL6nxyTbj4+P12Wef6fjx4/ruu+9kjNHVV1+txo0b10Z/AAAAPsnSGcslqXHjxrrhhhs82QsAAMAlw9LfzgMAAKjvCFEAAAAWEKIAAAAs8JkQNXXqVNlsNo0aNcq1raSkRGlpaYqMjNRll12mfv36KTc31+122dnZ6tWrl0JCQtS0aVM988wzKi8vd5uzevVqde7cWYGBgbrqqqu0cOHCi3CPAABAXeYTIWrz5s1644031KFDB7fto0eP1ieffKIPP/xQa9as0eHDh3X//fe7xisqKtSrVy+VlZVpw4YNevvtt7Vw4UJNmDDBNWf//v3q1auXunXrpu3bt2vUqFEaMmSIvvjii4t2/wAAQN1j+dN5nnLq1CkNHDhQ//M//6MXX3zRtf3EiRN68803tWjRIt1xxx2SpAULFqhNmzbauHGjbrrpJi1btkx79+7V8uXLFR0drU6dOumFF17Q7373O02aNEkBAQGaN2+eEhISNH36dElSmzZttH79es2cOVMpKSlV9lRaWqrS0lLX9cLCQkmSw+GQw+GorYei3nI6nQoODlaQn00BDcx55wfajdu/v1Tu36BG9aqjNmra/GwKDg6W0+m8ZH+uzvR9qfZfl7AWvoO18B21vQY2c+aMmV6SmpqqiIgIzZw5U7fffrs6deqkWbNmaeXKlerevbuOHz+u8PBw1/z4+HiNGjVKo0eP1oQJE/Txxx9r+/btrvH9+/erZcuW2rp1q6677jolJSWpc+fOmjVrlmvOggULNGrUKJ04caLKniZNmqTnn3++0vZFixYpJCTEU3cdAADUoqKiIg0YMEAnTpxQaGiox+t7dU/Ue++9p61bt2rz5s2VxnJychQQEOAWoKSfzpqek5PjmvPLM6SfuX6+OYWFhSouLlZwcHCl7z1u3DiNGTPGdb2wsFBxcXHq1q2bIiMja35HcU47duxQUlKSogdMVUB0y/POD7QbvdDFqfFb7Cp12iqNn85ap2NL51S7XnXURs2y3B+Uu2is1q5dq44dO3qk5sXmcDiUnp6uO++8U/7+/t5up15jLXwHa+E7jh49Wqv1vRaiDh48qKeeekrp6ekKCgryVhtVCgwMVGBgYKXt/v7+PCFqgd1uV3FxsUrKjUxF5VB0NqVOm0qrmF/iqLBU71xqo2ZpuVFxcbHsdvsl/3PFc8N3sBa+g7Xwvtp+/L12YHlmZqby8vLUuXNn+fn5yc/PT2vWrNHs2bPl5+en6OholZWVqaCgwO12ubm5iomJkSTFxMRU+rTemevnmxMaGlrlXigAAIDq8FqI6t69u3bt2qXt27e7Ll26dNHAgQNdX/v7+2vFihWu2+zbt0/Z2dlKTEyUJCUmJmrXrl3Ky8tzzUlPT1doaKjatm3rmvPzGmfmnKkBAABghdfezmvUqJGuvfZat20NGzZUZGSka/vgwYM1ZswYRUREKDQ0VCNHjlRiYqJuuukmSVKPHj3Utm1bPfzww5o2bZpycnL03HPPKS0tzfV23OOPP67XXntNzz77rB599FGtXLlSH3zwgT799NOLe4cBAECd4vVTHJzLzJkzZbfb1a9fP5WWliolJUV//vOfXeMNGjTQkiVLNHz4cCUmJqphw4ZKTU3V5MmTXXMSEhL06aefavTo0Xr11Vd1xRVX6C9/+ctZT28AAABQHT4VolavXu12PSgoSHPnztXcuXPPepv4+Hh99tln56x7++23a9u2bZ5oEQAAQJKPnLEcAADgUkOIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwwKshasqUKbrhhhvUqFEjNW3aVH379tW+ffvc5pSUlCgtLU2RkZG67LLL1K9fP+Xm5rrNyc7OVq9evRQSEqKmTZvqmWeeUXl5uduc1atXq3PnzgoMDNRVV12lhQsX1vbdAwAAdZhXQ9SaNWuUlpamjRs3Kj09XQ6HQz169NDp06ddc0aPHq1PPvlEH374odasWaPDhw/r/vvvd41XVFSoV69eKisr04YNG/T2229r4cKFmjBhgmvO/v371atXL3Xr1k3bt2/XqFGjNGTIEH3xxRcX9f4CAIC6w8+b33zp0qVu1xcuXKimTZsqMzNTSUlJOnHihN58800tWrRId9xxhyRpwYIFatOmjTZu3KibbrpJy5Yt0969e7V8+XJFR0erU6dOeuGFF/S73/1OkyZNUkBAgObNm6eEhARNnz5dktSmTRutX79eM2fOVEpKykW/38DPZWVleaxWVFSUmjdv7rF6AICz82qI+qUTJ05IkiIiIiRJmZmZcjgcSk5Ods1p3bq1mjdvroyMDN10003KyMhQ+/btFR0d7ZqTkpKi4cOHa8+ePbruuuuUkZHhVuPMnFGjRlXZR2lpqUpLS13XCwsLJUkOh0MOh8Mj9xX/n9PpVHBwsIL8bApoYM47P9Bu3P79pXL/BjWqVx21UdNZekLBISEaOnSoR+pJUlBwiDK3bNYVV1zhsZrncub5wPPC+1gL38Fa+I7aXgOfCVFOp1OjRo3SLbfcomuvvVaSlJOTo4CAAIWHh7vNjY6OVk5OjmvOzwPUmfEzY+eaU1hYqOLiYgUHB7uNTZkyRc8//3ylHletWqWQkBDrdxJntXjx4v98VVHt27zQxVn1wI03S6k317jeOdVKzY5S/0WeqfUzO3fu1M6dOz1e91zS09Mv6vfD2bEWvoO18L6ioqJare8zISotLU27d+/W+vXrvd2Kxo0bpzFjxriuFxYWKi4uTt26dVNkZKQXO6ubduzYoaSkJEUPmKqA6JbnnR9oN3qhi1Pjt9hV6rRVGj+dtU7Hls6pdr3quBRqluX+oNxFY7V27Vp17NjRAx2en8PhUHp6uu688075+/tflO+JqrEWvoO18B1Hjx6t1fo+EaJGjBihJUuWaO3atW5vQ8TExKisrEwFBQVue6Nyc3MVExPjmvPVV
1+51Tvz6b2fz/nlJ/pyc3MVGhpaaS+UJAUGBiowMLDSdn9/f54QtcBut6u4uFgl5UamonIoOptSp02lVcwvcVRYqncul0LN0nKj4uJi2e32i/5zynPDd7AWvoO18L7afvy9+uk8Y4xGjBihjz76SCtXrlRCQoLb+PXXXy9/f3+tWLHCtW3fvn3Kzs5WYmKiJCkxMVG7du1SXl6ea056erpCQ0PVtm1b15yf1zgz50wNAACAmvLqnqi0tDQtWrRI//znP9WoUSPXMUxhYWEKDg5WWFiYBg8erDFjxigiIkKhoaEaOXKkEhMTddNNN0mSevToobZt2+rhhx/WtGnTlJOTo+eee05paWmuvUmPP/64XnvtNT377LN69NFHtXLlSn3wwQf69NNPvXbfAQDApc2re6Jef/11nThxQrfffrtiY2Ndl/fff981Z+bMmerdu7f69eunpKQkxcTE6O9//7trvEGDBlqyZIkaNGigxMREPfTQQxo0aJAmT57smpOQkKBPP/1U6enp6tixo6ZPn66//OUvnN4AAABY5tU9Ucac/6PiQUFBmjt3rubOnXvWOfHx8frss8/OWef222/Xtm3batwjAABAVfjbeQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKAAAAAsIUQAAABYQogAAACwgRAEAAFhAiAIAALCAEAUAAGABIQoAAMACQhQAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwwM/bDQDwrKysLI/Wi4qKUvPmzT1aEwDqAkIUUEdUnDou2Wx66KGHPFo3KDhE+77OIkgBwC8QooA6wll6SjJGkb2fln9knEdqOo4e1NEl05Wfn0+IAoBfIEQBdYx/ZJwCY67ydhsAUOdxYDkAAIAFhCgAAAALCFEAAAAWEKIAAAAsIEQBAABYQIgCAACwgBAFAABgASEKAADAAkIUAACABYQoAAAACwhRAAAAFhCiAAAALOAPEAM4r6ysrCq3O51OSdKOHTtkt1f/d7KoqCg1b97cI70BgLcQogCcVcWp45LNpoceeqjK8eDgYC1evFhJSUkqLi6udt2g4BDt+zqLIAXgkkaIAnBWztJTkjGK7P20/CPjKo0H+dkkSdEDpqqk3FSrpuPoQR1dMl35+fmEKACXNEIUgPPyj4xTYMxVlbYHNDCSKhQQ3VKmwnbxGwMAL+LAcgAAAAsIUQAAABYQogAAACzgmCgAXnG20yZYwSkTAHgDIQrARXW+0yZYwSkTAHgDIQrARXW+0ybUFKdMAOAthCgAXnG20yYAwKWCEIUay87OVn5+vsfqefLYGNRfnv454jgrAOdDiEKNZGdnq1XrNiopLvJ2K4Ck2jnGSuI4KwDnR4hCjeTn56ukuMhjx7NIUvEPW3Ri3d88Ugv1j6ePsZL+/3FW69atU5s2bTxSU5JKS0sVGBjosXoSe8wAbyJEwRJPHs/iOHrQI3VQv3nyZ7K29m7JZpeM06Mlz+wxi42N9WhdAOdXr0LU3Llz9corrygnJ0cdO3bUnDlzdOONN3q7LQA+pjb2bp3Z41pbe8xatWolSdqxY4fsduvnUa6NvWXsgUNdVW9C1Pvvv68xY8Zo3rx56tq1q2bNmqWUlBTt27dPTZs29XZ7AHxQbexxra09ZsHBwVq8eLGSkpJUXFxsvWgt7C2rzT1wvhyk/vWvf+n48eMerenp8OjpDwpJ9Svg1psQNWPGDA0dOlSPPPKIJGnevHn69NNP9dZbb2ns2LGWanr6h+9S+A2QT9IBvuPne8waRf/0n1b0gKkqKTeW6tXG3rLa3gPnqWPWPPla6XT+FBg7X99FBcePeaTmGYGBQfq///tfj7x9e+TIEfX7rwdUWnIBobsKnuzxDKvrU1hY6LEeqlIvQlRZWZkyMzM1btw41za73a7k5GRlZGRUml9aWqrS0lLX9RMnTkiSNmzYoEaNGkmS8vLyNOyxxz37w3ep/AYYFCTb0f0yztLzT64G+8kjNarp9JOKiuLkPHJQpvzC69VGj96o6Y0ez7cWF6PPS2FtartmgMrl5yxRUVGR/JwlCrD4lK+wO131/D3UY23UdJYUKCg4WEOGDPFIPUkefa0MDg7W3LlzZZNRk1v+Ww0aRXqkriP/oE7vWaH/+q//8kg9SbJJPt+jJMvrExQUJEkyxtovFudl6oFDhw4ZSWbDhg1u25955hlz4403Vpo/ceJEI4kLFy5cuHDhUgcu33//fa3ki3qxJ6qmxo0bpzFjxriuFxQUKD4+XtnZ2QoLC/NiZ5B+2j0bFxengwcPKjQ01Nvt1Gushe9gLXwHa+E7Tpw4oebNmysiIqJW6teLEBUVFaUGDRooNzfXbXtubq5iYmIqzQ8MDKzyvdewsDCeED4kNDSU9fARrIXvYC18B2vhOy7kE6vnrFsrVX1MQECArr/+eq1YscK1zel0asWKFUpMTPRiZwAA4FJVL/ZESdKYMWOUmpqqLl266MYbb9SsWbN0+vRp16f1AAAAaqLehKgHH3xQ//73vzVhwgTl5OSoU6dOWrp0qaKjo89728DAQE2cONHjpx+ANayH72AtfAdr4TtYC99R22thM6a2PvcHAABQd9WLY6IAAAA8jRAFAABgASEKAADAAkIUAACABYSo82jRooVsNpvbZerUqW5zdu7cqdtuu01BQUGKi4vTtGnTvNRt3Td37ly1aNFCQUFB6tq1q7766itvt1TnTZo0qdJzoHXr1q7xkpISpaWlKTIyUpdddpn69etX6cS2sGbt2rW655571KxZM9lsNv3jH/9wGzfGaMKECYqNjVVwcLCSk5P17bffus05duyYBg4cqNDQUIWHh2vw4ME6derURbwXdcf51uM3v/lNpedKz5493eawHhduypQpuuGGG9SoUSM1bdpUffv21b59+9zmVOd1KTs7W7169VJISIiaNm2qZ555RuXl1fwjoP9BiKqGyZMn68iRI67LyJEjXWOFhYXq0aOH4uPjlZmZqVdeeUWTJk3S/Pnzvdhx3fT+++9rzJgxmjhxorZu3aqOHTsqJSVFeXl53m6tzmvXrp3bc2D9+vWusdGjR+uTTz7Rhx9+qDVr1ujw4cO6//77vdht3XH69Gl17NhRc+fOrXJ82rRpmj17tubNm6dNmzapYcOGSklJUUlJiWvOwIEDtWfPHqWnp2vJkiVau3athg0bdrHuQp1yvvWQpJ49e7o9VxYvXuw2znpcuDVr1igtLU0bN25Uenq6HA6HevToodOnT7vmnO91qaKiQr169VJZWZk2bNigt99+WwsXLtSECRNq1kyt/EW+OiQ+Pt7MnDnzrON//vOfTePGjU1paalr2+9+9zvTqlWri9Bd/XLjjTeatLQ01/WKigrTrFkzM2XKFC92VfdNnDjR
dOzYscqxgoIC4+/vbz788EPXtqysLCPJZGRkXKQO6wdJ5qOPPnJddzqdJiYmxrzyyiuubQUFBSYwMNAsXrzYGGPM3r17jSSzefNm15zPP//c2Gw2c+jQoYvWe130y/UwxpjU1FTTp0+fs96G9agdeXl5RpJZs2aNMaZ6r0ufffaZsdvtJicnxzXn9ddfN6GhoW7/n58Pe6KqYerUqYqMjNR1112nV155xW13X0ZGhpKSkhQQEODalpKSon379un48ePeaLdOKisrU2ZmppKTk13b7Ha7kpOTlZGR4cXO6odvv/1WzZo1U8uWLTVw4EBlZ2dLkjIzM+VwONzWpXXr1mrevDnrUsv279+vnJwct8c+LCxMXbt2dT32GRkZCg8PV5cuXVxzkpOTZbfbtWnTpovec32wevVqNW3aVK1atdLw4cN19OhR1xjrUTtOnDghSa4/Mlyd16WMjAy1b9/e7YTbKSkpKiws1J49e6r9vevNGcutevLJJ9W5c2dFRERow4YNGjdunI4cOaIZM2ZIknJycpSQkOB2mzOLkpOTo8aNG1/0nuui/Px8VVRUVDrDfHR0tL7++msvdVU/dO3aVQsXLlSrVq105MgRPf/887rtttu0e/du5eTkKCAgQOHh4W63iY6OVk5OjncarifOPL5VPSfOjOXk5Khp06Zu435+foqIiGB9akHPnj11//33KyEhQd9//71+//vf66677lJGRoYaNGjAetQCp9OpUaNG6ZZbbtG1114rSdV6XcrJyanyuXNmrLrqZYgaO3asXn755XPOycrKUuvWrTVmzBjXtg4dOiggIECPPfaYpkyZwin9US/cddddrq87dOigrl27Kj4+Xh988IGCg4O92BngW/r37+/6un379urQoYOuvPJKrV69Wt27d/diZ3VXWlqadu/e7Xac5sVUL9/Oe/rpp5WVlXXOS8uWLau8bdeuXVVeXq4DBw5IkmJiYiod8X/mekxMTK3ej/okKipKDRo0qPKx5nG+uMLDw3XNNdfou+++U0xMjMrKylRQUOA2h3WpfWce33M9J2JiYip98KK8vFzHjh1jfS6Cli1bKioqSt99950k1sPTRowYoSVLlmjVqlW64oorXNur87rkqf+762WIatKkiVq3bn3Oy8+Pcfq57du3y263u3bJJiYmau3atXI4HK456enpatWqFW/leVBAQICuv/56rVixwrXN6XRqxYoVSkxM9GJn9c+pU6f0/fffKzY2Vtdff738/f3d1mXfvn3Kzs5mXWpZQkKCYmJi3B77wsJCbdq0yfXYJyYmqqCgQJmZma45K1eulNPpVNeuXS96z/XNv/71Lx09elSxsbGSWA9PMcZoxIgR+uijj7Ry5cpKh9RU53UpMTFRu3btcgu16enpCg0NVdu2bWvUDM5iw4YNZubMmWb79u3m+++/N3/7299MkyZNzKBBg1xzCgoKTHR0tHn44YfN7t27zXvvvWdCQkLMG2+84cXO66b33nvPBAYGmoULF5q9e/eaYcOGmfDwcLdPV8Dznn76abN69Wqzf/9+8+WXX5rk5GQTFRVl8vLyjDHGPP7446Z58+Zm5cqVZsuWLSYxMdEkJiZ6ueu64eTJk2bbtm1m27ZtRpKZMWOG2bZtm/nxxx+NMcZMnTrVhIeHm3/+859m586dpk+fPiYhIcEUFxe7avTs2dNcd911ZtOmTWb9+vXm6quvNr/+9a+9dZcuaedaj5MnT5rf/va3JiMjw+zfv98sX77cdO7c2Vx99dWmpKTEVYP1uHDDhw83YWFhZvXq1ebIkSOuS1FRkWvO+V6XysvLzbXXXmt69Ohhtm/fbpYuXWqaNGlixo0bV6NeCFHnkJmZabp27WrCwsJMUFCQadOmjXnppZfcnhDGGLNjxw5z6623msDAQHP55ZebqVOneqnjum/OnDmmefPmJiAgwNx4441m48aN3m6pznvwwQdNbGysCQgIMJdffrl58MEHzXfffecaLy4uNk888YRp3LixCQkJMffdd585cuSIFzuuO1atWmUkVbqkpqYaY346zcH48eNNdHS0CQwMNN27dzf79u1zq3H06FHz61//2lx22WUmNDTUPPLII+bkyZNeuDeXvnOtR1FRkenRo4dp0qSJ8ff3N/Hx8Wbo0KGVfsljPS5cVWsgySxYsMA1pzqvSwcOHDB33XWXCQ4ONlFRUebpp582DoejRr3Y/tMQAAAAaqBeHhMFAABwoQhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwAJCFAAAgAWEKABeM2nSJHXq1MnbbVyQFi1aaNasWRdc580331SPHj0uqMa8efN0zz33XHAvAKqHEAWgRn7zm9/IZrPJZrPJ399f0dHRuvPOO/XWW2/J6XR6u71KVq9e7erXZrOpSZMmuvvuu7Vr164a1Vm4cKHCw8Mrbd+8ebOGDRt2QT2WlJRo/Pjxmjhxomtbenq6rrnmGoWGhurhhx9WWVmZa+zEiRO65ppr9OOPP7rVefTRR7V161atW7fugvoBUD2EKAA11rNnTx05ckQHDhzQ559/rm7duumpp55S7969VV5e7u32qrRv3z4dOXJEX3zxhUpLS9WrVy+3YGJVkyZNFBISckE1/vd//1ehoaG65ZZbJElOp1MDBgzQ448/royMDG3ZskXz5893zR87dqwef/xxxcfHu9UJCAjQgAEDNHv27AvqB0D1EKIA1FhgYKBiYmJ0+eWXq3Pnzvr973+vf/7zn/r888+1cOFC17yCggINGTJETZo0UWhoqO644w7t2LHjrHU3b96sO++8U1FRUQoLC9OvfvUrbd261TX+6KOPqnfv3m63cTgcatq0qd58881z9ty0aVPFxMSoc+fOGjVqlA4ePKivv/7aNT5jxgy1b99eDRs2VFxcnJ544gmdOnVK0k97sx555BGdOHHCtUdr0qRJkiq/nZedna0+ffrosssuU2hoqP77v/9bubm55+ztvffec3sbLj8/X/n5+XriiSfUrl073XvvvcrKypIkbdiwQZs3b9ZTTz1VZa177rlHH3/8sYqLi8/5PQFcOEIUAI+444471LFjR/397393bXvggQeUl5enzz//XJmZmercubO6d++uY8eOVVnj5MmTSk1N1fr167Vx40ZdffXVuvvuu3Xy5ElJ0pAhQ7R06VIdOXLEdZslS5aoqKhIDz74YLX6PHHihN577z1JP+25OcNut2v27Nnas2eP3n77ba1cuVLPPvusJOnmm2/WrFmzFBoaqiNHjujIkSP67W9/W6m20+lUnz59dOzYMa1Zs0bp6en64Ycfztvb+vXr1aVLF9f1Jk2aKDY2VsuWLVNRUZHWrVunDh06yOFwaPjw4XrjjTfUoEGDKmt16dJF5eXl2rRpU7UeDwAXwABADaSmppo+ffpUOfbggw+aNm3aGGOMWbdunQkNDTUlJSVuc6688krzxhtvGGOMmThxounYseNZv1dFRYVp1KiR+eSTT1zb2rZta15++WXX9Xvuucf85je/OWuNVatWGUmmYcOGpmH
DhkaSkWTuvffec97PDz/80ERGRrquL1iwwISFhVWaFx8fb2bOnGmMMWbZsmWmQYMGJjs72zW+Z88eI8l89dVXVX6f48ePG0lm7dq1btvXrVtnunTpYlq0aGGeeOIJU1ZWZiZPnmyeeuops3v3bnPzzTeba665xsyZM6dSzcaNG5uFCxee8/4BuHB+Xk1wAOoUY4xsNpskaceOHTp16pQiIyPd5hQXF+v777+v8va5ubl67rnntHr1auXl5amiokJFRUXKzs52zRkyZIjmz5+vZ599Vrm5ufr888+1cuXK8/a2bt06hYSEaOPGjXrppZc0b948t/Hly5drypQp+vrrr1VYWKjy8nKVlJSoqKio2sc8ZWVlKS4uTnFxca5tbdu2VXh4uLKysnTDDTdUus2Zt92CgoLctt96663avHmz6/o333yjd955R9u2bVNSUpKeeuop3XXXXbr22muVlJSkDh06uOYGBwerqKioWj0DsI4QBcBjsrKylJCQIEk6deqUYmNjtXr16krzqvqUmySlpqbq6NGjevXVVxUfH6/AwEAlJia6HQA+aNAgjR07VhkZGdqwYYMSEhJ02223nbe3hIQEhYeHq1WrVsrLy9ODDz6otWvXSpIOHDig3r17a/jw4frjH/+oiIgIrV+/XoMHD1ZZWdkFHzh+LpGRkbLZbDp+/Pg55z322GOaPn26nE6ntm3bpgceeEAhISH61a9+pTVr1riFqGPHjqlJkya11jOAn3BMFACPWLlypXbt2qV+/fpJkjp37qycnBz5+fnpqquucrtERUVVWePLL7/Uk08+qbvvvlvt2rVTYGCg8vPz3eZERkaqb9++WrBggRYuXKhHHnmkxr2mpaVp9+7d+uijjyRJmZmZcjqdmj59um666SZdc801Onz4sNttAgICVFFRcc66bdq00cGDB3Xw4EHXtr1796qgoEBt27at8jYBAQFq27at9u7de9a6b775piIiInTvvfe6enA4HK5/f97X999/r5KSEl133XXn7BXAhSNEAaix0tJS5eTk6NChQ9q6dateeukl9enTR71799agQYMkScnJyUpMTFTfvn21bNkyHThwQBs2bNAf/vAHbdmypcq6V199tf76178qKytLmzZt0sCBAxUcHFxp3pAhQ/T2228rKytLqampNe4/JCREQ4cO1cSJE2WM0VVXXSWHw6E5c+bohx9+0F//+tdKb/e1aNFCp06d0ooVK5Sfn1/l22XJyclq3769Bg4cqK1bt+qrr77SoEGD9Ktf/crtwPFfSklJ0fr166scy8vL04svvqg5c+ZIkho3bqw2bdpo1qxZysjI0IoVK1ynRpB+etuyZcuWuvLKK2v8uACoIW8flAXg0pKamuo6ONvPz880adLEJCcnm7feestUVFS4zS0sLDQjR440zZo1M/7+/iYuLs4MHDjQdeD1Lw8s37p1q+nSpYsJCgoyV199tfnwww/dDtw+w+l0mvj4eHP33Xeft98zB5YfP37cbXt2drbx8/Mz77//vjHGmBkzZpjY2FgTHBxsUlJSzDvvvFPpdo8//riJjIw0kszEiRONMaZSfz/++KO59957TcOGDU2jRo3MAw88YHJycs7Z4549e0xwcLApKCioNNa/f/9KB49v2rTJtG7d2kRERJjnn3/ebaxHjx5mypQp53lUAHiCzRhjvJriAKCGTp06pcsvv1wLFizQ/fff7+12POKBBx5Q586dNW7cOMs19uzZozvuuEPffPONwsLCPNgdgKrwdh6AS4bT6VReXp5eeOEFhYeH69577/V2Sx7zyiuv6LLLLrugGkeOHNE777xDgAIuEvZEAbhkHDhwQAkJCbriiiu0cOFCde/e3dstAajHCFEAAAAW8HYeAACABYQoAAAACwhRAAAAFhCiAAAALCBEAQAAWECIAgAAsIAQBQAAYAEhCgAAwIL/Bzu8RLvhsoG1AAAAAElFTkSuQmCC",
+ "text/plain": [
+ "