Minor fixes in many places
gbene committed May 9, 2024
1 parent 33c92a7 commit 2fd3837
Showing 10 changed files with 113 additions and 111 deletions.
29 changes: 21 additions & 8 deletions development/Examples/Basics/1. Import and plot.ipynb
@@ -52,6 +52,21 @@
"print(pontrelli_data.available_data)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c7f9edb7-7488-4722-81bd-dfff2d9219b8",
"metadata": {},
"outputs": [],
"source": [
"data_dict = pontrelli_data.data_dict # Get dict of paths for the data\n",
"\n",
"set_a_path = data_dict['Set_a.shp']\n",
"set_b_path = data_dict['Set_b.shp']\n",
"set_c_path = data_dict['Set_c.shp']\n",
"boundary_path = data_dict['Interpretation_boundary.shp']"
]
},
{
"cell_type": "markdown",
"id": "dd457383-8b0c-4862-93f6-5b448dcdd9e2",
@@ -69,14 +84,12 @@
"metadata": {},
"outputs": [],
"source": [
"data_dict = pontrelli_data.data_dict # Get dict of paths for the data\n",
"\n",
"# Create the fractures and boundary objects. \n",
"set_a = Entities.Fractures(shp=data_dict['Set_a.shp'], set_n=1) # to add your data put the absolute path of the shp file\n",
"set_b = Entities.Fractures(shp=data_dict['Set_b.shp'], set_n=2)\n",
"set_c = Entities.Fractures(shp=data_dict['Set_c.shp'], set_n=3)\n",
"set_a = Entities.Fractures(shp=set_a_path, set_n=1) # to add your data put the absolute path of the shp file\n",
"set_b = Entities.Fractures(shp=set_b_path, set_n=2)\n",
"set_c = Entities.Fractures(shp=set_c_path, set_n=3)\n",
"\n",
"boundary = Entities.Boundary(shp=data_dict['Interpretation_boundary.shp'], group_n=1)"
"boundary = Entities.Boundary(shp=boundary_path, group_n=1)"
]
},
{
@@ -94,7 +107,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(set_a.entity_df)"
"set_a.entity_df"
]
},
{
@@ -166,7 +179,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(fracture_net.entity_df)"
"fracture_net.entity_df"
]
},
{
5 changes: 3 additions & 2 deletions development/Examples/Basics/2. Calculate topology.ipynb
@@ -102,7 +102,7 @@
"id": "3125d707-653c-4560-869f-1126f8026bdd",
"metadata": {},
"source": [
"### Plot the ternary diagram"
"### Plot the ternary diagram and return the number of nodes"
]
},
{
@@ -112,7 +112,8 @@
"metadata": {},
"outputs": [],
"source": [
"fracture_net.ternary_plot()"
"fracture_net.ternary_plot()\n",
"print(fracture_net.nodes.node_count)"
]
},
{
86 changes: 17 additions & 69 deletions development/Examples/Basics/3. Fitting a distribution.ipynb
@@ -16,7 +16,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "d3122f6d-7e05-46c2-bf97-c4a523477ae9",
"metadata": {},
"outputs": [],
@@ -41,24 +41,10 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"id": "9c33a71a-33ff-4ed6-8ff2-e937d29f6bb1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\n",
"Calculating intersections: 1944/1945\n",
"\n",
"\n",
"Analyzing nodes:20982/20983\r"
]
}
],
"outputs": [],
"source": [
"pontrelli_data = data.Pontrelli()\n",
"data_dict = pontrelli_data.data_dict # Get dict of paths for the data\n",
@@ -94,52 +80,14 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "b4829d42-ada1-47da-85c9-213cc98ec076",
"metadata": {},
"outputs": [],
"source": [
"fitter = Statistics.NetworkFitter(fracture_net)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcad090e-855a-4429-b5f9-4a86432bbc83",
"metadata": {},
"outputs": [],
"source": [
"print(fitter.net_data.data)\n",
"print(fitter.net_data.censoring_percentage) # get the % of censored values\n",
"print(fitter.net_data.mean) # get the sample mean (i.e. ignoring censoring) \n",
"print(fitter.net_data.std) # get the sample std\n",
"print(fitter.net_data.mode) # get the sample mode\n",
"# ... See docs for the full list of sample statitistics"
]
},
{
"cell_type": "markdown",
"id": "c0f2f594-2423-4a5d-bad6-47e9ba78736f",
"metadata": {},
"source": [
"We can also estimate the empirical cumulative or survival curve using Kaplan Meier and compare it to a ecdf calculated on a \"ignoring censoring\" dataset (i.e. all the lentghs without censoring flag)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38632b10-61ba-45e9-a0b7-d4d9377103f7",
"metadata": {},
"outputs": [],
"source": [
"KM = fitter.net_data.ecdf\n",
"lengths = fitter.network_data.lengths # get only the length data (without knowing which one are censored or not)\n",
"ignore_ecdf = ss.ecdf(lengths).cdf\n",
"plt.plot(lengths, KM, '-k')\n",
"plt.plot(lengths, ignore_ecdf.probabilities, '--k')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"id": "9764c325-a97f-47cd-ab44-a890fce3ca94",
@@ -154,7 +102,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "fc7b36c7-0251-4d3a-8447-747ec6051020",
"metadata": {},
"outputs": [],
@@ -182,7 +130,7 @@
"metadata": {},
"outputs": [],
"source": [
"Plotters.matplot_stats_uniform(fitter)"
"Plotters.matplot_stats_uniform(fitter,sort_by='Akaike') # Plot all the models\n",
"\n",
"# Plot specific model\n",
"Plotters.matplot_stats_uniform(fitter,position=3,sort_by='Akaike',show_plot=True)\n",
"\n",
"# Plot specific models together\n",
"Plotters.matplot_stats_uniform(fitter,position=1,sort_by='Akaike',show_plot=False)\n",
"Plotters.matplot_stats_uniform(fitter,position=2,sort_by='Akaike',show_plot=False)\n",
"Plotters.matplot_stats_uniform(fitter,position=3,sort_by='Akaike',show_plot=True)"
]
},
{
@@ -225,22 +181,14 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"id": "2bd844d0-ea57-444c-9c41-ec712d9b1bb0",
"metadata": {},
"outputs": [],
"source": [
"#Plotters.matplot_stats_summary(fitter, best=True,sort_by='Mean_rank')\n",
"Plotters.matplot_stats_summary(fitter, best=False,sort_by='Mean_rank') # plot them all"
"Plotters.matplot_stats_summary(fitter, position=1, sort_by='Mean_rank') # Plot specific model\n",
"Plotters.matplot_stats_summary(fitter, position=False, sort_by='Mean_rank') # plot them all"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "53ca949e-81ee-4dbf-8463-03d04af906e1",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
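Taken together, this notebook now separates fitting from plotting: NetworkFitter wraps the fracture network, and the Plotters functions select fitted models by rank instead of a best flag. A minimal sketch of that flow, assuming fracture_net was built as in the first notebook and that the candidate distributions have already been fitted (the fitting step itself sits in a hunk not shown above):

    fitter = Statistics.NetworkFitter(fracture_net)
    # ... fit the candidate distributions here (step elided in the diff above) ...
    Plotters.matplot_stats_uniform(fitter, sort_by='Akaike')                 # overlay every fitted model
    Plotters.matplot_stats_summary(fitter, position=1, sort_by='Akaike')     # summary for the top-ranked model only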
6 changes: 4 additions & 2 deletions fracability/AbstractClasses.py
@@ -287,9 +287,11 @@ def save_shp(self, path: str):
if not self.entity_df.empty:
cwd = os.getcwd()
output_path = os.path.join(cwd, path, 'output', 'shp')

style_out = os.path.join(output_path, 'qgis_style')
if not os.path.isdir(output_path):
os.makedirs(output_path)
if not os.path.isdir(style_out):
os.makedirs(style_out)

if self.name == 'Fractures':
set_n = list(set(self.entity_df['f_set']))
@@ -311,7 +313,7 @@ def save_shp(self, path: str):
qgis_style_paths = QgisStyle().available_paths

for qgis_path in qgis_style_paths:
shutil.copy(qgis_path, output_path)
shutil.copy(qgis_path, style_out)

else:
print('Cannot save an empty entity')
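The practical effect of the save_shp change is that QGIS style files now land in their own subfolder instead of next to the shapefiles. A sketch of the resulting layout, assuming a populated Fractures entity named set_a and an export folder called 'export' (both names are illustrative):

    set_a.save_shp('export')
    # ./export/output/shp/             <- exported shapefiles
    # ./export/output/shp/qgis_style/  <- copied QGIS style files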
21 changes: 15 additions & 6 deletions fracability/Entities.py
@@ -91,15 +91,12 @@ def network_object(self) -> Graph:
return network_obj

@property
def node_count(self) -> tuple[float, float, float, float, float]:
def ternary_node_count(self) -> tuple[float, float, float, float, float]:
"""
Calculate the node proportions and precise connectivity value following Manzocchi 2002
:return: A tuple of values PI, PY, PX, PU, precise_n
:return: A tuple of values PI, PY, PX, PU, CI
"""
nodes = self.vtk_object['n_type']
unique, count = np.unique(nodes, return_counts=True)
count_dict = dict(zip(unique, count))
count_dict = self.node_count

if 1 in count_dict.keys():
I_nodes = count_dict[1]
@@ -153,6 +150,18 @@ def node_count(self) -> tuple[float, float, float, float, float]:
return PI, PY, PX, PU, precise_n

@property
def node_count(self) -> dict:
"""
Calculate the node count dictionary
:return: A dictionary of nodes and their count
"""

nodes = self.vtk_object['n_type']
unique, count = np.unique(nodes, return_counts=True)
count_dict = dict(zip(unique, count))
return count_dict
@property
def n_censored(self) -> int:
""" Return the number of censored nodes"""

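The split gives two complementary views of the topology: node_count returns the raw counts, while ternary_node_count keeps the Manzocchi-style proportions and connectivity index. A short sketch of how they might be used together, assuming a fracture network built as in the example notebooks:

    node_counts = fracture_net.nodes.node_count                  # dict mapping node type code to number of nodes
    PI, PY, PX, PU, CI = fracture_net.nodes.ternary_node_count   # proportions plus connectivity index (Manzocchi 2002)
    print(node_counts, CI)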
55 changes: 36 additions & 19 deletions fracability/Plotters.py
@@ -815,7 +815,7 @@ def matplot_stats_summary(fitter: NetworkFitter,
function_list: list = ['pdf', 'cdf', 'sf'],
table: bool = True,
show_plot: bool = True,
best: bool = True,
position: int = 1,
sort_by: str = 'Akaike'):
"""
Summarize PDF, CDF and SF functions and display summary table all
@@ -832,15 +832,15 @@
show_plot: Bool. If true show the plot. By default is True.
best: Bool. If true show only the best fit sorted by the sort_by field. If false show the plots for each fit. By default is True.
position: int. Show the summary plot for the model at the indicated position (1-indexed) sorted by the sort_by field. If False, show the plots for each fit. By default is 1.
sort_by: str. Column name used to rank the fitted models before selecting the position. By default is Akaike.
"""
sns.set_theme()

if best:
names = [fitter.get_fitted_distribution_names(sort_by)[0]]
if position:
names = [fitter.get_fitted_distribution_names(sort_by)[position-1]]
else:
names = fitter.get_fitted_distribution_names(sort_by)

@@ -875,35 +875,51 @@ def matplot_stats_summary(fitter: NetworkFitter,
plt.show()


def matplot_stats_uniform(network_fit: NetworkFitter):
def matplot_stats_uniform(fitter: NetworkFitter,
show_plot: bool = True,
position: int = 0,
sort_by: str = 'Akaike'):
"""
Compare the fitted data with the standard uniform U(0, 1), following Kim 2019.
:param network_fit:
:return:
Parameters
-------
fitter: Input NetworkFitter object
show_plot: Bool. If true show the plot. By default is True.
position: int. Show the plot for the model at the indicated position (1-indexed) sorted by the sort_by field. If 0 (the default), show the plots for each fit.
sort_by: str. Column name used to rank the fitted models before selecting the position. By default is Akaike.
"""
#sns.set_theme()

fitted_list = network_fit.get_fitted_distribution_names()
if position:
names = [fitter.get_fitted_distribution_names(sort_by)[position-1]]
else:
names = fitter.get_fitted_distribution_names(sort_by)

fig = plt.figure(num=f'Comparison plot', figsize=(13, 7))

uniform_list = np.linspace(0, 1, num=10)
uniform_cdf = uniform.cdf(uniform_list)

for fit_name in fitted_list:
fitter = network_fit.get_fitted_distribution(fit_name)
Z = fitter.cdf()
delta = fitter.fit_data.delta
for name in names:
network_distribution = fitter.get_fitted_distribution(name)
Z = network_distribution.cdf()
delta = network_distribution.fit_data.delta
G_n = KM(Z, Z, delta)
plt.plot(Z, G_n, label=f'G_n {fit_name}') # plot the Kaplan-Meier curves with steps
plt.plot(Z, G_n, label=f'G_n {name}') # plot the Kaplan-Meier curves with steps

setFigLinesBW(fig)
plt.plot(uniform_list, uniform_cdf, 'r', label='U(0, 1)')
plt.title('Distance to Uniform comparison')
plt.grid(True)
plt.legend()
plt.show()

if show_plot:
plt.plot(uniform_list, uniform_cdf, 'r', label='U(0, 1)')
plt.title('Distance to Uniform comparison')
plt.grid(False)
plt.legend()
plt.show()



@@ -934,10 +950,11 @@ def matplot_ternary(entity,
elif entity.name == 'Nodes':
nodes = entity

PI, PY, PX = nodes.node_count[: 3]
PI, PY, PX, _, CI = nodes.ternary_node_count
points = [(PX, PI, PY)]

tax.scatter(points, marker='o', color='red', label='Classification')
tax.annotate(f'{np.round(CI,2)}', position=[PX, PI, PY])

for n in range(8):
n += 1
Expand Down
2 changes: 1 addition & 1 deletion fracability/__init__.py
@@ -1,4 +1,4 @@
from os.path import dirname, join as joinpath

__version__ = "1.3.1"
__version__ = "1.3.2"
__author__ = 'Gabriele Benedetti'