From bb578db699de8c9fdad732d2ee252b36165e086b Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Fri, 16 Aug 2024 17:54:59 -0400 Subject: [PATCH 01/12] Begin developing n-ary fuzzy relations --- requirements.txt | 1 + src/fuzzy/relations/continuous/n_ary.py | 139 ++++++++++++ .../continuous/{tnorm.py => old_tnorm.py} | 0 src/fuzzy/relations/continuous/t_norm.py | 62 ++++++ src/fuzzy/sets/continuous/group.py | 8 +- src/fuzzy/sets/continuous/impl.py | 8 +- tests/test_relations/test_n_ary.py | 209 ++++++++++++++++++ tests/test_relations/test_tnorms.py | 2 +- 8 files changed, 418 insertions(+), 11 deletions(-) create mode 100644 src/fuzzy/relations/continuous/n_ary.py rename src/fuzzy/relations/continuous/{tnorm.py => old_tnorm.py} (100%) create mode 100644 src/fuzzy/relations/continuous/t_norm.py create mode 100644 tests/test_relations/test_n_ary.py diff --git a/requirements.txt b/requirements.txt index e862889..17146c7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ numpy==1.26.4 # new is 2.0.1 (update when possible - tests fail otherwise) +scipy==1.13.1 # for sparse matrices sympy==1.13.2 # for torch torch==2.4.0 torchquad==0.4.0 diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py new file mode 100644 index 0000000..9d418ce --- /dev/null +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -0,0 +1,139 @@ +""" +Classes for representing n-ary fuzzy relations, such as t-norms and t-conorms. These relations +are used to combine multiple membership values into a single value. The n-ary relations (of +differing types) can then be combined into a compound relation. +""" + +from typing import Tuple, List + +import torch +import numpy as np +import scipy.sparse as sps + +from fuzzy.sets.continuous.membership import Membership + + +class NAryRelation(torch.nn.Module): + """ + This class represents an n-ary fuzzy relation. An n-ary fuzzy relation is a relation that takes + n arguments and returns a (float) value. This class is useful for representing fuzzy relations + that take multiple arguments, such as a t-norm that takes two or more arguments and returns a + truth value. + """ + + def __init__(self, *indices: Tuple[int, int], device: torch.device, **kwargs): + """ + Apply an n-ary relation to the indices (i.e., relation's matrix) on the provided device. + + Args: + items: The 2-tuple indices to apply the n-ary relation to (e.g., (0, 1), (1, 0)). + device: The device to use for the relation. + """ + super().__init__(**kwargs) + self.indices = indices + data = np.ones(len(indices)) # a '1' indicates a relation exists + row, col = zip(*indices) + + self._coo_matrix: sps.sparse._coo.matrix = sps.coo_matrix( + (data, (row, col)), dtype=np.int8 + ) + self._original_shape: Tuple[int, int] = self._coo_matrix.shape + # this mask is used to zero out the values that are not part of the relation + self.mask: torch.Tensor = torch.tensor( + self._coo_matrix.toarray(), dtype=torch.float32, device=device + ) + # matrix size can increase (in-place) for more potential rows (vars) and columns (terms) + # self._coo_matrix.resize( + # self._coo_matrix.shape[0] + 1, self._coo_matrix.shape[1] + 1 + # ) + # we can create a graph from the adjacency matrix + # g = igraph.Graph.Adjacency(self._coo_matrix) + + def resize(self, *shape): + """ + Resize the matrix in-place to the given shape. + + Args: + shape: The new shape of the matrix. 
+ """ + # resize the COO matrix in-place + self._coo_matrix.resize(*shape) + # update the mask to reflect the new shape + self.mask = torch.tensor( + self._coo_matrix.toarray(), dtype=torch.float32, device=self.mask.device + ) + + def apply_mask(self, membership: Membership) -> torch.Tensor: + """ + Apply the n-ary relation's mask to the given memberships. + + Args: + membership: The membership values to apply the minimum n-ary relation to. + + Returns: + The masked membership values (zero may or may not be a valid degree of truth). + """ + membership_shape: torch.Size = membership.mask.shape + if self._coo_matrix.shape != membership_shape: + if len(membership_shape) > 2: + # this is for the case where masks have been stacked due to compound relations + membership_shape = membership_shape[-2:] # get the last two dimensions + self.resize(*membership_shape) + # select the membership values that are not zeroed out (i.e., involved in the relation) + after_mask = membership.degrees * self.mask + return after_mask.sum(dim=1, keepdim=True) # drop the zeroed out values + + def forward(self, membership: Membership) -> torch.Tensor: + """ + Apply the n-ary relation to the given memberships. + + Args: + membership: The membership values to apply the minimum n-ary relation to. + + Returns: + The minimum membership value, according to the n-ary relation (i.e., which truth values + to actually consider). + """ + raise NotImplementedError( + f"The {self.__class__.__name__} has no defined forward function. Please create a class " + f"and inherit from {self.__class__.__name__}, or use a predefined class." + ) + + +class Compound(torch.nn.Module): + """ + This class represents an n-ary compound relation, where it expects at least 1 or more + instance of NAryRelation. + """ + + def __init__(self, *relations: NAryRelation, **kwargs): + """ + Initialize the compound relation with the given n-ary relation(s). + + Args: + relation: The n-ary compound relation. + """ + super().__init__(**kwargs) + # store the relations as a module list (as they are also modules) + self.relations = torch.nn.ModuleList(relations) + + def forward(self, membership: Membership) -> Membership: + """ + Apply the compound n-ary relation to the given membership values. + + Args: + membership: The membership values to apply the compound n-ary relation to. + + Returns: + The stacked output of the compound n-ary relation; ready for subsequent follow-up. + """ + # apply the compound n-ary relation to the membership values + memberships: List[Membership] = [ + relation(membership=membership) for relation in self.relations + ] + degrees: torch.Tensor = torch.cat( + [membership.degrees for membership in memberships], dim=-1 + ) + # create a new mask that accounts for the different masks for each relation + mask = torch.stack([relation.mask for relation in self.relations]) + return Membership(elements=membership.elements, degrees=degrees, mask=mask) diff --git a/src/fuzzy/relations/continuous/tnorm.py b/src/fuzzy/relations/continuous/old_tnorm.py similarity index 100% rename from src/fuzzy/relations/continuous/tnorm.py rename to src/fuzzy/relations/continuous/old_tnorm.py diff --git a/src/fuzzy/relations/continuous/t_norm.py b/src/fuzzy/relations/continuous/t_norm.py new file mode 100644 index 0000000..fca3bf8 --- /dev/null +++ b/src/fuzzy/relations/continuous/t_norm.py @@ -0,0 +1,62 @@ +""" +This module contains the implementation of the n-ary t-norm fuzzy relations. 
These relations +are used to combine multiple membership values into a single value. The minimum and product +relations are implemented here. +""" + +from fuzzy.sets.continuous.membership import Membership +from fuzzy.relations.continuous.n_ary import NAryRelation + + +class Minimum(NAryRelation): + """ + This class represents the minimum n-ary fuzzy relation. This is a special case of + the n-ary fuzzy relation where the minimum value is returned. + """ + + def forward(self, membership: Membership) -> Membership: + """ + Apply the minimum n-ary relation to the given memberships. + + Args: + membership: The membership values to apply the minimum n-ary relation to. + + Returns: + The minimum membership, according to the n-ary relation (i.e., which truth values + to actually consider). + """ + # first filter out the values that are not part of the relation + # then take the minimum value of those that remain in the last dimension + return Membership( + elements=membership.elements, + degrees=self.apply_mask(membership=membership) + .min(dim=-1, keepdim=True) + .values, + mask=self.mask, + ) + + +class Product(NAryRelation): + """ + This class represents the algebraic product n-ary fuzzy relation. This is a special case of + the n-ary fuzzy relation where the product value is returned. + """ + + def forward(self, membership: Membership) -> Membership: + """ + Apply the algebraic product n-ary relation to the given memberships. + + Args: + membership: The membership values to apply the algebraic product n-ary relation to. + + Returns: + The algebraic product membership value, according to the n-ary relation + (i.e., which truth values to actually consider). + """ + # first filter out the values that are not part of the relation + # then take the minimum value of those that remain in the last dimension + return Membership( + elements=membership.elements, + degrees=self.apply_mask(membership=membership).prod(dim=-1, keepdim=True), + mask=self.mask, + ) diff --git a/src/fuzzy/sets/continuous/group.py b/src/fuzzy/sets/continuous/group.py index b0c5132..8d55a8e 100644 --- a/src/fuzzy/sets/continuous/group.py +++ b/src/fuzzy/sets/continuous/group.py @@ -246,12 +246,8 @@ def expand( self.minimums = minimums self.maximums = maximums else: - self.minimums = torch.min( - minimums, self.minimums - ).detach() - self.maximums = torch.max( - maximums, self.maximums - ).detach() + self.minimums = torch.min(minimums, self.minimums).detach() + self.maximums = torch.max(maximums, self.maximums).detach() # find where the new centers should be added, if any # LogGaussian was used, then use following to check for real membership degrees: diff --git a/src/fuzzy/sets/continuous/impl.py b/src/fuzzy/sets/continuous/impl.py index c244dc6..ae9eaf4 100644 --- a/src/fuzzy/sets/continuous/impl.py +++ b/src/fuzzy/sets/continuous/impl.py @@ -125,7 +125,7 @@ def forward(self, observations) -> Membership: # ), "Infinite values detected in the membership degrees." return Membership( - elements=observations, + elements=observations.squeeze(dim=-1), # remove the last dimension degrees=degrees.to_sparse() if self.use_sparse_tensor else degrees, mask=self.get_mask(), ) @@ -204,7 +204,7 @@ def forward(self, observations) -> Membership: # ), "Infinite values detected in the membership degrees." 
return Membership( - elements=observations, + elements=observations.squeeze(dim=-1), # remove the last dimension degrees=degrees.to_sparse() if self.use_sparse_tensor else degrees, mask=self.get_mask(), ) @@ -308,7 +308,7 @@ def forward(self, observations) -> Membership: ), "Infinite values detected in the membership degrees." return Membership( - elements=observations, + elements=observations.squeeze(dim=-1), # remove the last dimension degrees=degrees.to_sparse() if self.use_sparse_tensor else degrees, mask=self.get_mask(), ) @@ -441,7 +441,7 @@ def forward(self, observations) -> Membership: ), "Infinite values detected in the membership degrees." return Membership( - elements=observations, + elements=observations.squeeze(dim=-1), # remove the last dimension degrees=degrees.to_sparse() if self.use_sparse_tensor else degrees, mask=self.get_mask(), ) diff --git a/tests/test_relations/test_n_ary.py b/tests/test_relations/test_n_ary.py new file mode 100644 index 0000000..ea2e357 --- /dev/null +++ b/tests/test_relations/test_n_ary.py @@ -0,0 +1,209 @@ +import unittest + +import torch +import numpy as np + +from fuzzy.sets.continuous.impl import Gaussian +from fuzzy.sets.continuous.membership import Membership +from fuzzy.relations.continuous.t_norm import Minimum, Product +from fuzzy.relations.continuous.n_ary import NAryRelation, Compound + +N_TERMS: int = 2 +N_VARIABLES: int = 4 +N_OBSERVATIONS: int = 3 +AVAILABLE_DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +class TestNAryRelation(unittest.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.gaussian_mf = Gaussian( + centers=np.array([[i, i + 1] for i in range(N_VARIABLES)]), + widths=np.array([[(i + 1) / 2, (i + 1) / 3] for i in range(N_VARIABLES)]), + device=AVAILABLE_DEVICE, + ) + self.data: np.ndarray = np.array( + [ + [0.0412, 0.4543, 0.1980, 0.3821], + [0.9327, 0.5900, 0.1569, 0.6902], + [0.0894, 0.9433, 0.9903, 0.5800], + ] + ) + + def test_gaussian_membership(self) -> Membership: + """ + Although this test is not directly related to the NAryRelation class, and is possibly + redundant due to Gaussian's unit testing, it is used to double-check that the Gaussian + membership function is working as intended as these unit tests rely on correct values + from the Gaussian membership function to work. + + Returns: + The membership values for the Gaussian membership function. + """ + membership: Membership = self.gaussian_mf( + torch.tensor(self.data, dtype=torch.float32, device=AVAILABLE_DEVICE) + ) + + self.assertEqual(membership.degrees.shape[0], N_OBSERVATIONS) + self.assertEqual(membership.degrees.shape[1], N_VARIABLES) + self.assertEqual(membership.degrees.shape[2], N_TERMS) + + # check that the membership is correct + expected_membership_degrees: torch.Tensor = torch.tensor( + [ + [ + [9.9323326e-01, 2.5514542e-04], + [7.4245834e-01, 4.6277978e-03], + [2.3617040e-01, 3.8928282e-04], + [1.8026091e-01, 6.3449936e-04], + ], + [ + [3.0816132e-02, 9.6005607e-01], + [8.4526926e-01, 1.1410457e-02], + [2.2095737e-01, 3.0867624e-04], + [2.6347569e-01, 2.1079029e-03], + ], + [ + [9.6853620e-01, 5.7408627e-04], + [9.9679035e-01, 8.1074789e-02], + [6.3564914e-01, 1.7616944e-02], + [2.3128603e-01, 1.3889252e-03], + ], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(membership.degrees, expected_membership_degrees)) + return membership + + def test_n_ary_relation(self) -> None: + """ + Test the abstract n-ary relation. 
+ + Returns: + None + """ + n_ary = NAryRelation((0, 1), (1, 0), device=AVAILABLE_DEVICE) + self.assertRaises(NotImplementedError, n_ary.forward, None) + # check that the matrix shape is correct + self.assertEqual(n_ary._coo_matrix.shape, (2, 2)) + # check that the original shape is stored + self.assertEqual(n_ary._original_shape, (2, 2)) + # matrix size can increase (in-place) for more potential rows (vars) and columns (terms) + n_ary._coo_matrix.resize(3, 3) + self.assertEqual(n_ary._coo_matrix.shape, (3, 3)) + # check that the original shape is still kept after resizing + self.assertEqual(n_ary._original_shape, (2, 2)) + + def test_minimum(self) -> None: + """ + Test the n-ary minimum relation. + + Returns: + None + """ + n_ary = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) + membership = self.test_gaussian_membership() + + # test the mask application + after_mask = n_ary.apply_mask(membership=membership) + expected_after_mask = torch.tensor( + [ + [[7.4245834e-01, 2.5514542e-04]], + [[8.4526926e-01, 9.6005607e-01]], + [[9.9679035e-01, 5.7408627e-04]], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(after_mask, expected_after_mask)) + + # test the forward pass + min_membership: Membership = n_ary.forward(membership) + expected_min_values = torch.tensor( + [[[2.5514542e-04]], [[8.4526926e-01]], [[5.7408627e-04]]], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) + + # check that it is torch.jit scriptable (currently not working) + # n_ary_script = torch.jit.script(n_ary) + # + # after_mask_script = n_ary_script.apply_mask(membership=membership) + # self.assertTrue(torch.allclose(after_mask_script, expected_after_mask)) + # + # min_values_script = n_ary_script.forward(membership) + # self.assertTrue(torch.allclose(min_values_script, expected_min_values)) + + def test_algebraic_product(self) -> None: + n_ary = Product((0, 1), (1, 0), device=AVAILABLE_DEVICE) + membership = self.test_gaussian_membership() + + # test the mask application + after_mask = n_ary.apply_mask(membership=membership) + expected_after_mask = torch.tensor( + [ + [[7.4245834e-01, 2.5514542e-04]], + [[8.4526926e-01, 9.6005607e-01]], + [[9.9679035e-01, 5.7408627e-04]], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(after_mask, expected_after_mask)) + + # test the forward pass + prod_membership: Membership = n_ary.forward(membership) + expected_prod_values = torch.tensor( + [ + [[7.4245834e-01 * 2.5514542e-04]], + [[8.4526926e-01 * 9.6005607e-01]], + [[9.9679035e-01 * 5.7408627e-04]], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(prod_membership.degrees, expected_prod_values)) + + # check that it is torch.jit scriptable (currently not working) + # n_ary_script = torch.jit.script(n_ary) + # + # after_mask_script = n_ary_script.apply_mask(membership=membership) + # self.assertTrue(torch.allclose(after_mask_script, expected_after_mask)) + # + # min_values_script = n_ary_script.forward(membership) + # self.assertTrue(torch.allclose(min_values_script, expected_min_values)) + + def test_combination_of_t_norms(self) -> None: + """ + Test we can create a combination of t-norms to reflect more complex compound propositions. 
+ + Returns: + None + """ + n_ary_min = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) + n_ary_prod = Product((0, 1), (1, 0), device=AVAILABLE_DEVICE) + membership = self.test_gaussian_membership() + + t_norm = Compound(n_ary_min, n_ary_prod) + compound_values = t_norm(membership=membership) + expected_compound_values = torch.cat( + [ + n_ary_min(membership=membership).degrees, + n_ary_prod(membership=membership).degrees, + ], + dim=-1, + ) + self.assertTrue( + torch.allclose(compound_values.degrees, expected_compound_values) + ) + + # we can then follow it up with another t-norm + + n_ary_next_min = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) + min_membership: Membership = n_ary_next_min(compound_values) + expected_min_values = torch.tensor( + [ + [[7.4245834e-01 * 2.5514542e-04]], + [[8.4526926e-01 * 9.6005607e-01]], + [[9.9679035e-01 * 5.7408627e-04]], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) diff --git a/tests/test_relations/test_tnorms.py b/tests/test_relations/test_tnorms.py index d762b0e..5482b64 100644 --- a/tests/test_relations/test_tnorms.py +++ b/tests/test_relations/test_tnorms.py @@ -7,7 +7,7 @@ import torch import numpy as np -from fuzzy.relations.continuous.tnorm import AlgebraicProduct +from fuzzy.relations.continuous.old_tnorm import AlgebraicProduct def algebraic_product(elements: np.ndarray, importance: np.ndarray) -> np.float32: From 432e7601cdac54fb51f13875e4be23000e26f0d3 Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Sat, 17 Aug 2024 07:31:57 -0400 Subject: [PATCH 02/12] Begin generalizing n-ary relation to be more efficient --- src/fuzzy/relations/continuous/n_ary.py | 84 +++++++++++++++++++----- src/fuzzy/relations/continuous/t_norm.py | 4 +- tests/test_relations/test_n_ary.py | 39 +++++++---- 3 files changed, 96 insertions(+), 31 deletions(-) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index 9d418ce..0ca648b 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -4,7 +4,7 @@ differing types) can then be combined into a compound relation. """ -from typing import Tuple, List +from typing import Union, Tuple, List import torch import numpy as np @@ -21,7 +21,13 @@ class NAryRelation(torch.nn.Module): truth value. """ - def __init__(self, *indices: Tuple[int, int], device: torch.device, **kwargs): + # TODO: add support for indices to be List[Tuple[int, int]] for multiple compound indices + def __init__( + self, + *indices: Union[Tuple[int, int], List[Tuple[int, int]]], + device: torch.device, + **kwargs, + ): """ Apply an n-ary relation to the indices (i.e., relation's matrix) on the provided device. @@ -30,17 +36,33 @@ def __init__(self, *indices: Tuple[int, int], device: torch.device, **kwargs): device: The device to use for the relation. 
""" super().__init__(**kwargs) - self.indices = indices - data = np.ones(len(indices)) # a '1' indicates a relation exists - row, col = zip(*indices) + self.device: torch.device = device + # self.indices = indices + + if isinstance(indices[0], list): + # this scenario is for when we have multiple compound indices that use the same relation + # this is useful for computational efficiency (i.e., not having to use a for loop) + self._coo_matrix: List[sps._coo.coo_matrix] = [] + self._original_shape: List[Tuple[int, int]] = [] + for relation_indices in indices: + coo_matrix = self.convert_indices_to_matrix(relation_indices) + self._original_shape.append(coo_matrix.shape) + self._coo_matrix.append(coo_matrix) + # now convert to a list of matrices + max_var = max(t[0] for t in self._original_shape) + max_term = max(t[1] for t in self._original_shape) + self.make_np_matrix(max_var, max_term) + else: + # this is the normal scenario where we have a single relation but multiple indices + self._coo_matrix: sps._coo.coo_matrix = self.convert_indices_to_matrix( + indices + ) + self._original_shape = self._coo_matrix.shape + self.matrix: np.ndarray = self._coo_matrix.toarray()[:, :, None] - self._coo_matrix: sps.sparse._coo.matrix = sps.coo_matrix( - (data, (row, col)), dtype=np.int8 - ) - self._original_shape: Tuple[int, int] = self._coo_matrix.shape # this mask is used to zero out the values that are not part of the relation self.mask: torch.Tensor = torch.tensor( - self._coo_matrix.toarray(), dtype=torch.float32, device=device + self.matrix, dtype=torch.float32, device=device ) # matrix size can increase (in-place) for more potential rows (vars) and columns (terms) # self._coo_matrix.resize( @@ -49,6 +71,30 @@ def __init__(self, *indices: Tuple[int, int], device: torch.device, **kwargs): # we can create a graph from the adjacency matrix # g = igraph.Graph.Adjacency(self._coo_matrix) + def make_np_matrix(self, max_var: int, max_term: int): + matrices = [] + for coo_matrix in self._coo_matrix: + # first resize + coo_matrix.resize(max_var, max_term) + matrices.append(coo_matrix.toarray()) + # make a new axis and stack long that axis + self.matrix: np.ndarray = np.stack(matrices, axis=-1) + + @staticmethod + def convert_indices_to_matrix(indices) -> sps._coo.coo_matrix: + """ + Convert the given indices to a COO matrix. + + Args: + indices: The indices where a '1' will be placed at each index. + + Returns: + The COO matrix with a '1' at each index. + """ + data = np.ones(len(indices)) # a '1' indicates a relation exists + row, col = zip(*indices) + return sps.coo_matrix((data, (row, col)), dtype=np.int8) + def resize(self, *shape): """ Resize the matrix in-place to the given shape. @@ -57,11 +103,17 @@ def resize(self, *shape): shape: The new shape of the matrix. 
""" # resize the COO matrix in-place - self._coo_matrix.resize(*shape) + if isinstance(self._coo_matrix, list): + for coo_matrix in self._coo_matrix: + coo_matrix.resize(*shape) + self.make_np_matrix(shape[0], shape[1]) + else: + self._coo_matrix.resize(*shape) + # TODO: update the matrix to reflect the new shape + self.matrix = self._coo_matrix.toarray()[:, :, None] + # update the mask to reflect the new shape - self.mask = torch.tensor( - self._coo_matrix.toarray(), dtype=torch.float32, device=self.mask.device - ) + self.mask = torch.tensor(self.matrix, dtype=torch.float32, device=self.device) def apply_mask(self, membership: Membership) -> torch.Tensor: """ @@ -74,13 +126,13 @@ def apply_mask(self, membership: Membership) -> torch.Tensor: The masked membership values (zero may or may not be a valid degree of truth). """ membership_shape: torch.Size = membership.mask.shape - if self._coo_matrix.shape != membership_shape: + if self.matrix.shape != membership_shape: if len(membership_shape) > 2: # this is for the case where masks have been stacked due to compound relations membership_shape = membership_shape[-2:] # get the last two dimensions self.resize(*membership_shape) # select the membership values that are not zeroed out (i.e., involved in the relation) - after_mask = membership.degrees * self.mask + after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask return after_mask.sum(dim=1, keepdim=True) # drop the zeroed out values def forward(self, membership: Membership) -> torch.Tensor: diff --git a/src/fuzzy/relations/continuous/t_norm.py b/src/fuzzy/relations/continuous/t_norm.py index fca3bf8..ba46837 100644 --- a/src/fuzzy/relations/continuous/t_norm.py +++ b/src/fuzzy/relations/continuous/t_norm.py @@ -30,7 +30,7 @@ def forward(self, membership: Membership) -> Membership: return Membership( elements=membership.elements, degrees=self.apply_mask(membership=membership) - .min(dim=-1, keepdim=True) + .min(dim=-2, keepdim=True) .values, mask=self.mask, ) @@ -57,6 +57,6 @@ def forward(self, membership: Membership) -> Membership: # then take the minimum value of those that remain in the last dimension return Membership( elements=membership.elements, - degrees=self.apply_mask(membership=membership).prod(dim=-1, keepdim=True), + degrees=self.apply_mask(membership=membership).prod(dim=-2, keepdim=True), mask=self.mask, ) diff --git a/tests/test_relations/test_n_ary.py b/tests/test_relations/test_n_ary.py index ea2e357..c00fc20 100644 --- a/tests/test_relations/test_n_ary.py +++ b/tests/test_relations/test_n_ary.py @@ -108,9 +108,9 @@ def test_minimum(self) -> None: after_mask = n_ary.apply_mask(membership=membership) expected_after_mask = torch.tensor( [ - [[7.4245834e-01, 2.5514542e-04]], - [[8.4526926e-01, 9.6005607e-01]], - [[9.9679035e-01, 5.7408627e-04]], + [[[7.4245834e-01], [2.5514542e-04]]], + [[[8.4526926e-01], [9.6005607e-01]]], + [[[9.9679035e-01], [5.7408627e-04]]], ], device=AVAILABLE_DEVICE, ) @@ -119,7 +119,7 @@ def test_minimum(self) -> None: # test the forward pass min_membership: Membership = n_ary.forward(membership) expected_min_values = torch.tensor( - [[[2.5514542e-04]], [[8.4526926e-01]], [[5.7408627e-04]]], + [[[[2.5514542e-04]]], [[[8.4526926e-01]]], [[[5.7408627e-04]]]], device=AVAILABLE_DEVICE, ) self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) @@ -141,9 +141,9 @@ def test_algebraic_product(self) -> None: after_mask = n_ary.apply_mask(membership=membership) expected_after_mask = torch.tensor( [ - [[7.4245834e-01, 2.5514542e-04]], - 
[[8.4526926e-01, 9.6005607e-01]], - [[9.9679035e-01, 5.7408627e-04]], + [[[7.4245834e-01], [2.5514542e-04]]], + [[[8.4526926e-01], [9.6005607e-01]]], + [[[9.9679035e-01], [5.7408627e-04]]], ], device=AVAILABLE_DEVICE, ) @@ -153,9 +153,9 @@ def test_algebraic_product(self) -> None: prod_membership: Membership = n_ary.forward(membership) expected_prod_values = torch.tensor( [ - [[7.4245834e-01 * 2.5514542e-04]], - [[8.4526926e-01 * 9.6005607e-01]], - [[9.9679035e-01 * 5.7408627e-04]], + [[[7.4245834e-01 * 2.5514542e-04]]], + [[[8.4526926e-01 * 9.6005607e-01]]], + [[[9.9679035e-01 * 5.7408627e-04]]], ], device=AVAILABLE_DEVICE, ) @@ -200,10 +200,23 @@ def test_combination_of_t_norms(self) -> None: min_membership: Membership = n_ary_next_min(compound_values) expected_min_values = torch.tensor( [ - [[7.4245834e-01 * 2.5514542e-04]], - [[8.4526926e-01 * 9.6005607e-01]], - [[9.9679035e-01 * 5.7408627e-04]], + [[[7.4245834e-01 * 2.5514542e-04]]], + [[[8.4526926e-01 * 9.6005607e-01]]], + [[[9.9679035e-01 * 5.7408627e-04]]], ], device=AVAILABLE_DEVICE, ) self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) + + def test_multiple_indices_passed_as_list(self): + n_ary = Minimum( + [(0, 1), (1, 0)], + [(1, 1), (2, 1)], + [(2, 1), (2, 0)], + [(0, 1), (2, 0)], + [(1, 1), (0, 1)], + device=AVAILABLE_DEVICE, + ) + n_ary(self.gaussian_mf(torch.tensor(self.data, device=AVAILABLE_DEVICE))) + self.assertEqual(n_ary._coo_matrix.shape, (2, 2)) + self.assertEqual(n_ary._original_shape, (2, 2)) From b30f7cebb628442949b1d7097c72231c962d2ac9 Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Sat, 17 Aug 2024 08:13:40 -0400 Subject: [PATCH 03/12] Improve resulting shapes of t-norm outputs --- src/fuzzy/relations/continuous/n_ary.py | 14 +++--- src/fuzzy/relations/continuous/t_norm.py | 4 +- tests/test_relations/test_n_ary.py | 64 +++++++++++++++++------- 3 files changed, 56 insertions(+), 26 deletions(-) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index 0ca648b..d4d1368 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -125,15 +125,15 @@ def apply_mask(self, membership: Membership) -> torch.Tensor: Returns: The masked membership values (zero may or may not be a valid degree of truth). 
""" - membership_shape: torch.Size = membership.mask.shape - if self.matrix.shape != membership_shape: - if len(membership_shape) > 2: - # this is for the case where masks have been stacked due to compound relations - membership_shape = membership_shape[-2:] # get the last two dimensions + membership_shape: torch.Size = membership.degrees.shape + if self.matrix.shape[:-1] != membership_shape[1:]: + # if len(membership_shape) > 2: + # this is for the case where masks have been stacked due to compound relations + membership_shape = membership_shape[1:] # get the last two dimensions self.resize(*membership_shape) # select the membership values that are not zeroed out (i.e., involved in the relation) after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask - return after_mask.sum(dim=1, keepdim=True) # drop the zeroed out values + return after_mask.sum(dim=1, keepdim=False) # drop the zeroed out values def forward(self, membership: Membership) -> torch.Tensor: """ @@ -185,7 +185,7 @@ def forward(self, membership: Membership) -> Membership: ] degrees: torch.Tensor = torch.cat( [membership.degrees for membership in memberships], dim=-1 - ) + ).unsqueeze(dim=-1) # create a new mask that accounts for the different masks for each relation mask = torch.stack([relation.mask for relation in self.relations]) return Membership(elements=membership.elements, degrees=degrees, mask=mask) diff --git a/src/fuzzy/relations/continuous/t_norm.py b/src/fuzzy/relations/continuous/t_norm.py index ba46837..fdc3738 100644 --- a/src/fuzzy/relations/continuous/t_norm.py +++ b/src/fuzzy/relations/continuous/t_norm.py @@ -30,7 +30,7 @@ def forward(self, membership: Membership) -> Membership: return Membership( elements=membership.elements, degrees=self.apply_mask(membership=membership) - .min(dim=-2, keepdim=True) + .min(dim=-2, keepdim=False) .values, mask=self.mask, ) @@ -57,6 +57,6 @@ def forward(self, membership: Membership) -> Membership: # then take the minimum value of those that remain in the last dimension return Membership( elements=membership.elements, - degrees=self.apply_mask(membership=membership).prod(dim=-2, keepdim=True), + degrees=self.apply_mask(membership=membership).prod(dim=-2, keepdim=False), mask=self.mask, ) diff --git a/tests/test_relations/test_n_ary.py b/tests/test_relations/test_n_ary.py index c00fc20..d774dfa 100644 --- a/tests/test_relations/test_n_ary.py +++ b/tests/test_relations/test_n_ary.py @@ -11,6 +11,7 @@ N_TERMS: int = 2 N_VARIABLES: int = 4 N_OBSERVATIONS: int = 3 +N_COMPOUNDS: int = 5 AVAILABLE_DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -108,9 +109,9 @@ def test_minimum(self) -> None: after_mask = n_ary.apply_mask(membership=membership) expected_after_mask = torch.tensor( [ - [[[7.4245834e-01], [2.5514542e-04]]], - [[[8.4526926e-01], [9.6005607e-01]]], - [[[9.9679035e-01], [5.7408627e-04]]], + [[7.4245834e-01], [2.5514542e-04]], + [[8.4526926e-01], [9.6005607e-01]], + [[9.9679035e-01], [5.7408627e-04]], ], device=AVAILABLE_DEVICE, ) @@ -119,7 +120,7 @@ def test_minimum(self) -> None: # test the forward pass min_membership: Membership = n_ary.forward(membership) expected_min_values = torch.tensor( - [[[[2.5514542e-04]]], [[[8.4526926e-01]]], [[[5.7408627e-04]]]], + [[2.5514542e-04], [8.4526926e-01], [5.7408627e-04]], device=AVAILABLE_DEVICE, ) self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) @@ -141,9 +142,9 @@ def test_algebraic_product(self) -> None: after_mask = n_ary.apply_mask(membership=membership) 
expected_after_mask = torch.tensor( [ - [[[7.4245834e-01], [2.5514542e-04]]], - [[[8.4526926e-01], [9.6005607e-01]]], - [[[9.9679035e-01], [5.7408627e-04]]], + [[7.4245834e-01], [2.5514542e-04]], + [[8.4526926e-01], [9.6005607e-01]], + [[9.9679035e-01], [5.7408627e-04]], ], device=AVAILABLE_DEVICE, ) @@ -153,9 +154,9 @@ def test_algebraic_product(self) -> None: prod_membership: Membership = n_ary.forward(membership) expected_prod_values = torch.tensor( [ - [[[7.4245834e-01 * 2.5514542e-04]]], - [[[8.4526926e-01 * 9.6005607e-01]]], - [[[9.9679035e-01 * 5.7408627e-04]]], + [7.4245834e-01 * 2.5514542e-04], + [8.4526926e-01 * 9.6005607e-01], + [9.9679035e-01 * 5.7408627e-04], ], device=AVAILABLE_DEVICE, ) @@ -189,7 +190,7 @@ def test_combination_of_t_norms(self) -> None: n_ary_prod(membership=membership).degrees, ], dim=-1, - ) + ).unsqueeze(dim=-1) self.assertTrue( torch.allclose(compound_values.degrees, expected_compound_values) ) @@ -200,9 +201,9 @@ def test_combination_of_t_norms(self) -> None: min_membership: Membership = n_ary_next_min(compound_values) expected_min_values = torch.tensor( [ - [[[7.4245834e-01 * 2.5514542e-04]]], - [[[8.4526926e-01 * 9.6005607e-01]]], - [[[9.9679035e-01 * 5.7408627e-04]]], + [7.4245834e-01 * 2.5514542e-04], + [8.4526926e-01 * 9.6005607e-01], + [9.9679035e-01 * 5.7408627e-04], ], device=AVAILABLE_DEVICE, ) @@ -217,6 +218,35 @@ def test_multiple_indices_passed_as_list(self): [(1, 1), (0, 1)], device=AVAILABLE_DEVICE, ) - n_ary(self.gaussian_mf(torch.tensor(self.data, device=AVAILABLE_DEVICE))) - self.assertEqual(n_ary._coo_matrix.shape, (2, 2)) - self.assertEqual(n_ary._original_shape, (2, 2)) + membership = self.test_gaussian_membership() + prod_membership: Membership = n_ary(membership) + expected_prod_values = torch.tensor( + [ + [ + 2.5514542e-04, + 0.0000000e00, + 3.8928282e-04, + 2.5514542e-04, + 0.0000000e00, + ], + [ + 8.4526926e-01, + 0.0000000e00, + 3.0867624e-04, + 2.2095737e-01, + 0.0000000e00, + ], + [ + 5.7408627e-04, + 0.0000000e00, + 1.7616944e-02, + 5.7408627e-04, + 0.0000000e00, + ], + ], + device=AVAILABLE_DEVICE, + ) + self.assertEqual(prod_membership.degrees.shape[0], N_OBSERVATIONS) + self.assertEqual(prod_membership.degrees.shape[1], N_COMPOUNDS) + self.assertEqual(prod_membership.degrees.shape, expected_prod_values.shape) + self.assertTrue(torch.allclose(prod_membership.degrees, expected_prod_values)) From 43db59478b51b29164c7201aad063f0dd7b8f027 Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Mon, 19 Aug 2024 11:26:27 -0400 Subject: [PATCH 04/12] Fix fuzzy n-ary relations to work with only some variables --- src/fuzzy/relations/continuous/n_ary.py | 80 +++++---- src/fuzzy/relations/continuous/old_tnorm.py | 8 +- src/fuzzy/relations/continuous/t_norm.py | 27 +++ tests/test_relations/test_n_ary.py | 189 +++++++++++++++----- 4 files changed, 227 insertions(+), 77 deletions(-) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index d4d1368..f6aaf97 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -37,28 +37,33 @@ def __init__( """ super().__init__(**kwargs) self.device: torch.device = device + self.indices: List[List[Tuple[int, int]]] = [] # self.indices = indices - if isinstance(indices[0], list): - # this scenario is for when we have multiple compound indices that use the same relation - # this is useful for computational efficiency (i.e., not having to use a for loop) - self._coo_matrix: List[sps._coo.coo_matrix] = [] - 
self._original_shape: List[Tuple[int, int]] = [] - for relation_indices in indices: - coo_matrix = self.convert_indices_to_matrix(relation_indices) - self._original_shape.append(coo_matrix.shape) - self._coo_matrix.append(coo_matrix) - # now convert to a list of matrices - max_var = max(t[0] for t in self._original_shape) - max_term = max(t[1] for t in self._original_shape) - self.make_np_matrix(max_var, max_term) - else: - # this is the normal scenario where we have a single relation but multiple indices - self._coo_matrix: sps._coo.coo_matrix = self.convert_indices_to_matrix( - indices - ) - self._original_shape = self._coo_matrix.shape - self.matrix: np.ndarray = self._coo_matrix.toarray()[:, :, None] + if not isinstance(indices[0], list): + indices = [indices] + + # this scenario is for when we have multiple compound indices that use the same relation + # this is useful for computational efficiency (i.e., not having to use a for loop) + self._coo_matrix: List[sps._coo.coo_matrix] = [] + self._original_shape: List[Tuple[int, int]] = [] + for relation_indices in indices: + coo_matrix = self.convert_indices_to_matrix(relation_indices) + self._original_shape.append(coo_matrix.shape) + self._coo_matrix.append(coo_matrix) + # now convert to a list of matrices + max_var = max(t[0] for t in self._original_shape) + max_term = max(t[1] for t in self._original_shape) + self.make_np_matrix(max_var, max_term) + self.indices.extend(indices) + # else: + # # this is the normal scenario where we have a single relation but multiple indices + # self._coo_matrix: sps._coo.coo_matrix = self.convert_indices_to_matrix( + # indices + # ) + # self._original_shape = self._coo_matrix.shape + # self.matrix: np.ndarray = self._coo_matrix.toarray()[:, :, None] + # self.indices.append(list(indices)) # this mask is used to zero out the values that are not part of the relation self.mask: torch.Tensor = torch.tensor( @@ -78,7 +83,7 @@ def make_np_matrix(self, max_var: int, max_term: int): coo_matrix.resize(max_var, max_term) matrices.append(coo_matrix.toarray()) # make a new axis and stack long that axis - self.matrix: np.ndarray = np.stack(matrices, axis=-1) + self.matrix: np.ndarray = np.stack(matrices).swapaxes(0, 1).swapaxes(1, 2) @staticmethod def convert_indices_to_matrix(indices) -> sps._coo.coo_matrix: @@ -102,15 +107,18 @@ def resize(self, *shape): Args: shape: The new shape of the matrix. 
""" + for coo_matrix in self._coo_matrix: + coo_matrix.resize(*shape) + self.make_np_matrix(shape[0], shape[1]) # resize the COO matrix in-place - if isinstance(self._coo_matrix, list): - for coo_matrix in self._coo_matrix: - coo_matrix.resize(*shape) - self.make_np_matrix(shape[0], shape[1]) - else: - self._coo_matrix.resize(*shape) - # TODO: update the matrix to reflect the new shape - self.matrix = self._coo_matrix.toarray()[:, :, None] + # if isinstance(self._coo_matrix, list): + # for coo_matrix in self._coo_matrix: + # coo_matrix.resize(*shape) + # self.make_np_matrix(shape[0], shape[1]) + # else: + # self._coo_matrix.resize(*shape) + # # TODO: update the matrix to reflect the new shape + # self.matrix = self._coo_matrix.toarray()[:, :, None] # update the mask to reflect the new shape self.mask = torch.tensor(self.matrix, dtype=torch.float32, device=self.device) @@ -131,9 +139,17 @@ def apply_mask(self, membership: Membership) -> torch.Tensor: # this is for the case where masks have been stacked due to compound relations membership_shape = membership_shape[1:] # get the last two dimensions self.resize(*membership_shape) - # select the membership values that are not zeroed out (i.e., involved in the relation) - after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask - return after_mask.sum(dim=1, keepdim=False) # drop the zeroed out values + # 1st part: select memberships that are not zeroed out (i.e., involved in the relation) + # 2nd part: add the mask complement to ignore the zeros + # after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask + after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask.unsqueeze(0) + # the complement mask adds zeros where the mask is zero, these are not part of the relation + complement_mask = 1 - torch.heaviside( + self.mask.sum(dim=0), values=torch.zeros(1, device=self.device) + ) + return (after_mask + (1 - self.mask)).prod(dim=2, keepdim=False) + return after_mask.sum(dim=2, keepdim=False) + # return after_mask.sum(dim=2, keepdim=False) + complement_mask # drop the zeroed out values def forward(self, membership: Membership) -> torch.Tensor: """ diff --git a/src/fuzzy/relations/continuous/old_tnorm.py b/src/fuzzy/relations/continuous/old_tnorm.py index d0f92dc..5410c19 100644 --- a/src/fuzzy/relations/continuous/old_tnorm.py +++ b/src/fuzzy/relations/continuous/old_tnorm.py @@ -6,16 +6,18 @@ import torch +from fuzzy.relations.continuous.t_norm import Product, Minimum, SoftmaxSum + class TNorm(Enum): """ Enumerates the types of t-norms. """ - PRODUCT = "product" # i.e., algebraic product - MINIMUM = "minimum" + PRODUCT = Product # i.e., algebraic product + MINIMUM = Minimum ACZEL_ALSINA = "aczel_alsina" # not yet implemented - SOFTMAX_SUM = "softmax_sum" + SOFTMAX_SUM = SoftmaxSum # not yet implemented SOFTMAX_MEAN = "softmax_mean" LUKASIEWICZ = "generalized_lukasiewicz" # the following are to be implemented diff --git a/src/fuzzy/relations/continuous/t_norm.py b/src/fuzzy/relations/continuous/t_norm.py index fdc3738..f4aa052 100644 --- a/src/fuzzy/relations/continuous/t_norm.py +++ b/src/fuzzy/relations/continuous/t_norm.py @@ -4,6 +4,8 @@ relations are implemented here. 
""" +import torch + from fuzzy.sets.continuous.membership import Membership from fuzzy.relations.continuous.n_ary import NAryRelation @@ -60,3 +62,28 @@ def forward(self, membership: Membership) -> Membership: degrees=self.apply_mask(membership=membership).prod(dim=-2, keepdim=False), mask=self.mask, ) + + +class SoftmaxSum(NAryRelation): + """ + This class represents the softmax sum n-ary fuzzy relation. This is a special case when dealing + with high-dimensional TSK systems, where the softmax sum is used to leverage Gaussians' + defuzzification relationship to the softmax function. + """ + + def forward(self, membership: Membership) -> Membership: + """ + Calculates the fuzzy compound's applicability using the softmax sum inference engine. + This is particularly useful for when dealing with high-dimensional data, and is considered + a traditional variant of TSK fuzzy stems on high-dimensional datasets. + + Args: + membership: The memberships. + + Returns: + The applicability of the fuzzy compounds (e.g., fuzzy logic rules). + """ + # intermediate_values = self.calc_intermediate_input(antecedents_memberships) + firing_strengths = membership.sum(dim=1) + max_values, _ = firing_strengths.max(dim=-1, keepdim=True) + return torch.nn.functional.softmax(firing_strengths - max_values, dim=-1) diff --git a/tests/test_relations/test_n_ary.py b/tests/test_relations/test_n_ary.py index d774dfa..9a9ece6 100644 --- a/tests/test_relations/test_n_ary.py +++ b/tests/test_relations/test_n_ary.py @@ -3,6 +3,8 @@ import torch import numpy as np +from fuzzy.sets.continuous.abstract import ContinuousFuzzySet +from fuzzy.sets.continuous.group import GroupedFuzzySets from fuzzy.sets.continuous.impl import Gaussian from fuzzy.sets.continuous.membership import Membership from fuzzy.relations.continuous.t_norm import Minimum, Product @@ -86,14 +88,14 @@ def test_n_ary_relation(self) -> None: n_ary = NAryRelation((0, 1), (1, 0), device=AVAILABLE_DEVICE) self.assertRaises(NotImplementedError, n_ary.forward, None) # check that the matrix shape is correct - self.assertEqual(n_ary._coo_matrix.shape, (2, 2)) + self.assertEqual(n_ary._coo_matrix[0].shape, (2, 2)) # check that the original shape is stored - self.assertEqual(n_ary._original_shape, (2, 2)) + self.assertEqual(n_ary._original_shape[0], (2, 2)) # matrix size can increase (in-place) for more potential rows (vars) and columns (terms) - n_ary._coo_matrix.resize(3, 3) - self.assertEqual(n_ary._coo_matrix.shape, (3, 3)) + n_ary._coo_matrix[0].resize(3, 3) + self.assertEqual(n_ary._coo_matrix[0].shape, (3, 3)) # check that the original shape is still kept after resizing - self.assertEqual(n_ary._original_shape, (2, 2)) + self.assertEqual(n_ary._original_shape[0], (2, 2)) def test_minimum(self) -> None: """ @@ -105,26 +107,28 @@ def test_minimum(self) -> None: n_ary = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) membership = self.test_gaussian_membership() + # test the forward pass + min_membership: Membership = n_ary.forward(membership) + expected_min_values = torch.tensor( + [[2.5514542e-04], [8.4526926e-01], [5.7408627e-04]], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) + + # now check the interior workings + # test the mask application after_mask = n_ary.apply_mask(membership=membership) expected_after_mask = torch.tensor( [ - [[7.4245834e-01], [2.5514542e-04]], - [[8.4526926e-01], [9.6005607e-01]], - [[9.9679035e-01], [5.7408627e-04]], + [[2.5514542e-04], [7.4245834e-01], [1.0000000e00], 
[1.0000000e00]], + [[9.6005607e-01], [8.4526926e-01], [1.0000000e00], [1.0000000e00]], + [[5.7408627e-04], [9.9679035e-01], [1.0000000e00], [1.0000000e00]], ], device=AVAILABLE_DEVICE, ) self.assertTrue(torch.allclose(after_mask, expected_after_mask)) - # test the forward pass - min_membership: Membership = n_ary.forward(membership) - expected_min_values = torch.tensor( - [[2.5514542e-04], [8.4526926e-01], [5.7408627e-04]], - device=AVAILABLE_DEVICE, - ) - self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) - # check that it is torch.jit scriptable (currently not working) # n_ary_script = torch.jit.script(n_ary) # @@ -136,19 +140,20 @@ def test_minimum(self) -> None: def test_algebraic_product(self) -> None: n_ary = Product((0, 1), (1, 0), device=AVAILABLE_DEVICE) - membership = self.test_gaussian_membership() - # test the mask application - after_mask = n_ary.apply_mask(membership=membership) - expected_after_mask = torch.tensor( - [ - [[7.4245834e-01], [2.5514542e-04]], - [[8.4526926e-01], [9.6005607e-01]], - [[9.9679035e-01], [5.7408627e-04]], - ], + # TODO: do not allow duplicate indices + Product( + (1, 1), (1, 1), device=AVAILABLE_DEVICE + ) # results in [[0, 0], [0, 2]] !!! + Product( + [(0, 0), (1, 0)], + [(0, 1), (1, 0)], + [(0, 1), (1, 1)], + [(1, 1)], device=AVAILABLE_DEVICE, ) - self.assertTrue(torch.allclose(after_mask, expected_after_mask)) + + membership = self.test_gaussian_membership() # test the forward pass prod_membership: Membership = n_ary.forward(membership) @@ -162,6 +167,20 @@ def test_algebraic_product(self) -> None: ) self.assertTrue(torch.allclose(prod_membership.degrees, expected_prod_values)) + # now check the interior workings + + # test the mask application + after_mask = n_ary.apply_mask(membership=membership) + expected_after_mask = torch.tensor( + [ + [[2.5514542e-04], [7.4245834e-01], [1.0000000e00], [1.0000000e00]], + [[9.6005607e-01], [8.4526926e-01], [1.0000000e00], [1.0000000e00]], + [[5.7408627e-04], [9.9679035e-01], [1.0000000e00], [1.0000000e00]], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(after_mask, expected_after_mask)) + # check that it is torch.jit scriptable (currently not working) # n_ary_script = torch.jit.script(n_ary) # @@ -210,7 +229,7 @@ def test_combination_of_t_norms(self) -> None: self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) def test_multiple_indices_passed_as_list(self): - n_ary = Minimum( + n_ary = Product( [(0, 1), (1, 0)], [(1, 1), (2, 1)], [(2, 1), (2, 0)], @@ -223,25 +242,40 @@ def test_multiple_indices_passed_as_list(self): expected_prod_values = torch.tensor( [ [ - 2.5514542e-04, - 0.0000000e00, - 3.8928282e-04, - 2.5514542e-04, - 0.0000000e00, + membership.degrees[0][0][1].item() + * membership.degrees[0][1][0].item(), + membership.degrees[0][1][1].item() + * membership.degrees[0][2][1].item(), + membership.degrees[0][2][1].item() + * membership.degrees[0][2][0].item(), + membership.degrees[0][0][1].item() + * membership.degrees[0][2][0].item(), + membership.degrees[0][1][1].item() + * membership.degrees[0][0][1].item(), ], [ - 8.4526926e-01, - 0.0000000e00, - 3.0867624e-04, - 2.2095737e-01, - 0.0000000e00, + membership.degrees[1][0][1].item() + * membership.degrees[1][1][0].item(), + membership.degrees[1][1][1].item() + * membership.degrees[1][2][1].item(), + membership.degrees[1][2][1].item() + * membership.degrees[1][2][0].item(), + membership.degrees[1][0][1].item() + * membership.degrees[1][2][0].item(), + 
membership.degrees[1][1][1].item() + * membership.degrees[1][0][1].item(), ], [ - 5.7408627e-04, - 0.0000000e00, - 1.7616944e-02, - 5.7408627e-04, - 0.0000000e00, + membership.degrees[2][0][1].item() + * membership.degrees[2][1][0].item(), + membership.degrees[2][1][1].item() + * membership.degrees[2][2][1].item(), + membership.degrees[2][2][1].item() + * membership.degrees[2][2][0].item(), + membership.degrees[2][0][1].item() + * membership.degrees[2][2][0].item(), + membership.degrees[2][1][1].item() + * membership.degrees[2][0][1].item(), ], ], device=AVAILABLE_DEVICE, @@ -250,3 +284,74 @@ def test_multiple_indices_passed_as_list(self): self.assertEqual(prod_membership.degrees.shape[1], N_COMPOUNDS) self.assertEqual(prod_membership.degrees.shape, expected_prod_values.shape) self.assertTrue(torch.allclose(prod_membership.degrees, expected_prod_values)) + + n_ary_prod = Product( + [(0, 0), (1, 0)], + [(0, 1), (1, 0)], + [(0, 1), (1, 1)], + [(1, 1)], + device=AVAILABLE_DEVICE, + ) + + +class TestMinimum(unittest.TestCase): + """ + Test the Minimum n-ary relation. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.data = torch.tensor( + [ + [1.5409961, -0.2934289], + [-2.1787894, 0.56843126], + [-1.0845224, -1.3985955], + [0.40334684, 0.83802634], + ], + device=AVAILABLE_DEVICE, + ) + self.gaussian_mf = [ + Gaussian( + centers=np.array([-1, 0.0, 1.0]), + widths=np.array([1.0, 1.0, 1.0]), + device=AVAILABLE_DEVICE, + ), + Gaussian( + centers=np.array([-1.0, 0.0, 1.0]), + widths=np.array([1.0, 1.0, 1.0]), + device=AVAILABLE_DEVICE, + ), + ] + self.hypercube = GroupedFuzzySets( + modules_list=[ContinuousFuzzySet.stack(self.gaussian_mf)] + ) + + def test_minimum(self) -> None: + self.minimum = Minimum( + [(0, 0), (1, 0)], + [(0, 0), (1, 1)], + [(0, 1), (1, 0)], + [(0, 1), (1, 1)], + [(0, 1), (1, 2)], + device=AVAILABLE_DEVICE, + ) + + membership: Membership = self.hypercube(self.data) + min_membership: Membership = self.minimum(membership) + expected_degrees = torch.tensor( + [ + [0.00157003, 0.00157003, 0.09304529, 0.09304529, 0.09304529], + [ + 8.5436940e-02, + 2.4918883e-01, + 8.6766202e-03, + 8.6766e-03, + 8.6766202e-03, + ], + [0.8531001, 0.14141318, 0.3084521, 0.14141318, 0.00317242], + [0.034104, 0.13954304, 0.034104, 0.49545035, 0.8498557], + ], + device=AVAILABLE_DEVICE, + ) + + self.assertTrue(torch.allclose(min_membership.degrees, expected_degrees)) From 519cb9e2194d52d2736abfe902ad3526d7b1d262 Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Mon, 19 Aug 2024 11:45:18 -0400 Subject: [PATCH 05/12] Clean up the source code and unit tests --- src/fuzzy/relations/continuous/n_ary.py | 43 ++-- tests/test_relations/test_n_ary.py | 275 ++++++++++++++---------- 2 files changed, 173 insertions(+), 145 deletions(-) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index f6aaf97..1450335 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -21,7 +21,6 @@ class NAryRelation(torch.nn.Module): truth value. 
""" - # TODO: add support for indices to be List[Tuple[int, int]] for multiple compound indices def __init__( self, *indices: Union[Tuple[int, int], List[Tuple[int, int]]], @@ -48,6 +47,10 @@ def __init__( self._coo_matrix: List[sps._coo.coo_matrix] = [] self._original_shape: List[Tuple[int, int]] = [] for relation_indices in indices: + if len(set(relation_indices)) < len(relation_indices): + raise ValueError( + "The indices must be unique for the relation to be well-defined." + ) coo_matrix = self.convert_indices_to_matrix(relation_indices) self._original_shape.append(coo_matrix.shape) self._coo_matrix.append(coo_matrix) @@ -56,14 +59,6 @@ def __init__( max_term = max(t[1] for t in self._original_shape) self.make_np_matrix(max_var, max_term) self.indices.extend(indices) - # else: - # # this is the normal scenario where we have a single relation but multiple indices - # self._coo_matrix: sps._coo.coo_matrix = self.convert_indices_to_matrix( - # indices - # ) - # self._original_shape = self._coo_matrix.shape - # self.matrix: np.ndarray = self._coo_matrix.toarray()[:, :, None] - # self.indices.append(list(indices)) # this mask is used to zero out the values that are not part of the relation self.mask: torch.Tensor = torch.tensor( @@ -76,7 +71,17 @@ def __init__( # we can create a graph from the adjacency matrix # g = igraph.Graph.Adjacency(self._coo_matrix) - def make_np_matrix(self, max_var: int, max_term: int): + def make_np_matrix(self, max_var: int, max_term: int) -> None: + """ + Make (or update) the numpy matrix from the COO matrices. + + Args: + max_var: The maximum number of variables. + max_term: The maximum number of terms. + + Returns: + None + """ matrices = [] for coo_matrix in self._coo_matrix: # first resize @@ -110,15 +115,6 @@ def resize(self, *shape): for coo_matrix in self._coo_matrix: coo_matrix.resize(*shape) self.make_np_matrix(shape[0], shape[1]) - # resize the COO matrix in-place - # if isinstance(self._coo_matrix, list): - # for coo_matrix in self._coo_matrix: - # coo_matrix.resize(*shape) - # self.make_np_matrix(shape[0], shape[1]) - # else: - # self._coo_matrix.resize(*shape) - # # TODO: update the matrix to reflect the new shape - # self.matrix = self._coo_matrix.toarray()[:, :, None] # update the mask to reflect the new shape self.mask = torch.tensor(self.matrix, dtype=torch.float32, device=self.device) @@ -139,17 +135,10 @@ def apply_mask(self, membership: Membership) -> torch.Tensor: # this is for the case where masks have been stacked due to compound relations membership_shape = membership_shape[1:] # get the last two dimensions self.resize(*membership_shape) - # 1st part: select memberships that are not zeroed out (i.e., involved in the relation) - # 2nd part: add the mask complement to ignore the zeros - # after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask + # select memberships that are not zeroed out (i.e., involved in the relation) after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask.unsqueeze(0) # the complement mask adds zeros where the mask is zero, these are not part of the relation - complement_mask = 1 - torch.heaviside( - self.mask.sum(dim=0), values=torch.zeros(1, device=self.device) - ) return (after_mask + (1 - self.mask)).prod(dim=2, keepdim=False) - return after_mask.sum(dim=2, keepdim=False) - # return after_mask.sum(dim=2, keepdim=False) + complement_mask # drop the zeroed out values def forward(self, membership: Membership) -> torch.Tensor: """ diff --git a/tests/test_relations/test_n_ary.py 
b/tests/test_relations/test_n_ary.py index 9a9ece6..5ccacb2 100644 --- a/tests/test_relations/test_n_ary.py +++ b/tests/test_relations/test_n_ary.py @@ -1,14 +1,18 @@ +""" +Test the fuzzy n-ary relations work as expected. +""" + import unittest import torch import numpy as np -from fuzzy.sets.continuous.abstract import ContinuousFuzzySet -from fuzzy.sets.continuous.group import GroupedFuzzySets -from fuzzy.sets.continuous.impl import Gaussian from fuzzy.sets.continuous.membership import Membership from fuzzy.relations.continuous.t_norm import Minimum, Product from fuzzy.relations.continuous.n_ary import NAryRelation, Compound +from fuzzy.sets.continuous.abstract import ContinuousFuzzySet +from fuzzy.sets.continuous.group import GroupedFuzzySets +from fuzzy.sets.continuous.impl import Gaussian N_TERMS: int = 2 N_VARIABLES: int = 4 @@ -18,6 +22,10 @@ class TestNAryRelation(unittest.TestCase): + """ + Test the abstract n-ary relation, including functionality that is common to all n-ary relations. + """ + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.gaussian_mf = Gaussian( @@ -86,6 +94,7 @@ def test_n_ary_relation(self) -> None: None """ n_ary = NAryRelation((0, 1), (1, 0), device=AVAILABLE_DEVICE) + # the forward pass should not be implemented self.assertRaises(NotImplementedError, n_ary.forward, None) # check that the matrix shape is correct self.assertEqual(n_ary._coo_matrix[0].shape, (2, 2)) @@ -97,25 +106,38 @@ def test_n_ary_relation(self) -> None: # check that the original shape is still kept after resizing self.assertEqual(n_ary._original_shape[0], (2, 2)) - def test_minimum(self) -> None: + def test_duplicates(self) -> None: """ - Test the n-ary minimum relation. + Test that the NAryRelation class throws an error when given duplicate indices. Otherwise, a + duplicate index will result in a value greater than 1 in the mask, which is not allowed. Returns: None """ - n_ary = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) - membership = self.test_gaussian_membership() - - # test the forward pass - min_membership: Membership = n_ary.forward(membership) - expected_min_values = torch.tensor( - [[2.5514542e-04], [8.4526926e-01], [5.7408627e-04]], + self.assertRaises( + ValueError, + NAryRelation, + (0, 1), + (1, 0), + (1, 0), device=AVAILABLE_DEVICE, ) - self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) - # now check the interior workings + +class TestProduct(TestNAryRelation): + """ + Test the Product n-ary relation. + """ + + def test_algebraic_product(self) -> None: + """ + Test the n-ary product operation given a single relation. 
+ + Returns: + + """ + n_ary = Product((0, 1), (1, 0), device=AVAILABLE_DEVICE) + membership = self.test_gaussian_membership() # test the mask application after_mask = n_ary.apply_mask(membership=membership) @@ -129,32 +151,6 @@ def test_minimum(self) -> None: ) self.assertTrue(torch.allclose(after_mask, expected_after_mask)) - # check that it is torch.jit scriptable (currently not working) - # n_ary_script = torch.jit.script(n_ary) - # - # after_mask_script = n_ary_script.apply_mask(membership=membership) - # self.assertTrue(torch.allclose(after_mask_script, expected_after_mask)) - # - # min_values_script = n_ary_script.forward(membership) - # self.assertTrue(torch.allclose(min_values_script, expected_min_values)) - - def test_algebraic_product(self) -> None: - n_ary = Product((0, 1), (1, 0), device=AVAILABLE_DEVICE) - - # TODO: do not allow duplicate indices - Product( - (1, 1), (1, 1), device=AVAILABLE_DEVICE - ) # results in [[0, 0], [0, 2]] !!! - Product( - [(0, 0), (1, 0)], - [(0, 1), (1, 0)], - [(0, 1), (1, 1)], - [(1, 1)], - device=AVAILABLE_DEVICE, - ) - - membership = self.test_gaussian_membership() - # test the forward pass prod_membership: Membership = n_ary.forward(membership) expected_prod_values = torch.tensor( @@ -167,20 +163,6 @@ def test_algebraic_product(self) -> None: ) self.assertTrue(torch.allclose(prod_membership.degrees, expected_prod_values)) - # now check the interior workings - - # test the mask application - after_mask = n_ary.apply_mask(membership=membership) - expected_after_mask = torch.tensor( - [ - [[2.5514542e-04], [7.4245834e-01], [1.0000000e00], [1.0000000e00]], - [[9.6005607e-01], [8.4526926e-01], [1.0000000e00], [1.0000000e00]], - [[5.7408627e-04], [9.9679035e-01], [1.0000000e00], [1.0000000e00]], - ], - device=AVAILABLE_DEVICE, - ) - self.assertTrue(torch.allclose(after_mask, expected_after_mask)) - # check that it is torch.jit scriptable (currently not working) # n_ary_script = torch.jit.script(n_ary) # @@ -190,45 +172,15 @@ def test_algebraic_product(self) -> None: # min_values_script = n_ary_script.forward(membership) # self.assertTrue(torch.allclose(min_values_script, expected_min_values)) - def test_combination_of_t_norms(self) -> None: + def test_multiple_indices_passed_as_list(self) -> None: """ - Test we can create a combination of t-norms to reflect more complex compound propositions. + Test the Product operation given multiple relations, where some variables are never used + by those relations. This is a test to ensure that the Product operation can handle + relations that do not use all variables (i.e., does not wrongly output zeros). 
Returns: None """ - n_ary_min = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) - n_ary_prod = Product((0, 1), (1, 0), device=AVAILABLE_DEVICE) - membership = self.test_gaussian_membership() - - t_norm = Compound(n_ary_min, n_ary_prod) - compound_values = t_norm(membership=membership) - expected_compound_values = torch.cat( - [ - n_ary_min(membership=membership).degrees, - n_ary_prod(membership=membership).degrees, - ], - dim=-1, - ).unsqueeze(dim=-1) - self.assertTrue( - torch.allclose(compound_values.degrees, expected_compound_values) - ) - - # we can then follow it up with another t-norm - - n_ary_next_min = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) - min_membership: Membership = n_ary_next_min(compound_values) - expected_min_values = torch.tensor( - [ - [7.4245834e-01 * 2.5514542e-04], - [8.4526926e-01 * 9.6005607e-01], - [9.9679035e-01 * 5.7408627e-04], - ], - device=AVAILABLE_DEVICE, - ) - self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) - - def test_multiple_indices_passed_as_list(self): n_ary = Product( [(0, 1), (1, 0)], [(1, 1), (2, 1)], @@ -285,23 +237,82 @@ def test_multiple_indices_passed_as_list(self): self.assertEqual(prod_membership.degrees.shape, expected_prod_values.shape) self.assertTrue(torch.allclose(prod_membership.degrees, expected_prod_values)) - n_ary_prod = Product( - [(0, 0), (1, 0)], - [(0, 1), (1, 0)], - [(0, 1), (1, 1)], - [(1, 1)], - device=AVAILABLE_DEVICE, - ) - -class TestMinimum(unittest.TestCase): +class TestMinimum(TestNAryRelation): """ Test the Minimum n-ary relation. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.data = torch.tensor( + self.hypercube = GroupedFuzzySets( + modules_list=[ + ContinuousFuzzySet.stack( + [ + Gaussian( + centers=np.array([-1, 0.0, 1.0]), + widths=np.array([1.0, 1.0, 1.0]), + device=AVAILABLE_DEVICE, + ), + Gaussian( + centers=np.array([-1.0, 0.0, 1.0]), + widths=np.array([1.0, 1.0, 1.0]), + device=AVAILABLE_DEVICE, + ), + ] + ) + ] + ) + + def test_minimum(self) -> None: + """ + Test the n-ary minimum operation given a single relation. + + Returns: + None + """ + n_ary = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) + membership = self.test_gaussian_membership() + + # test the mask application + after_mask = n_ary.apply_mask(membership=membership) + expected_after_mask = torch.tensor( + [ + [[2.5514542e-04], [7.4245834e-01], [1.0000000e00], [1.0000000e00]], + [[9.6005607e-01], [8.4526926e-01], [1.0000000e00], [1.0000000e00]], + [[5.7408627e-04], [9.9679035e-01], [1.0000000e00], [1.0000000e00]], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(after_mask, expected_after_mask)) + + # test the forward pass + min_membership: Membership = n_ary.forward(membership) + expected_min_values = torch.tensor( + [[2.5514542e-04], [8.4526926e-01], [5.7408627e-04]], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) + + # check that it is torch.jit scriptable (currently not working) + # n_ary_script = torch.jit.script(n_ary) + # + # after_mask_script = n_ary_script.apply_mask(membership=membership) + # self.assertTrue(torch.allclose(after_mask_script, expected_after_mask)) + # + # min_values_script = n_ary_script.forward(membership) + # self.assertTrue(torch.allclose(min_values_script, expected_min_values)) + + def test_multiple_indices_passed_as_list(self) -> None: + """ + Test the Minimum operation given multiple relations, where some variables are never used + by those relations. 
This is a test to ensure that the Minimum operation can handle + relations that do not use all variables (i.e., does not wrongly output zeros). + + Returns: + None + """ + data = torch.tensor( [ [1.5409961, -0.2934289], [-2.1787894, 0.56843126], @@ -310,24 +321,7 @@ def __init__(self, *args, **kwargs): ], device=AVAILABLE_DEVICE, ) - self.gaussian_mf = [ - Gaussian( - centers=np.array([-1, 0.0, 1.0]), - widths=np.array([1.0, 1.0, 1.0]), - device=AVAILABLE_DEVICE, - ), - Gaussian( - centers=np.array([-1.0, 0.0, 1.0]), - widths=np.array([1.0, 1.0, 1.0]), - device=AVAILABLE_DEVICE, - ), - ] - self.hypercube = GroupedFuzzySets( - modules_list=[ContinuousFuzzySet.stack(self.gaussian_mf)] - ) - - def test_minimum(self) -> None: - self.minimum = Minimum( + minimum = Minimum( [(0, 0), (1, 0)], [(0, 0), (1, 1)], [(0, 1), (1, 0)], @@ -336,8 +330,8 @@ def test_minimum(self) -> None: device=AVAILABLE_DEVICE, ) - membership: Membership = self.hypercube(self.data) - min_membership: Membership = self.minimum(membership) + membership: Membership = self.hypercube(data) + min_membership: Membership = minimum(membership) expected_degrees = torch.tensor( [ [0.00157003, 0.00157003, 0.09304529, 0.09304529, 0.09304529], @@ -355,3 +349,48 @@ def test_minimum(self) -> None: ) self.assertTrue(torch.allclose(min_membership.degrees, expected_degrees)) + + +class TestCompound(TestNAryRelation): + """ + Test the Compound n-ary relation, which allows the user to compound/aggregate multiple n-ary + relations together. + """ + + def test_combination_of_t_norms(self) -> None: + """ + Test we can create a combination of t-norms to reflect more complex compound propositions. + + Returns: + None + """ + n_ary_min = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) + n_ary_prod = Product((0, 1), (1, 0), device=AVAILABLE_DEVICE) + membership = self.test_gaussian_membership() + + t_norm = Compound(n_ary_min, n_ary_prod) + compound_values = t_norm(membership=membership) + expected_compound_values = torch.cat( + [ + n_ary_min(membership=membership).degrees, + n_ary_prod(membership=membership).degrees, + ], + dim=-1, + ).unsqueeze(dim=-1) + self.assertTrue( + torch.allclose(compound_values.degrees, expected_compound_values) + ) + + # we can then follow it up with another t-norm + + n_ary_next_min = Minimum((0, 1), (1, 0), device=AVAILABLE_DEVICE) + min_membership: Membership = n_ary_next_min(compound_values) + expected_min_values = torch.tensor( + [ + [7.4245834e-01 * 2.5514542e-04], + [8.4526926e-01 * 9.6005607e-01], + [9.9679035e-01 * 5.7408627e-04], + ], + device=AVAILABLE_DEVICE, + ) + self.assertTrue(torch.allclose(min_membership.degrees, expected_min_values)) From f2fe6871df2ad54f0f91ca541b013d1cc1c1fe3d Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Mon, 19 Aug 2024 16:16:02 -0400 Subject: [PATCH 06/12] Remove old AlgebraicProduct class --- src/fuzzy/relations/continuous/old_tnorm.py | 42 --------- src/fuzzy/relations/continuous/t_norm.py | 3 +- tests/test_relations/test_tnorms.py | 94 --------------------- 3 files changed, 2 insertions(+), 137 deletions(-) delete mode 100644 tests/test_relations/test_tnorms.py diff --git a/src/fuzzy/relations/continuous/old_tnorm.py b/src/fuzzy/relations/continuous/old_tnorm.py index 5410c19..b0d6f21 100644 --- a/src/fuzzy/relations/continuous/old_tnorm.py +++ b/src/fuzzy/relations/continuous/old_tnorm.py @@ -4,8 +4,6 @@ from enum import Enum -import torch - from fuzzy.relations.continuous.t_norm import Product, Minimum, SoftmaxSum @@ -28,43 +26,3 @@ class TNorm(Enum): YAGER = 
"yager" DUBOIS = "dubois" DIF = "dif" - - -class AlgebraicProduct(torch.nn.Module): - """ - Implementation of the Algebraic Product t-norm (Fuzzy AND). - """ - - def __init__(self, in_features=None, importance=None): - """ - Initialization. - INPUT: - - in_features: shape of the input - - centers: trainable parameter - - sigmas: trainable parameter - importance is initialized to a one vector by default - """ - super().__init__() - self.in_features = in_features - - # initialize antecedent importance - if importance is None: - self.importance = torch.nn.parameter.Parameter(torch.tensor(1.0)) - self.importance.requires_grad = False - else: - if not isinstance(importance, torch.Tensor): - importance = torch.Tensor(importance) - self.importance = torch.nn.parameter.Parameter( - torch.abs(importance) - ) # importance can only be [0, 1] - self.importance.requires_grad = True - - def forward(self, elements): - """ - Forward pass of the function. - Applies the function to the input elementwise. - """ - self.importance = torch.nn.parameter.Parameter( - torch.abs(self.importance) - ) # importance can only be [0, 1] - return torch.prod(torch.mul(elements, self.importance)) diff --git a/src/fuzzy/relations/continuous/t_norm.py b/src/fuzzy/relations/continuous/t_norm.py index f4aa052..afbbf19 100644 --- a/src/fuzzy/relations/continuous/t_norm.py +++ b/src/fuzzy/relations/continuous/t_norm.py @@ -84,6 +84,7 @@ def forward(self, membership: Membership) -> Membership: The applicability of the fuzzy compounds (e.g., fuzzy logic rules). """ # intermediate_values = self.calc_intermediate_input(antecedents_memberships) - firing_strengths = membership.sum(dim=1) + # TODO: these dimensions are possibly not correct, need to be fixed/tested + firing_strengths = membership.degrees.sum(dim=1) max_values, _ = firing_strengths.max(dim=-1, keepdim=True) return torch.nn.functional.softmax(firing_strengths - max_values, dim=-1) diff --git a/tests/test_relations/test_tnorms.py b/tests/test_relations/test_tnorms.py deleted file mode 100644 index 5482b64..0000000 --- a/tests/test_relations/test_tnorms.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -Test various t-norm operations, such as the algebraic product. -""" - -import unittest - -import torch -import numpy as np - -from fuzzy.relations.continuous.old_tnorm import AlgebraicProduct - - -def algebraic_product(elements: np.ndarray, importance: np.ndarray) -> np.float32: - """ - Numpy calculation of the algebraic product. - - Args: - elements: The elements to be multiplied. - importance: The importance of each element. - - Returns: - The algebraic product of the given elements. - """ - return np.prod(elements * importance) - - -class TestAlgebraicProduct(unittest.TestCase): - """ - Test the algebraic product operation. - """ - - def test_single_input(self) -> None: - """ - The t-norm of a single input (w/o importance) should be == input. - """ - element = torch.rand(1) - n_inputs = 1 - tnorm = AlgebraicProduct(n_inputs) - importance_before_calculation = tnorm.importance - mu_pytorch = tnorm(element) - mu_numpy = algebraic_product( - element.cpu().detach().numpy(), - importance_before_calculation.cpu().detach().numpy(), - ) - - # make sure the parameters are still identical afterward - assert torch.isclose(tnorm.importance, importance_before_calculation).all() - # the outputs of the PyTorch and Numpy versions should be approx. 
equal - assert np.isclose(mu_pytorch.cpu().detach().numpy(), mu_numpy, rtol=1e-8).all() - - def test_multi_input(self) -> None: - """ - Test that the algebraic product is correctly calculated when multiple inputs are given. - - Returns: - None - """ - elements = torch.rand(4) - n_inputs = len(elements) - tnorm = AlgebraicProduct(n_inputs) - importance_before_calculation = tnorm.importance - mu_pytorch = tnorm(elements) - mu_numpy = algebraic_product( - elements.cpu().detach().numpy(), - importance_before_calculation.cpu().detach().numpy(), - ) - - # make sure the parameters are still identical afterward - assert torch.isclose(tnorm.importance, importance_before_calculation).all() - # the outputs of the PyTorch and Numpy versions should be approx. equal - assert np.isclose(mu_pytorch.cpu().detach().numpy(), mu_numpy, rtol=1e-8).all() - - def test_multi_input_with_importance_given(self) -> None: - """ - Test that the algebraic product is correctly calculated when multiple inputs (and their - varying degrees of importance) are given. - - Returns: - None - """ - elements = torch.rand(5) - n_inputs = len(elements) - importance_before_calculation = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]) - tnorm = AlgebraicProduct(n_inputs, importance=importance_before_calculation) - mu_pytorch = tnorm(elements) - mu_numpy = algebraic_product( - elements.cpu().detach().numpy(), - importance_before_calculation.cpu().detach().numpy(), - ) - - # make sure the parameters are still identical afterward - assert torch.isclose(tnorm.importance, importance_before_calculation).all() - # the outputs of the PyTorch and Numpy versions should be approx. equal - assert np.isclose(mu_pytorch.cpu().detach().numpy(), mu_numpy, rtol=1e-8).all() From 0831a043205bcfdb8a2f78a472d9fc8a6440028b Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Mon, 19 Aug 2024 16:38:35 -0400 Subject: [PATCH 07/12] Make SoftmaxSum return Membership --- src/fuzzy/relations/continuous/t_norm.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/fuzzy/relations/continuous/t_norm.py b/src/fuzzy/relations/continuous/t_norm.py index afbbf19..774bc5e 100644 --- a/src/fuzzy/relations/continuous/t_norm.py +++ b/src/fuzzy/relations/continuous/t_norm.py @@ -87,4 +87,8 @@ def forward(self, membership: Membership) -> Membership: # TODO: these dimensions are possibly not correct, need to be fixed/tested firing_strengths = membership.degrees.sum(dim=1) max_values, _ = firing_strengths.max(dim=-1, keepdim=True) - return torch.nn.functional.softmax(firing_strengths - max_values, dim=-1) + return Membership( + elements=membership.elements, + degrees=torch.nn.functional.softmax(firing_strengths - max_values, dim=-1), + mask=self.mask, + ) From 4ea560d151f9f46f9e25796fe98d71d146f6ca85 Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Tue, 20 Aug 2024 13:12:52 -0400 Subject: [PATCH 08/12] Add graph representation of NAryRelation --- src/fuzzy/relations/continuous/n_ary.py | 61 ++++++++++++++++----- tests/test_relations/test_n_ary.py | 73 ++++++++++++++++++++++++- 2 files changed, 116 insertions(+), 18 deletions(-) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index 1450335..0339733 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -6,6 +6,7 @@ from typing import Union, Tuple, List +import igraph import torch import numpy as np import scipy.sparse as sps @@ -71,6 +72,21 @@ def __init__( # we can create a graph from the adjacency 
matrix # g = igraph.Graph.Adjacency(self._coo_matrix) + @staticmethod + def convert_indices_to_matrix(indices) -> sps._coo.coo_matrix: + """ + Convert the given indices to a COO matrix. + + Args: + indices: The indices where a '1' will be placed at each index. + + Returns: + The COO matrix with a '1' at each index. + """ + data = np.ones(len(indices)) # a '1' indicates a relation exists + row, col = zip(*indices) + return sps.coo_matrix((data, (row, col)), dtype=np.int8) + def make_np_matrix(self, max_var: int, max_term: int) -> None: """ Make (or update) the numpy matrix from the COO matrices. @@ -90,21 +106,6 @@ def make_np_matrix(self, max_var: int, max_term: int) -> None: # make a new axis and stack long that axis self.matrix: np.ndarray = np.stack(matrices).swapaxes(0, 1).swapaxes(1, 2) - @staticmethod - def convert_indices_to_matrix(indices) -> sps._coo.coo_matrix: - """ - Convert the given indices to a COO matrix. - - Args: - indices: The indices where a '1' will be placed at each index. - - Returns: - The COO matrix with a '1' at each index. - """ - data = np.ones(len(indices)) # a '1' indicates a relation exists - row, col = zip(*indices) - return sps.coo_matrix((data, (row, col)), dtype=np.int8) - def resize(self, *shape): """ Resize the matrix in-place to the given shape. @@ -119,6 +120,36 @@ def resize(self, *shape): # update the mask to reflect the new shape self.mask = torch.tensor(self.matrix, dtype=torch.float32, device=self.device) + def get_graph(self) -> igraph.Graph: + """ + Get the graph representation of the relation(s). + + Returns: + The graph representation of the relation(s). + """ + graphs: List[igraph.Graph] = [] + for relation in self.indices: + # create a directed (mode="in") star graph with the relation as the center (vertex 0) + graphs.append(igraph.Graph.Star(n=len(relation) + 1, mode="in", center=0)) + # relation vertices are the first vertices in the graph + relation_vertex: igraph.Vertex = graphs[-1].vs.find(0) # located at index 0 + # set item and tags for the relation vertex for easy retrieval; name is for graph union + ( + relation_vertex["name"], + relation_vertex["item"], + relation_vertex["tags"], + ) = (hash(self) + hash(tuple(relation)), self, {"relation"}) + # anchor vertices are the var-term pairs that are involved in the relation vertex + anchor_vertices: List[igraph.Vertex] = relation_vertex.predecessors() + # set anchor vertices' item and tags for easy retrieval; name is for graph union + for anchor_vertex, index_pair in zip(anchor_vertices, relation): + anchor_vertex["name"], anchor_vertex["item"], anchor_vertex["tags"] = ( + index_pair, + index_pair, + {"anchor"}, + ) + return igraph.union(graphs, byname=True) + def apply_mask(self, membership: Membership) -> torch.Tensor: """ Apply the n-ary relation's mask to the given memberships. 
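An aside, not part of the patch: the complement-mask trick used by apply_mask is easier to see in a small standalone sketch. Membership entries that are not selected by the relation are multiplied back in as 1, so they cannot affect the product. The tensors below are toy values with the batch and relation axes dropped, and the final prod() collapses the whole grid for brevity, whereas the real method reduces over the term axis only.

import torch

degrees = torch.tensor([[0.2, 0.7],   # variable 0: terms 0 and 1
                        [0.9, 0.4]])  # variable 1: terms 0 and 1
mask = torch.tensor([[0.0, 1.0],      # the relation selects (0, 1) ...
                     [1.0, 0.0]])     # ... and (1, 0)
after_mask = degrees * mask                 # unselected entries become 0
result = (after_mask + (1 - mask)).prod()   # ... and then contribute 1 to the product
# result == 0.7 * 0.9; only the selected memberships survive
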
diff --git a/tests/test_relations/test_n_ary.py b/tests/test_relations/test_n_ary.py index 5ccacb2..005e81d 100644 --- a/tests/test_relations/test_n_ary.py +++ b/tests/test_relations/test_n_ary.py @@ -3,16 +3,18 @@ """ import unittest +from typing import List, Tuple +import igraph import torch import numpy as np +from fuzzy.sets.continuous.impl import Gaussian from fuzzy.sets.continuous.membership import Membership +from fuzzy.sets.continuous.group import GroupedFuzzySets +from fuzzy.sets.continuous.abstract import ContinuousFuzzySet from fuzzy.relations.continuous.t_norm import Minimum, Product from fuzzy.relations.continuous.n_ary import NAryRelation, Compound -from fuzzy.sets.continuous.abstract import ContinuousFuzzySet -from fuzzy.sets.continuous.group import GroupedFuzzySets -from fuzzy.sets.continuous.impl import Gaussian N_TERMS: int = 2 N_VARIABLES: int = 4 @@ -123,6 +125,71 @@ def test_duplicates(self) -> None: device=AVAILABLE_DEVICE, ) + def test_graph(self) -> None: + """ + Test that a graph representation of the relation can be created. + + Returns: + None + """ + indices: List[Tuple[int, int]] = [(0, 1), (1, 0)] + single_n_ary = NAryRelation(*indices, device=AVAILABLE_DEVICE) + single_n_ary_graph: igraph.Graph = single_n_ary.get_graph() + self.assertTrue(single_n_ary_graph is not None) + self.assertEqual( + single_n_ary_graph.vcount(), 3 + ) # 2 index pairs + 1 for relation + self.assertEqual(single_n_ary_graph.ecount(), 2) # 2 edges (relations) + + # check vertex attributes are as we expect + self.assertEqual(single_n_ary_graph.vs[0]["tags"], {"relation"}) + for index in (1, 2): + self.assertEqual(single_n_ary_graph.vs[index]["tags"], {"anchor"}) + self.assertEqual(single_n_ary_graph.vs[index]["item"], indices[index - 1]) + + # check edges are as we expect + for index in (0, 1): + self.assertEqual(single_n_ary_graph.es[index].source, index + 1) + self.assertEqual(single_n_ary_graph.es[index].target, 0) + + indices: List[List[Tuple[int, int]]] = [[(0, 1), (1, 0)], [(1, 1), (2, 1)]] + multiple_n_ary = NAryRelation(*indices, device=AVAILABLE_DEVICE) + multiple_n_ary_graph: igraph.Graph = multiple_n_ary.get_graph() + self.assertTrue(multiple_n_ary_graph is not None) + self.assertEqual( + multiple_n_ary_graph.vcount(), 6 + ) # 4 index pairs + 2 for relations + self.assertEqual(multiple_n_ary_graph.ecount(), 4) # 4 edges (relations) + + # check vertex attributes are as we expect + relation_vertices: igraph.VertexSeq = multiple_n_ary_graph.vs.select( + tags_eq={"relation"} + ) + self.assertEqual(len(relation_vertices), 2) + relation_index: int = 0 + for relation_vertex in relation_vertices: + self.assertEqual(relation_vertex["tags"], {"relation"}) + predecessors: List[igraph.Vertex] = relation_vertex.predecessors() + for predecessor_index, predecessor in enumerate(predecessors): + self.assertEqual(predecessor["tags"], {"anchor"}) + # below does not work consistently + # self.assertEqual(predecessor["item"], indices[relation_index][index]) + relation_index += 1 + + # check that relations involving the same index references share the same vertex + + multiple_n_ary = NAryRelation( + [(0, 1), (1, 0)], [(1, 1), (0, 1)], device=AVAILABLE_DEVICE + ) + multiple_n_ary_graph_with_uniques: igraph.Graph = multiple_n_ary.get_graph() + self.assertTrue(multiple_n_ary_graph_with_uniques is not None) + self.assertEqual( + multiple_n_ary_graph_with_uniques.vcount(), 5 + ) # 3 unique index pairs + 2 for relations + self.assertEqual( + multiple_n_ary_graph_with_uniques.ecount(), 4 # 4 edges 
(relations) + ) + class TestProduct(TestNAryRelation): """ From 9c513eeb26436e2440afb3765ad86a0a3ea76342 Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Tue, 20 Aug 2024 13:34:59 -0400 Subject: [PATCH 09/12] Refactor to reduce complexity --- src/fuzzy/relations/continuous/n_ary.py | 72 ++++++++++++++----------- tests/test_relations/test_n_ary.py | 8 +-- 2 files changed, 44 insertions(+), 36 deletions(-) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index 0339733..73deba8 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -36,9 +36,10 @@ def __init__( device: The device to use for the relation. """ super().__init__(**kwargs) + self.matrix = None # this will be created later (via self._rebuild) + self.graph = None # this will be created later (via self._rebuild) self.device: torch.device = device self.indices: List[List[Tuple[int, int]]] = [] - # self.indices = indices if not isinstance(indices[0], list): indices = [indices] @@ -58,19 +59,8 @@ def __init__( # now convert to a list of matrices max_var = max(t[0] for t in self._original_shape) max_term = max(t[1] for t in self._original_shape) - self.make_np_matrix(max_var, max_term) self.indices.extend(indices) - - # this mask is used to zero out the values that are not part of the relation - self.mask: torch.Tensor = torch.tensor( - self.matrix, dtype=torch.float32, device=device - ) - # matrix size can increase (in-place) for more potential rows (vars) and columns (terms) - # self._coo_matrix.resize( - # self._coo_matrix.shape[0] + 1, self._coo_matrix.shape[1] + 1 - # ) - # we can create a graph from the adjacency matrix - # g = igraph.Graph.Adjacency(self._coo_matrix) + self._rebuild(*(max_var, max_term)) @staticmethod def convert_indices_to_matrix(indices) -> sps._coo.coo_matrix: @@ -87,7 +77,7 @@ def convert_indices_to_matrix(indices) -> sps._coo.coo_matrix: row, col = zip(*indices) return sps.coo_matrix((data, (row, col)), dtype=np.int8) - def make_np_matrix(self, max_var: int, max_term: int) -> None: + def create_ndarray(self, max_var: int, max_term: int) -> None: """ Make (or update) the numpy matrix from the COO matrices. @@ -106,26 +96,12 @@ def make_np_matrix(self, max_var: int, max_term: int) -> None: # make a new axis and stack long that axis self.matrix: np.ndarray = np.stack(matrices).swapaxes(0, 1).swapaxes(1, 2) - def resize(self, *shape): - """ - Resize the matrix in-place to the given shape. - - Args: - shape: The new shape of the matrix. - """ - for coo_matrix in self._coo_matrix: - coo_matrix.resize(*shape) - self.make_np_matrix(shape[0], shape[1]) - - # update the mask to reflect the new shape - self.mask = torch.tensor(self.matrix, dtype=torch.float32, device=self.device) - - def get_graph(self) -> igraph.Graph: + def create_igraph(self) -> None: """ - Get the graph representation of the relation(s). + Create the graph representation of the relation(s). Returns: - The graph representation of the relation(s). + None """ graphs: List[igraph.Graph] = [] for relation in self.indices: @@ -148,7 +124,39 @@ def get_graph(self) -> igraph.Graph: index_pair, {"anchor"}, ) - return igraph.union(graphs, byname=True) + self.graph = igraph.union(graphs, byname=True) + + def _rebuild(self, *shape) -> None: + """ + Rebuild the relation's matrix and graph. + + Args: + shape: The new shape of the n-ary fuzzy relation; assuming shape is (max_var, max_term). 
+ + Returns: + None + """ + # re-create the self.matrix + self.create_ndarray(shape[0], shape[1]) + # re-create the self.graph + self.create_igraph() + # update the self.mask to reflect the new shape + # this mask is used to zero out the values that are not part of the relation + self.mask = torch.tensor(self.matrix, dtype=torch.float32, device=self.device) + + def resize(self, *shape) -> None: + """ + Resize the matrix in-place to the given shape, and then rebuild the relations' members. + + Args: + shape: The new shape of the matrix. + + Returns: + None + """ + for coo_matrix in self._coo_matrix: + coo_matrix.resize(*shape) + self._rebuild(*shape) def apply_mask(self, membership: Membership) -> torch.Tensor: """ diff --git a/tests/test_relations/test_n_ary.py b/tests/test_relations/test_n_ary.py index 005e81d..f0189de 100644 --- a/tests/test_relations/test_n_ary.py +++ b/tests/test_relations/test_n_ary.py @@ -134,7 +134,7 @@ def test_graph(self) -> None: """ indices: List[Tuple[int, int]] = [(0, 1), (1, 0)] single_n_ary = NAryRelation(*indices, device=AVAILABLE_DEVICE) - single_n_ary_graph: igraph.Graph = single_n_ary.get_graph() + single_n_ary_graph: igraph.Graph = single_n_ary.graph self.assertTrue(single_n_ary_graph is not None) self.assertEqual( single_n_ary_graph.vcount(), 3 @@ -154,7 +154,7 @@ def test_graph(self) -> None: indices: List[List[Tuple[int, int]]] = [[(0, 1), (1, 0)], [(1, 1), (2, 1)]] multiple_n_ary = NAryRelation(*indices, device=AVAILABLE_DEVICE) - multiple_n_ary_graph: igraph.Graph = multiple_n_ary.get_graph() + multiple_n_ary_graph: igraph.Graph = multiple_n_ary.graph self.assertTrue(multiple_n_ary_graph is not None) self.assertEqual( multiple_n_ary_graph.vcount(), 6 @@ -170,7 +170,7 @@ def test_graph(self) -> None: for relation_vertex in relation_vertices: self.assertEqual(relation_vertex["tags"], {"relation"}) predecessors: List[igraph.Vertex] = relation_vertex.predecessors() - for predecessor_index, predecessor in enumerate(predecessors): + for predecessor in predecessors: self.assertEqual(predecessor["tags"], {"anchor"}) # below does not work consistently # self.assertEqual(predecessor["item"], indices[relation_index][index]) @@ -181,7 +181,7 @@ def test_graph(self) -> None: multiple_n_ary = NAryRelation( [(0, 1), (1, 0)], [(1, 1), (0, 1)], device=AVAILABLE_DEVICE ) - multiple_n_ary_graph_with_uniques: igraph.Graph = multiple_n_ary.get_graph() + multiple_n_ary_graph_with_uniques: igraph.Graph = multiple_n_ary.graph self.assertTrue(multiple_n_ary_graph_with_uniques is not None) self.assertEqual( multiple_n_ary_graph_with_uniques.vcount(), 5 From 7b4adf36e134799eb5c5cb1923fd669d028251a2 Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Tue, 20 Aug 2024 16:52:32 -0400 Subject: [PATCH 10/12] Add ability to ignore missing values --- src/fuzzy/relations/continuous/n_ary.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index 73deba8..a18ac15 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -26,6 +26,7 @@ def __init__( self, *indices: Union[Tuple[int, int], List[Tuple[int, int]]], device: torch.device, + nan_replacement: float = 0.0, **kwargs, ): """ @@ -34,11 +35,17 @@ def __init__( Args: items: The 2-tuple indices to apply the n-ary relation to (e.g., (0, 1), (1, 0)). device: The device to use for the relation. 
+ nan_replacement: The value to use when a value is missing in the relation (i.e., nan); + this is useful for when input to the relation is not complete. Default is 0.0 + (penalize), a value of 1.0 would ignore missing values (i.e., do not penalize). """ super().__init__(**kwargs) self.matrix = None # this will be created later (via self._rebuild) self.graph = None # this will be created later (via self._rebuild) self.device: torch.device = device + if nan_replacement not in [0.0, 1.0]: + raise ValueError("The nan_replacement must be either 0.0 or 1.0.") + self.nan_replacement: float = nan_replacement self.indices: List[List[Tuple[int, int]]] = [] if not isinstance(indices[0], list): @@ -177,7 +184,12 @@ def apply_mask(self, membership: Membership) -> torch.Tensor: # select memberships that are not zeroed out (i.e., involved in the relation) after_mask = membership.degrees.unsqueeze(dim=-1) * self.mask.unsqueeze(0) # the complement mask adds zeros where the mask is zero, these are not part of the relation - return (after_mask + (1 - self.mask)).prod(dim=2, keepdim=False) + # nan_to_num is used to replace nan values with the nan_replacement value (often not needed) + return ( + (after_mask + (1 - self.mask)) + .prod(dim=2, keepdim=False) + .nan_to_num(self.nan_replacement) + ) def forward(self, membership: Membership) -> torch.Tensor: """ From 581e56b6c0a3f26d2c9003cfa521638679b5738e Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Wed, 21 Aug 2024 12:17:34 -0400 Subject: [PATCH 11/12] Improve Pylint score --- src/fuzzy/relations/continuous/n_ary.py | 2 ++ src/fuzzy/relations/continuous/t_norm.py | 1 + 2 files changed, 3 insertions(+) diff --git a/src/fuzzy/relations/continuous/n_ary.py b/src/fuzzy/relations/continuous/n_ary.py index a18ac15..e49e8d0 100644 --- a/src/fuzzy/relations/continuous/n_ary.py +++ b/src/fuzzy/relations/continuous/n_ary.py @@ -22,6 +22,8 @@ class NAryRelation(torch.nn.Module): truth value. """ + # pylint: disable=too-many-instance-attributes + def __init__( self, *indices: Union[Tuple[int, int], List[Tuple[int, int]]], diff --git a/src/fuzzy/relations/continuous/t_norm.py b/src/fuzzy/relations/continuous/t_norm.py index 774bc5e..6b45d26 100644 --- a/src/fuzzy/relations/continuous/t_norm.py +++ b/src/fuzzy/relations/continuous/t_norm.py @@ -84,6 +84,7 @@ def forward(self, membership: Membership) -> Membership: The applicability of the fuzzy compounds (e.g., fuzzy logic rules). 
""" # intermediate_values = self.calc_intermediate_input(antecedents_memberships) + # pylint: disable=fixme # TODO: these dimensions are possibly not correct, need to be fixed/tested firing_strengths = membership.degrees.sum(dim=1) max_values, _ = firing_strengths.max(dim=-1, keepdim=True) From be1586b4a6fda6cde80d716bf4e064700cb1ac2a Mon Sep 17 00:00:00 2001 From: John Wesley Hostetter Date: Wed, 21 Aug 2024 12:29:46 -0400 Subject: [PATCH 12/12] Add missing requirement --- requirements.txt | 1 + tests/test_relations/test_n_ary.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 17146c7..f5a6776 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ numpy==1.26.4 # new is 2.0.1 (update when possible - tests fail otherwise) +igraph==0.11.6 scipy==1.13.1 # for sparse matrices sympy==1.13.2 # for torch torch==2.4.0 diff --git a/tests/test_relations/test_n_ary.py b/tests/test_relations/test_n_ary.py index f0189de..4c0f2ae 100644 --- a/tests/test_relations/test_n_ary.py +++ b/tests/test_relations/test_n_ary.py @@ -5,8 +5,8 @@ import unittest from typing import List, Tuple -import igraph import torch +import igraph import numpy as np from fuzzy.sets.continuous.impl import Gaussian