Merge pull request #27 from abhhfcgjk/develop
GNNGuard and Nettack on a few nodes
LukyanovKirillML authored Oct 21, 2024
2 parents c7e42d2 + 92148c8 commit 04535db
Showing 11 changed files with 607 additions and 67 deletions.
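This PR wires two new components into the test script: a GNNGuard poison defense and a NettackGroupEvasionAttacker that runs Nettack against a group of target nodes. A minimal sketch of how the group attack is enabled in experiments/attack_defense_test.py, using the config values from the diff below; ConfigPattern, EVASION_ATTACK_PARAMETERS_PATH and gnn_model_manager are assumed to already be imported/created earlier in that script, as in the existing code:

import random  # the test script samples target nodes at random

# Illustrative excerpt: attack a random group of 20 nodes with Nettack.
netattackgroup_evasion_attack_config = ConfigPattern(
    _class_name="NettackGroupEvasionAttacker",
    _import_path=EVASION_ATTACK_PARAMETERS_PATH,
    _config_class="EvasionAttackConfig",
    _config_kwargs={
        "node_idxs": [random.randint(0, 500) for _ in range(20)],  # nodes to attack
        "n_perturbations": 50,
        "perturb_features": True,
        "perturb_structure": True,
        "direct": True,
        "n_influencers": 10,
    }
)
gnn_model_manager.set_evasion_attacker(
    evasion_attack_config=netattackgroup_evasion_attack_config)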
146 changes: 100 additions & 46 deletions experiments/attack_defense_test.py
@@ -12,6 +12,9 @@
from src.base.datasets_processing import DatasetManager
from src.models_builder.models_zoo import model_configs_zoo
from attacks.QAttack import qattack
from defense.JaccardDefense import jaccard_def
from attacks.metattack import meta_gradient_attack
from defense.GNNGuard import gnnguard


def test_attack_defense():
@@ -23,6 +26,7 @@ def test_attack_defense():
# full_name = ("multiple-graphs", "TUDataset", 'MUTAG')
# full_name = ("single-graph", "custom", 'karate')
full_name = ("single-graph", "Planetoid", 'Cora')
# full_name = ("single-graph", "Planetoid", 'CiteSeer')
# full_name = ("multiple-graphs", "TUDataset", 'PROTEINS')

dataset, data, results_dataset_path = DatasetManager.get_by_full_name(
@@ -113,7 +117,7 @@ def test_attack_defense():
# }
# )

poison_attack_config = ConfigPattern(
metafull_poison_attack_config = ConfigPattern(
_class_name="MetaAttackFull",
_import_path=POISON_ATTACK_PARAMETERS_PATH,
_config_class="PoisonAttackConfig",
@@ -122,68 +126,117 @@
}
)

# poison_attack_config = ConfigPattern(
# _class_name="RandomPoisonAttack",
# _import_path=POISON_ATTACK_PARAMETERS_PATH,
# _config_class="PoisonAttackConfig",
# _config_kwargs={
# "n_edges_percent": 0.1,
# }
# )
random_poison_attack_config = ConfigPattern(
_class_name="RandomPoisonAttack",
_import_path=POISON_ATTACK_PARAMETERS_PATH,
_config_class="PoisonAttackConfig",
_config_kwargs={
"n_edges_percent": 0.5,
}
)

poison_defense_config = ConfigPattern(
gnnguard_poison_defense_config = ConfigPattern(
_class_name="GNNGuard",
_import_path=POISON_DEFENSE_PARAMETERS_PATH,
_config_class="PoisonDefenseConfig",
_config_kwargs={
"n_edges_percent": 0.1,
"lr": 0.01,
"train_iters": 100,
# "model": gnn_model_manager.gnn
}
)

jaccard_poison_defense_config = ConfigPattern(
_class_name="JaccardDefender",
_import_path=POISON_DEFENSE_PARAMETERS_PATH,
_config_class="PoisonDefenseConfig",
_config_kwargs={
"threshold": 0.05,
}
)

evasion_attack_config = ConfigPattern(
qattack_evasion_attack_config = ConfigPattern(
_class_name="QAttack",
_import_path=EVASION_ATTACK_PARAMETERS_PATH,
_config_class="EvasionAttackConfig",
_config_kwargs={
"population_size": 50,
"individual_size": 30,
"generations": 50,
"population_size": 500,
"individual_size": 100,
"generations": 100,
"prob_cross": 0.5,
"prob_mutate": 0.02
}
)
# evasion_attack_config = ConfigPattern(
# _class_name="FGSM",
# _import_path=EVASION_ATTACK_PARAMETERS_PATH,
# _config_class="EvasionAttackConfig",
# _config_kwargs={
# "epsilon": 0.01 * 1,
# }
# )

# evasion_defense_config = ConfigPattern(
# _class_name="GradientRegularizationDefender",
# _import_path=EVASION_DEFENSE_PARAMETERS_PATH,
# _config_class="EvasionDefenseConfig",
# _config_kwargs={
# "regularization_strength": 0.1 * 10
# }
# )
evasion_defense_config = ConfigPattern(
fgsm_evasion_attack_config = ConfigPattern(
_class_name="FGSM",
_import_path=EVASION_ATTACK_PARAMETERS_PATH,
_config_class="EvasionAttackConfig",
_config_kwargs={
"epsilon": 0.01 * 1,
}
)

netattack_evasion_attack_config = ConfigPattern(
_class_name="NettackEvasionAttacker",
_import_path=EVASION_ATTACK_PARAMETERS_PATH,
_config_class="EvasionAttackConfig",
_config_kwargs={
"node_idx": 0, # Node for attack
"n_perturbations": 20,
"perturb_features": True,
"perturb_structure": True,
"direct": True,
"n_influencers": 3
}
)

netattackgroup_evasion_attack_config = ConfigPattern(
_class_name="NettackGroupEvasionAttacker",
_import_path=EVASION_ATTACK_PARAMETERS_PATH,
_config_class="EvasionAttackConfig",
_config_kwargs={
"node_idxs": [random.randint(0, 500) for _ in range(20)], # Nodes for attack
"n_perturbations": 50,
"perturb_features": True,
"perturb_structure": True,
"direct": True,
"n_influencers": 10
}
)

gradientregularization_evasion_defense_config = ConfigPattern(
_class_name="GradientRegularizationDefender",
_import_path=EVASION_DEFENSE_PARAMETERS_PATH,
_config_class="EvasionDefenseConfig",
_config_kwargs={
"regularization_strength": 0.1 * 10
}
)


fgsm_evasion_attack_config0 = ConfigPattern(
_class_name="FGSM",
_import_path=EVASION_ATTACK_PARAMETERS_PATH,
_config_class="EvasionAttackConfig",
_config_kwargs={
"epsilon": 0.1 * 1,
}
)
at_evasion_defense_config = ConfigPattern(
_class_name="AdvTraining",
_import_path=EVASION_DEFENSE_PARAMETERS_PATH,
_config_class="EvasionDefenseConfig",
_config_kwargs={
"attack_name": None,
"attack_config": evasion_attack_config # evasion_attack_config
"attack_config": fgsm_evasion_attack_config0 # evasion_attack_config
}
)

# gnn_model_manager.set_poison_attacker(poison_attack_config=poison_attack_config)
# gnn_model_manager.set_poison_defender(poison_defense_config=poison_defense_config)
gnn_model_manager.set_evasion_attacker(evasion_attack_config=evasion_attack_config)
# gnn_model_manager.set_evasion_defender(evasion_defense_config=evasion_defense_config)
# gnn_model_manager.set_poison_attacker(poison_attack_config=random_poison_attack_config)
# gnn_model_manager.set_poison_defender(poison_defense_config=gnnguard_poison_defense_config)
gnn_model_manager.set_evasion_attacker(evasion_attack_config=netattackgroup_evasion_attack_config)
# gnn_model_manager.set_evasion_defender(evasion_defense_config=at_evasion_defense_config)

warnings.warn("Start training")
dataset.train_test_split()
@@ -207,7 +260,8 @@ def test_attack_defense():
warnings.warn("Training was successful")

metric_loc = gnn_model_manager.evaluate_model(
gen_dataset=dataset, metrics=[Metric("F1", mask='test', average='macro')])
gen_dataset=dataset, metrics=[Metric("F1", mask='test', average='macro'),
Metric("Accuracy", mask='test')])
print(metric_loc)

def test_meta():
@@ -326,12 +380,12 @@ def test_nettack_evasion():
acc_test_loc = gnn_model_manager.evaluate_model(gen_dataset=dataset,
metrics=[Metric("Accuracy", mask=mask_loc)])[mask_loc]['Accuracy']

acc_train = gnn_model_manager.evaluate_model(gen_dataset=dataset,
metrics=[Metric("Accuracy", mask='train')])['train']['Accuracy']
acc_test = gnn_model_manager.evaluate_model(gen_dataset=dataset,
metrics=[Metric("Accuracy", mask='test')])['test']['Accuracy']
# acc_train = gnn_model_manager.evaluate_model(gen_dataset=dataset,
# metrics=[Metric("Accuracy", mask='train')])['train']['Accuracy']
# acc_test = gnn_model_manager.evaluate_model(gen_dataset=dataset,
# metrics=[Metric("Accuracy", mask='test')])['test']['Accuracy']

print(f"Accuracy on train: {acc_train}. Accuracy on test: {acc_test}")
# print(f"Accuracy on train: {acc_train}. Accuracy on test: {acc_test}")
print(f"Accuracy on test loc: {acc_test_loc}")

# Model prediction on a node before an evasion attack on it
@@ -724,8 +778,8 @@ def test_adv_training():
if __name__ == '__main__':
import random
random.seed(10)
test_attack_defense()
torch.manual_seed(5000)
# test_adv_training()
#test_attack_defense()
# torch.manual_seed(5000)
# test_gnnguard()
# test_jaccard()
test_attack_defense()
8 changes: 8 additions & 0 deletions metainfo/evasion_attack_parameters.json
@@ -12,6 +12,14 @@
"direct": ["direct", "bool", true, {}, "Indicates whether to directly modify edges/features of the node attacked or only those of influencers"],
"n_influencers": ["n_influencers", "int", 0, {"min": 0, "step": 1}, "Number of influencing nodes. Will be ignored if direct is True"]
},
"NettackGroupEvasionAttacker": {

"n_perturbations": ["n_perturbations", "int", null, {"min": 0, "step": 1}, "Number of perturbations. If None, then n_perturbations = degree(node_idx)"],
"perturb_features": ["perturb_features", "bool", true, {}, "Indicates whether the features can be changed"],
"perturb_structure": ["perturb_structure", "bool", true, {}, "Indicates whether the structure can be changed"],
"direct": ["direct", "bool", true, {}, "Indicates whether to directly modify edges/features of the node attacked or only those of influencers"],
"n_influencers": ["n_influencers", "int", 0, {"min": 0, "step": 1}, "Number of influencing nodes. Will be ignored if direct is True"]
},
"QAttack": {
"population_size": ["Population size", "int", 50, {"min": 1, "step": 1}, "Number of genes in population"],
"individual_size": ["Individual size", "int", 30, {"min": 1, "step": 1}, "Number of rewiring operations within one gene"],
14 changes: 7 additions & 7 deletions metainfo/modules_parameters.json
@@ -10,7 +10,7 @@
{
"import_info": ["GCNConv", ["torch_geometric.nn"]],
"need_full_gnn_flag": false,
"forward_parameters": "x=x, edge_index=edge_index"
"forward_parameters": "x=x, edge_index=edge_index, edge_weight=edge_weight"
}
},
"SAGEConv": {
@@ -53,7 +53,7 @@
{
"import_info": ["SGConv", ["torch_geometric.nn"]],
"need_full_gnn_flag": false,
"forward_parameters": "x=x, edge_index=edge_index"
"forward_parameters": "x=x, edge_index=edge_index, edge_weight=edge_weight"
}
},
"GINConv": {
@@ -74,7 +74,7 @@
{
"import_info": ["TAGConv", ["torch_geometric.nn"]],
"need_full_gnn_flag": false,
"forward_parameters": "x=x, edge_index=edge_index"
"forward_parameters": "x=x, edge_index=edge_index, edge_weight=edge_weight"
}
},
"ARMAConv": {
@@ -88,7 +88,7 @@
{
"import_info": ["ARMAConv", ["torch_geometric.nn"]],
"need_full_gnn_flag": false,
"forward_parameters": "x=x, edge_index=edge_index"
"forward_parameters": "x=x, edge_index=edge_index, edge_weight=edge_weight"
}
},
"SSGConv": {
@@ -102,7 +102,7 @@
{
"import_info": ["SSGConv", ["torch_geometric.nn"]],
"need_full_gnn_flag": false,
"forward_parameters": "x=x, edge_index=edge_index"
"forward_parameters": "x=x, edge_index=edge_index, edge_weight=edge_weight"
}
},
"GMM": {
@@ -130,7 +130,7 @@
{
"import_info": ["CGConv", ["torch_geometric.nn"]],
"need_full_gnn_flag": false,
"forward_parameters": "x=x, edge_index=edge_index"
"forward_parameters": "x=x, edge_index=edge_index, edge_weight=edge_weight"
}
},
"APPNP": {
@@ -144,7 +144,7 @@
{
"import_info": ["APPNP", ["torch_geometric.nn"]],
"need_full_gnn_flag": false,
"forward_parameters": "x=x, edge_index=edge_index"
"forward_parameters": "x=x, edge_index=edge_index, edge_weight=edge_weight"
}
},
"Linear": {
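Several of these convolution wrappers now pass edge_weight through forward_parameters, presumably so that edge-reweighting defenses such as GNNGuard can influence message passing in the generated forward(). A small sketch of the resulting call for a GCNConv layer; the layer sizes and tensors below are made up for illustration, only the keyword arguments mirror the forward_parameters string above:

import torch
from torch_geometric.nn import GCNConv

conv = GCNConv(in_channels=16, out_channels=7)
x = torch.randn(4, 16)                                  # 4 nodes, 16 features
edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])       # 3 directed edges
edge_weight = torch.tensor([1.0, 0.5, 0.1])             # e.g. weights assigned by a defense
out = conv(x=x, edge_index=edge_index, edge_weight=edge_weight)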
9 changes: 5 additions & 4 deletions metainfo/poison_defense_parameters.json
@@ -4,13 +4,14 @@
"BadRandomPoisonDefender": {
"n_edges_percent": ["n_edges_percent", "float", 0.1, {"min": 0.0001, "step": 0.01}, "?"]
},
"JaccardDefender": {
"threshold": ["Edge Threshold", "float", 0.35, {"min": 0, "max": 1, "step": 0.01}, "Jaccard index threshold for dropping edges"]
},
"GNNGuard": {
"lr": ["lr", "float", 0.01, {"min": 0.0001, "step": 0.005}, "?"],
"attention": ["attention", "bool", true, {}, "?"],
"drop": ["drop", "bool", true, {}, "?"]
},
"JaccardDefender": {
"threshold": ["Edge Threshold", "float", 0.35, {"min": 0, "max": 1, "step": 0.01}, "Jaccard index threshold for dropping edges"]
"drop": ["drop", "bool", true, {}, "?"],
"train_iters": ["train_iters", "int", 50, {}, "?"]
}
}
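The GNNGuard entry now exposes train_iters alongside lr, attention and drop. A hedged sketch of a matching poison-defense config, mirroring the values used in the test script above; ConfigPattern and POISON_DEFENSE_PARAMETERS_PATH are the framework objects referenced in that script and are assumed to be in scope:

gnnguard_poison_defense_config = ConfigPattern(
    _class_name="GNNGuard",
    _import_path=POISON_DEFENSE_PARAMETERS_PATH,
    _config_class="PoisonDefenseConfig",
    _config_kwargs={
        "lr": 0.01,          # defense fine-tuning learning rate
        "train_iters": 100,  # number of defense training iterations
        # "attention" and "drop" presumably fall back to their JSON defaults (true)
    }
)
gnn_model_manager.set_poison_defender(
    poison_defense_config=gnnguard_poison_defense_config)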

4 changes: 3 additions & 1 deletion src/attacks/QAttack/qattack.py
@@ -78,6 +78,7 @@ def fitness_individual(self, model, gen_dataset, gene):
# Get labels from black-box
labels = model.gnn.get_answer(dataset.x, dataset.edge_index)
labeled_nodes = dict(enumerate(labels.tolist()))
# labeled_nodes = {n: labels.tolist()[n-1] for n in adj_list.keys()} # FIXME check order for labels and node id consistency

# Calculate modularity
Q = self.modularity(adj_list, labeled_nodes)
Expand Down Expand Up @@ -204,6 +205,7 @@ def mutation(self, gen_dataset):
self.population[i][n]['del'] = np.random.choice(list(adj_list[n]), 1)
else:
selected_nodes = set(self.population[i].keys())
#non_selected_nodes = non_isolated_nodes.difference(selected_nodes)
non_drain_nodes = non_drain_nodes.difference(selected_nodes)
new_node = np.random.choice(list(non_drain_nodes), size=1, replace=False)[0]
self.population[i].pop(n)
Expand Down Expand Up @@ -238,4 +240,4 @@ def attack(self, model_manager, gen_dataset, mask_tensor):
set(adj_list[n]).union(set([int(rewiring[n]['add'])])).difference(set([int(rewiring[n]['del'])])))

gen_dataset.dataset.data.edge_index = from_adj_list(adj_list)
return gen_dataset
return gen_dataset
20 changes: 19 additions & 1 deletion src/attacks/evasion_attacks.py
@@ -137,4 +137,22 @@ def _evasion(gen_dataset, feature_perturbations, structure_perturbations):
torch.tensor((edge[1], edge[0]), dtype=torch.int32).to(torch.int64).unsqueeze(1)), dim=1)

gen_dataset.data.edge_index = edge_index


class NettackGroupEvasionAttacker(EvasionAttacker):
name = "NettackGroupEvasionAttacker"
def __init__(self,node_idxs, **kwargs):
super().__init__()
self.node_idxs = node_idxs # kwargs.get("node_idxs")
assert isinstance(self.node_idxs, list)
self.n_perturbations = kwargs.get("n_perturbations")
self.perturb_features = kwargs.get("perturb_features")
self.perturb_structure = kwargs.get("perturb_structure")
self.direct = kwargs.get("direct")
self.n_influencers = kwargs.get("n_influencers")
self.attacker = NettackEvasionAttacker(0, **kwargs)

def attack(self, model_manager, gen_dataset, mask_tensor):
for node_idx in self.node_idxs:
self.attacker.node_idx = node_idx
gen_dataset = self.attacker.attack(model_manager, gen_dataset, mask_tensor)
return gen_dataset
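NettackGroupEvasionAttacker wraps a single-node NettackEvasionAttacker and simply re-targets it at each node in node_idxs, so all per-node parameters are shared across the group. A small usage sketch under that assumption; the target nodes are hypothetical, and model_manager, gen_dataset and mask_tensor are whatever objects the framework normally passes to attack():

# Illustrative only: attack three specific nodes with shared Nettack settings.
group_attacker = NettackGroupEvasionAttacker(
    node_idxs=[0, 10, 42],   # hypothetical target nodes
    n_perturbations=20,
    perturb_features=True,
    perturb_structure=True,
    direct=True,
    n_influencers=3,
)
gen_dataset = group_attacker.attack(model_manager, gen_dataset, mask_tensor)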