Commit d34224a
Jeratt committed Oct 25, 2024
1 parent bfecf18 commit d34224a
Showing 6 changed files with 252 additions and 31 deletions.
80 changes: 53 additions & 27 deletions experiments/EAttack_experiment.py
@@ -24,9 +24,11 @@
from explainers.GNNExplainer.torch_geom_our.out import GNNExplainer
from explainers.SubgraphX.out import SubgraphXExplainer
from explainers.Zorro.out import ZorroExplainer
from explainers.PGMExplainer.out import PGMExplainer

def test():
from attacks.EAttack.eattack_attack import EAttack
#from attacks.EAttack.eattack_attack import EAttack
from attacks.EAttack.experimental_code import EAttack

my_device = device('cpu')

@@ -39,7 +41,8 @@ def test():
)

# Train model on original dataset and remember the model metric and node predicted probability
gcn_gcn = model_configs_zoo(dataset=dataset, model_name='gcn_gcn')
# gcn_gcn = model_configs_zoo(dataset=dataset, model_name='gcn_gcn')
gcn_gcn_gcn = model_configs_zoo(dataset=dataset, model_name='gcn_gcn_gcn')

manager_config = ConfigPattern(
_config_class="ModelManagerConfig",
@@ -53,15 +56,15 @@ def test():
)

gnn_model_manager = FrameworkGNNModelManager(
gnn=gcn_gcn,
gnn=gcn_gcn_gcn,
dataset_path=results_dataset_path,
manager_config=manager_config,
modification=ModelModificationConfig(model_ver_ind=0, epochs=0)
)

gnn_model_manager.gnn.to(my_device)

num_steps = 100
num_steps = 200
gnn_model_manager.train_model(gen_dataset=dataset,
steps=num_steps,
save_model_flag=False)
@@ -77,27 +80,27 @@ def test():
print(f"BEFORE ATTACK\nAccuracy on train: {acc_train}. Accuracy on test: {acc_test}")
# print(f"Accuracy on test: {acc_test}")

explainer_init_config = ConfigPattern(
_class_name="GNNExplainer(torch-geom)",
_import_path=EXPLAINERS_INIT_PARAMETERS_PATH,
_config_class="ExplainerInitConfig",
_config_kwargs={
}
)
explainer_run_config = ConfigPattern(
_config_class="ExplainerRunConfig",
_config_kwargs={
"mode": "local",
"kwargs": {
"_class_name": "GNNExplainer(torch-geom)",
"_import_path": EXPLAINERS_LOCAL_RUN_PARAMETERS_PATH,
"_config_class": "Config",
"_config_kwargs": {

},
}
}
)
# explainer_init_config = ConfigPattern(
# _class_name="GNNExplainer(torch-geom)",
# _import_path=EXPLAINERS_INIT_PARAMETERS_PATH,
# _config_class="ExplainerInitConfig",
# _config_kwargs={
# }
# )
# explainer_run_config = ConfigPattern(
# _config_class="ExplainerRunConfig",
# _config_kwargs={
# "mode": "local",
# "kwargs": {
# "_class_name": "GNNExplainer(torch-geom)",
# "_import_path": EXPLAINERS_LOCAL_RUN_PARAMETERS_PATH,
# "_config_class": "Config",
# "_config_kwargs": {
#
# },
# }
# }
# )

# explainer_init_config = ConfigPattern(
# _class_name="SubgraphX",
@@ -121,10 +124,33 @@
# }
# )

explainer_init_config = ConfigPattern(
_class_name="PGMExplainer",
_import_path=EXPLAINERS_INIT_PARAMETERS_PATH,
_config_class="ExplainerInitConfig",
_config_kwargs={
}
)
explainer_run_config = ConfigPattern(
_config_class="ExplainerRunConfig",
_config_kwargs={
"mode": "local",
"kwargs": {
"_class_name": "PGMExplainer",
"_import_path": EXPLAINERS_LOCAL_RUN_PARAMETERS_PATH,
"_config_class": "Config",
"_config_kwargs": {

},
}
}
)

init_kwargs = getattr(explainer_init_config, CONFIG_OBJ).to_dict()
explainer = GNNExplainer(gen_dataset=dataset, model=gnn_model_manager.gnn, device=my_device, **init_kwargs)
# explainer = GNNExplainer(gen_dataset=dataset, model=gnn_model_manager.gnn, device=my_device, **init_kwargs)
# explainer = SubgraphXExplainer(gen_dataset=dataset, model=gnn_model_manager.gnn, device=my_device, **init_kwargs)
# explainer = ZorroExplainer(gen_dataset=dataset, model=gnn_model_manager.gnn, device=my_device, **init_kwargs)
explainer = PGMExplainer(gen_dataset=dataset, model=gnn_model_manager.gnn, device=my_device, **init_kwargs)

# node_inds = np.arange(dataset.dataset.data.x.shape[0])
# dataset = gen_dataset.dataset.data[mask_tensor]
@@ -143,7 +169,7 @@ def test():
if u not in adj_list[v]:
adj_list[v].append(u)
node_inds = [n for n in adj_list.keys() if len(adj_list[n]) > 1]
attacked_node_size = int((0.02 * len(node_inds)))
attacked_node_size = int((0.002 * len(node_inds)))
attack_inds = np.random.choice(node_inds, attacked_node_size)

evasion_attack_config = ConfigPattern(
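Note: the evasion_attack_config hunk is truncated above. As a minimal sketch of how the new knobs plug into the experimental EAttack constructor (defined in src/attacks/EAttack/experimental_code.py further down), the call below uses the defaults declared in metainfo/evasion_attack_parameters.json; the direct instantiation, the mask_tensor variable, and the exact keyword values are assumptions for illustration, not part of this commit.

    # Illustrative only: direct use of the experimental EAttack with the JSON defaults.
    attack = EAttack(
        explainer=explainer,             # PGMExplainer built above
        run_config=explainer_run_config,
        attack_size=0.15,
        attack_inds=attack_inds,         # nodes sampled above with np.random.choice
        targeted=False,
        max_rewire=20,
        random_rewire=False,
        attack_edges=True,
        attack_features=False,
        edge_mode='add',                 # 'remove' | 'add' | 'rewire'
        features_mode='reverse',         # 'reverse' | 'drop'
    )
    perturbed_dataset = attack.attack(gnn_model_manager, dataset, mask_tensor)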
6 changes: 5 additions & 1 deletion metainfo/evasion_attack_parameters.json
@@ -16,7 +16,11 @@
"attack_size": ["Attack size (edge)", "float", 0.15, {"min": 0, "max": 1, "step": 0.01}, "Percent of nodes to be rewired"],
"targeted": ["Targeted attack", "bool", true, {}, "Whether attack targeted or not"],
"max_rewire": ["Max 2-hop node to rewire for target", "int", 20, {"min": 1, "step": 1}, "Not more than this amount of node from 2-hop neighbourhood will be rewired"],
"random_rewire": ["Random rewire", "bool", false, {}, "Rewire based on random, not on explanation (for comparison)"]
"random_rewire": ["Random rewire", "bool", false, {}, "Rewire based on random, not on explanation (for comparison)"],
"attack_features": ["Attack features", "bool", false, {}, "Whether features to be attacked or not"],
"attack_edges": ["Attack edges", "bool", true, {}, "Whether edges to be attacked or not"],
"edge_mode": ["Edge attack type", "string", "add", ["remove", "add", "rewire"], "What to do with edges: remove or add or rewire (add one and remove another)"],
"features_mode": ["Feature attack type", "string", "reverse", ["reverse","drop"], "What to do with features: drop or reverse (binary)"]
},
"QAttack": {
"population_size": ["Population size", "int", 50, {"min": 1, "step": 1}, "Number of genes in population"],
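For context, each entry in this metainfo file appears to follow the pattern [display label, type, default, constraints or allowed values, description]. Resolved to their defaults, the newly added EAttack keys would look roughly like this Python dict (a sketch for readability, not generated output):

    # Defaults of the new EAttack parameters declared above.
    eattack_new_defaults = {
        "attack_features": False,    # whether node features are perturbed
        "attack_edges": True,        # whether edges are perturbed
        "edge_mode": "add",          # one of "remove", "add", "rewire"
        "features_mode": "reverse",  # one of "reverse", "drop"
    }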
8 changes: 6 additions & 2 deletions src/attacks/EAttack/eattack_attack.py
@@ -24,7 +24,8 @@
class EAttack(EvasionAttacker):
name = "EAttack"

def __init__(self, explainer, run_config, attack_size, attack_inds, targeted, max_rewire, random_rewire, **kwargs):
def __init__(self, explainer, run_config, attack_size, attack_inds, targeted, max_rewire, random_rewire,
attack_edges, attack_features, **kwargs):
super().__init__(**kwargs)
self.explainer = explainer
self.run_config = run_config
@@ -36,12 +37,15 @@ def __init__(self, explainer, run_config, attack_size, attack_inds, targeted, ma
self.max_rewire = max_rewire
self.attack_inds = attack_inds
self.random_rewire = random_rewire
self.attack_edges = attack_edges
self.attack_features = attack_features


def attack(self, model_manager, gen_dataset, mask_tensor):

explanations = []
if not self.targeted:
# TODO check correctness
# make sample
node_inds = [i for i, x in enumerate(mask_tensor) if x]
# dataset = gen_dataset.dataset.data[mask_tensor]
@@ -51,6 +55,7 @@ def attack(self, model_manager, gen_dataset, mask_tensor):

# get explanations
if False:
# TODO check correctness
# get random explanation
for i in tqdm(range(len(self.attack_inds))):
edge_index = gen_dataset.dataset.data.edge_index.tolist()
@@ -61,7 +66,6 @@ def attack(self, model_manager, gen_dataset, mask_tensor):
neighbours[u].add(v)
elif v in neighbours.keys():
neighbours[v].add(u)

else:
for i in tqdm(range(len(self.attack_inds))):
self.params['element_idx'] = self.attack_inds[i]
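The hunks above walk gen_dataset.dataset.data.edge_index to collect neighbours of the attacked nodes. For reference, a PyG-style edge_index is a 2 x E tensor, and a generic version of that neighbour map can be built as in the sketch below (the attack itself restricts the dict to the sampled attack nodes; variable names here are illustrative):

    # Sketch: turning an edge_index (2 x E tensor) into a dict of neighbour sets.
    from collections import defaultdict
    import torch

    edge_index = torch.tensor([[0, 1, 1, 2],
                               [1, 0, 2, 1]])
    neighbours = defaultdict(set)
    for u, v in zip(edge_index[0].tolist(), edge_index[1].tolist()):
        neighbours[u].add(v)
        neighbours[v].add(u)
    # neighbours == {0: {1}, 1: {0, 2}, 2: {1}}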
134 changes: 134 additions & 0 deletions src/attacks/EAttack/experimental_code.py
@@ -0,0 +1,134 @@
import numpy as np
import random
import torch
import copy

from explainers.GNNExplainer.torch_geom_our.out import GNNExplainer
from explainers.SubgraphX.out import SubgraphXExplainer
from explainers.Zorro.out import ZorroExplainer
from aux.utils import EXPLAINERS_INIT_PARAMETERS_PATH, EXPLAINERS_LOCAL_RUN_PARAMETERS_PATH, \
EXPLAINERS_GLOBAL_RUN_PARAMETERS_PATH
from aux.configs import ConfigPattern

from tqdm import tqdm
from networkx.classes import neighbors
from numpy.array_api import astype
from sympy.codegen.ast import int64

from attacks.evasion_attacks import EvasionAttacker
from aux.configs import CONFIG_OBJ
from explainers.explainer import ProgressBar
from typing import Dict, Optional


class EAttack(EvasionAttacker):
name = "EAttack"

def __init__(self, explainer, run_config, attack_size, attack_inds, targeted, max_rewire, random_rewire,
attack_edges, attack_features, edge_mode, features_mode, **kwargs):
super().__init__(**kwargs)
self.explainer = explainer
self.run_config = run_config
# self.mode = mode
self.mode = getattr(run_config, CONFIG_OBJ).mode
self.params = getattr(getattr(run_config, CONFIG_OBJ).kwargs, CONFIG_OBJ).to_dict()
self.attack_size = attack_size
self.targeted = targeted
self.max_rewire = max_rewire
self.attack_inds = attack_inds
self.random_rewire = random_rewire
self.attack_edges = attack_edges
self.attack_features = attack_features
self.edge_mode = edge_mode
self.features_mode = features_mode


def attack(self, model_manager, gen_dataset, mask_tensor):

assert self.attack_edges or self.attack_features

explanations = []

# get explanations
for i in tqdm(range(len(self.attack_inds))):
self.params['element_idx'] = self.attack_inds[i]
self.explainer.pbar = ProgressBar(None, "er", desc=f'{self.explainer.name} explaining')
self.explainer.run(self.mode, self.params, finalize=True)
explanation = copy.deepcopy(self.explainer.explanation.dictionary['data'])
explanations.append(explanation)

edge_index = gen_dataset.dataset.data.edge_index.tolist()
edge_index_set = set([(u, v) for u, v in zip(edge_index[0], edge_index[1])])

if self.attack_edges:
cnt = 0

for i, n in enumerate(self.attack_inds):
if self.edge_mode == 'remove':
for e in explanations[i]['edges'].keys():
u, v = map(int, e.split(','))
if u != n and v != n: # not remove within 1-hop
# TODO check with discard of (v, u) too
cnt += 1
edge_index_set.discard((u, v))
# TEST
edge_index_set.discard((v, u))
elif self.edge_mode == 'add':
unimportant_nodes = set()
important_nodes = set()
for (u, v) in zip(edge_index[0], edge_index[1]):
if v == n:
if f"{u},{v}" not in explanations[i]['edges'].keys():
unimportant_nodes.add(u)
else:
important_nodes.add(u)
elif u == n:
if f"{u},{v}" not in explanations[i]['edges'].keys():
unimportant_nodes.add(v)
else:
important_nodes.add(v)
# if ((v == n and f"{u},{v}" not in explanations[i]['edges'].keys()) and
# f"{v},{u}" not in explanations[i]['edges'].keys()):
# unimportant_nodes.add(u)
# elif v == n:
# important_nodes.add(u)
unimportant_nodes = list(unimportant_nodes)
# TEST
edges = [(u, v) for u, v in zip(edge_index[0], edge_index[1]) if u == n or v == n]
#print(len(edges))
if len(unimportant_nodes) == 0:
continue
for e in explanations[i]['edges'].keys():
u, v = map(int, e.split(','))
if v in important_nodes and u != n:
new_node = random.sample(unimportant_nodes, 1)
edge_index_set.add((u, new_node[0]))
cnt += 1
elif self.edge_mode == 'rewire':
for (u, v) in zip(edge_index[0], edge_index[1]):
if u != n and v != n and f"{u},{v}" in explanations[i]['edges'].keys():
edge_index_set.discard((u, v))
if (u, n) not in edge_index_set:
cnt += 1
edge_index_set.add((u, n))
elif (v, n) not in edge_index_set:
cnt += 1
edge_index_set.add((v, n))

# Update dataset edges
edge_index_new = [[], []]
for (u, v) in edge_index_set:
edge_index_new[0].append(u)
edge_index_new[1].append(v)
edge_index_new = torch.tensor(edge_index_new, dtype=torch.int64)
gen_dataset.dataset.data.edge_index = edge_index_new

print(cnt)

if self.attack_features:
if self.features_mode == 'reverse':
pass
elif self.features_mode == 'drop':
pass

return gen_dataset
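
The attack_features branches are still pass stubs in this commit. One possible shape for them, assuming binary node features in gen_dataset.dataset.data.x and ignoring per-feature explanation scores (neither is confirmed by this diff), is sketched below; names such as perturb_features are hypothetical.

    import torch

    # Hypothetical sketch of the unimplemented feature perturbations:
    # 'reverse' flips binary features, 'drop' zeroes them out.
    def perturb_features(x: torch.Tensor, attack_inds, features_mode: str) -> torch.Tensor:
        x = x.clone()
        for n in attack_inds:
            if features_mode == 'reverse':
                x[n] = 1 - x[n]                  # flip 0/1 features
            elif features_mode == 'drop':
                x[n] = torch.zeros_like(x[n])    # zero the node's features
        return x

    # Usage inside attack() could look like:
    # gen_dataset.dataset.data.x = perturb_features(
    #     gen_dataset.dataset.data.x, self.attack_inds, self.features_mode)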
2 changes: 1 addition & 1 deletion src/explainers/PGMExplainer/out.py
@@ -83,7 +83,7 @@ def __init__(self,
@finalize_decorator
def run(self, mode, kwargs, finalize=True):
assert mode == "local"
idx = kwargs.pop('element_idx')
idx = int(kwargs.pop('element_idx'))

if self.gen_dataset.is_multi():
self.graph_idx = idx
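The only change here is the int() cast on element_idx. A plausible reason (not stated in the commit) is that the attack indices come from np.random.choice, whose elements are NumPy integer scalars rather than Python ints:

    import numpy as np

    idx = np.random.choice([10, 11, 12], 1)[0]
    print(type(idx))       # typically <class 'numpy.int64'>
    print(type(int(idx)))  # <class 'int'>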
53 changes: 53 additions & 0 deletions src/models_builder/models_zoo.py
@@ -311,6 +311,59 @@ def model_configs_zoo(dataset, model_name):
)
)

gcn_gcn_gcn = FrameworkGNNConstructor(
model_config=ModelConfig(
structure=ModelStructureConfig(
[
{
'label': 'n',
'layer': {
'layer_name': 'GCNConv',
'layer_kwargs': {
'in_channels': dataset.num_node_features,
'out_channels': 16,
},
},
'activation': {
'activation_name': 'ReLU',
'activation_kwargs': None,
},
},

{
'label': 'n',
'layer': {
'layer_name': 'GCNConv',
'layer_kwargs': {
'in_channels': 16,
'out_channels': 16,
},
},
'activation': {
'activation_name': 'LogSoftmax',
'activation_kwargs': None,
},
},

{
'label': 'n',
'layer': {
'layer_name': 'GCNConv',
'layer_kwargs': {
'in_channels': 16,
'out_channels': dataset.num_classes,
},
},
'activation': {
'activation_name': 'LogSoftmax',
'activation_kwargs': None,
},
},
]
)
)
)

gcn_gcn_linearized = FrameworkGNNConstructor(
model_config=ModelConfig(
structure=ModelStructureConfig(
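For reference, the declarative gcn_gcn_gcn structure above corresponds roughly to the plain PyTorch Geometric module below. This is a sketch: FrameworkGNNConstructor may wire layers and activations differently, and the LogSoftmax after the middle layer is kept exactly as declared.

    import torch
    from torch_geometric.nn import GCNConv

    class ThreeLayerGCN(torch.nn.Module):
        """Rough equivalent of the gcn_gcn_gcn structure config."""
        def __init__(self, num_node_features: int, num_classes: int):
            super().__init__()
            self.conv1 = GCNConv(num_node_features, 16)
            self.conv2 = GCNConv(16, 16)
            self.conv3 = GCNConv(16, num_classes)

        def forward(self, x, edge_index):
            x = torch.relu(self.conv1(x, edge_index))
            x = torch.log_softmax(self.conv2(x, edge_index), dim=-1)
            return torch.log_softmax(self.conv3(x, edge_index), dim=-1)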
