testing on different datasets
LukyanovKirillML committed Dec 6, 2024
1 parent 57c27ab commit d282733
Showing 4 changed files with 33 additions and 19 deletions.
31 changes: 14 additions & 17 deletions experiments/attack_defense_test.py
@@ -22,7 +22,8 @@


def test_attack_defense(d='Cora', m='gin_2', a_e=None, d_e=None, a_p=None, d_p=None):
    # my_device = device('cuda' if torch.cuda.is_available() else 'cpu')
    my_device = device('cpu')

    full_name = None

@@ -318,8 +319,7 @@ def test_attack_defense(d='Cora', m='gin_2', a_e=None, d_e=None, a_p=None, d_p=N
    warnings.warn("Start training")
    dataset.train_test_split()


    for i in range(3):
    for i in range(2):
        adm = FrameworkAttackDefenseManager(
            gen_dataset=copy.deepcopy(dataset),
            gnn_manager=gnn_model_manager,
@@ -334,7 +334,7 @@ def test_attack_defense(d='Cora', m='gin_2', a_e=None, d_e=None, a_p=None, d_p=N
        adm.poison_defense_pipeline(
            steps=steps_epochs,
            save_model_flag=save_model_flag,
            metrics_attack=[AttackMetric("ASR"), AttackMetric("AuccAttackDiff"),],
            metrics_attack=[AttackMetric("ASR"), AttackMetric("AuccAttackDiff"), ],
            metrics_defense=[DefenseMetric("AuccDefenseCleanDiff"), DefenseMetric("AuccDefenseAttackDiff"), ],
            mask='test'
        )
@@ -802,11 +802,10 @@ def test_jaccard():
        }
    )


    # gnn_model_manager.set_poison_attacker(poison_attack_config=poison_attack_config)
    # gnn_model_manager.set_poison_defender(poison_defense_config=poison_defense_config)
    gnn_model_manager.set_evasion_attacker(evasion_attack_config=netattackgroup_evasion_attack_config)
    #gnn_model_manager.set_evasion_defender(evasion_defense_config=gradientregularization_evasion_defense_config)
    # gnn_model_manager.set_evasion_defender(evasion_defense_config=gradientregularization_evasion_defense_config)

    warnings.warn("Start training")
    dataset.train_test_split()
@@ -832,7 +831,6 @@ def test_jaccard():

    mask_loc = Metric.create_mask_by_target_list(y_true=dataset.labels, target_list=node_idxs)


    metric_loc = gnn_model_manager.evaluate_model(
        gen_dataset=dataset, metrics=[Metric("F1", mask='train', average='macro'),
                                      Metric("Accuracy", mask='train')])
@@ -849,7 +847,6 @@ def test_jaccard():
    print(f"NODE IDXS: {node_idxs}", metric_loc)



def test_adv_training():
    from defense.evasion_defense import AdvTraining

@@ -1121,29 +1118,29 @@ def test_pgd():
print(f"After PGD attack on graph (MUTAG dataset): {info_after_pgd_attack_on_graph}")



def exp_pipeline():
dataset_grid = ['Photo', 'Cora']
#model_grid = ['gcn_2', 'gcn_3', 'gin_2']
model_grid = ['gcn_2']
dataset_grid = ['Cora']
# model_grid = ['gcn_2', 'gcn_3', 'gin_2']
model_grid = ['gin_2']
attack_grid_evasion = ['fgsm', 'nettack']
attack_grid_poison = ['clga']
defense_grid_evasion = []
defense_grid_poison = [None, 'jaccard']
defense_grid_poison = ['jaccard']

for d in dataset_grid:
for m in model_grid:
for a_p in attack_grid_poison:
for d_p in defense_grid_poison:
test_attack_defense(d, m, a_p=a_p,d_p=d_p)
test_attack_defense(d, m, a_p=a_p, d_p=d_p)


if __name__ == '__main__':
import random

#random.seed(10)
#test_attack_defense()
# random.seed(10)
# test_attack_defense()
exp_pipeline()
# torch.manual_seed(5000)
# test_gnnguard()
#test_jaccard()
# test_jaccard()
# test_pgd()
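As an aside, the nested grid loops in exp_pipeline can be written more compactly with itertools.product; the sketch below only restates the sweep shown above (same grids, same call) and is not part of the commit.

from itertools import product

# Equivalent sweep over the same parameter grids as exp_pipeline (illustrative only).
dataset_grid = ['Cora']
model_grid = ['gin_2']
attack_grid_poison = ['clga']
defense_grid_poison = ['jaccard']

for d, m, a_p, d_p in product(dataset_grid, model_grid, attack_grid_poison, defense_grid_poison):
    test_attack_defense(d, m, a_p=a_p, d_p=d_p)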
12 changes: 12 additions & 0 deletions metainfo/poison_attack_parameters.json
@@ -15,6 +15,18 @@
"train_iters": ["Train iters (surrogate)", "int", 200, {"min": 0, "step": 1}, "Trainig iterations for surrogate model"],
"attack_structure": ["Attack structure", "bool", true, {}, "whether change graph structure with attack or not"],
"attack_features": ["Attack features", "bool", false, {}, "whether change node features with attack or not"]
},
"CLGAAttack": {
"learning_rate": ["Learning Rate", "float", 0.01, {"min": 0.0001, "max": 0.1, "step": 0.001}, "Learning rate for model optimization"],
"num_hidden": ["Hidden Units", "int", 256, {"min": 16, "max": 1024, "step": 16}, "Number of hidden units in the GCN encoder"],
"num_proj_hidden": ["Projection Units", "int", 32, {"min": 16, "max": 128, "step": 16}, "Number of units in the projection head"],
"activation": ["Activation Function", "str", "prelu", ["prelu", "relu", "tanh", "sigmoid"], "Activation function for the GCN encoder"],
"drop_edge_rate_1": ["Drop Edge Rate (View 1)", "float", 0.3, {"min": 0.0, "max": 1.0, "step": 0.01}, "Probability of dropping edges in the first augmented view"],
"drop_edge_rate_2": ["Drop Edge Rate (View 2)", "float", 0.4, {"min": 0.0, "max": 1.0, "step": 0.01}, "Probability of dropping edges in the second augmented view"],
"tau": ["Temperature coeff", "float", 0.4, {"min": 0.1, "max": 1.0, "step": 0.1}, "Temperature parameter for contrastive loss"],
"num_epochs": ["Number of Epochs", "int", 3000, {"min": 100, "max": 10000, "step": 100}, "Number of training epochs for the attack"],
"weight_decay": ["Weight Decay", "float", 1e-5, {"min": 1e-6, "max": 1e-3, "step": 1e-5}, "Weight decay (L2 regularization) coefficient"],
"drop_scheme": ["Drop Scheme", "str", "degree", ["degree", "pr", "evc", "uniform"], "Scheme for dropping edges or features"]
}
}
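Each parameter entry in this metainfo file follows the pattern [display name, type, default, constraints, description]. A minimal sketch of reading the CLGAAttack defaults out of the file is shown below; the load_defaults helper is hypothetical and only illustrates the layout, the framework's own parameter parser may differ.

import json

def load_defaults(path, attack_name):
    # Each entry is [display name, type, default, constraints, description]; index 2 is the default.
    with open(path) as f:
        spec = json.load(f)
    return {name: entry[2] for name, entry in spec[attack_name].items()}

defaults = load_defaults("metainfo/poison_attack_parameters.json", "CLGAAttack")
# e.g. defaults["learning_rate"] == 0.01 and defaults["num_epochs"] == 3000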

4 changes: 2 additions & 2 deletions src/defense/JaccardDefense/jaccard_def.py
@@ -61,8 +61,8 @@ def defense(self, gen_dataset, **kwargs):
        return gen_dataset

    def jaccard_index(self, x, u, v):
        im1 = x[u,:].numpy().astype(bool)
        im2 = x[v,:].numpy().astype(bool)
        im1 = x[u,:].cpu().numpy().astype(bool)
        im2 = x[v,:].cpu().numpy().astype(bool)
        intersection = np.logical_and(im1, im2)
        union = np.logical_or(im1, im2)
        return intersection.sum() / float(union.sum())
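For reference, jaccard_index computes |A ∩ B| / |A ∪ B| over the binarized feature rows of nodes u and v. A self-contained toy example (the tensor values are illustrative, not from the repository):

import numpy as np
import torch

x = torch.tensor([[1., 0., 1., 0.],
                  [1., 1., 0., 0.]])
im1 = x[0, :].cpu().numpy().astype(bool)   # [True, False, True, False]
im2 = x[1, :].cpu().numpy().astype(bool)   # [True, True, False, False]
intersection = np.logical_and(im1, im2)    # one feature in common
union = np.logical_or(im1, im2)            # three features in total
print(intersection.sum() / float(union.sum()))  # 1 / 3 ≈ 0.333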
5 changes: 5 additions & 0 deletions src/models_builder/gnn_models.py
@@ -983,11 +983,16 @@ def train_on_batch(
    ) -> torch.Tensor:
        loss = None
        if hasattr(batch, "edge_weight"):
            if batch.edge_weight is not None:
                batch.edge_weight.cpu()
            weight = batch.edge_weight
        else:
            weight = None
        if task_type == "single-graph":
            self.optimizer.zero_grad()
            batch.x.cpu()
            batch.edge_index.cpu()

            logits = self.gnn(batch.x, batch.edge_index, weight)
            loss = self.loss_function(logits, batch.y)
            if self.clip is not None:
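One caveat on the added .cpu() calls: torch.Tensor.cpu() is not an in-place operation; it returns a tensor on CPU and leaves the attribute unchanged, so a bare batch.x.cpu() has no effect on the batch. If the intent is to guarantee that the batch lives on CPU, a sketch of the assigning variant (assuming batch is a PyTorch Geometric Data/Batch object) would look like this:

# Reassign so the device move actually takes effect.
batch.x = batch.x.cpu()
batch.edge_index = batch.edge_index.cpu()
if getattr(batch, "edge_weight", None) is not None:
    batch.edge_weight = batch.edge_weight.cpu()

# Or move the whole Data/Batch object at once:
batch = batch.to('cpu')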
