Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Q attack #21

Merged
Merged 5 commits into the base branch on Oct 14, 2024
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
110 changes: 109 additions & 1 deletion experiments/attack_defense_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,8 +343,116 @@ def test_nettack_evasion():
print(f"info_before_evasion_attack: {info_before_evasion_attack}")
print(f"info_after_evasion_attack: {info_after_evasion_attack}")

def test_qattack():
    """Smoke-test the QAttack genetic evasion attack on a small GCN.

    Trains a two-layer GCN on the KarateClub graph, attaches a QAttack
    evasion attacker through the framework config machinery with its default
    hyper-parameters (see metainfo/evasion_attack_parameters.json), and
    prints the resulting test accuracy.

    No return value; results are reported via stdout.
    """
    # Import registers the QAttack implementation with the attack framework.
    from attacks.QAttack import qattack
    my_device = device('cpu')

    # Load dataset. KarateClub is tiny, so the genetic search stays fast.
    # full_name = ("single-graph", "Planetoid", 'Cora')
    full_name = ('single-graph', 'pytorch-geometric-other', 'KarateClub')
    dataset, data, results_dataset_path = DatasetManager.get_by_full_name(
        full_name=full_name,
        dataset_ver_ind=0
    )

    # Build a GCN->GCN model from the model zoo and a default Adam manager.
    gcn_gcn = model_configs_zoo(dataset=dataset, model_name='gcn_gcn')

    manager_config = ConfigPattern(
        _config_class="ModelManagerConfig",
        _config_kwargs={
            "mask_features": [],
            "optimizer": {
                "_class_name": "Adam",
                "_config_kwargs": {},
            }
        }
    )

    gnn_model_manager = FrameworkGNNModelManager(
        gnn=gcn_gcn,
        dataset_path=results_dataset_path,
        manager_config=manager_config,
        modification=ModelModificationConfig(model_ver_ind=0, epochs=0)
    )

    gnn_model_manager.gnn.to(my_device)

    # Fit the clean model first so the attack targets a trained classifier.
    num_steps = 100
    gnn_model_manager.train_model(gen_dataset=dataset,
                                  steps=num_steps,
                                  save_model_flag=False)

    # Empty _config_kwargs -> QAttack runs with the defaults declared in
    # the evasion-attack parameter metainfo file.
    evasion_attack_config = ConfigPattern(
        _class_name="QAttack",
        _import_path=EVASION_ATTACK_PARAMETERS_PATH,
        _config_class="EvasionAttackConfig",
        _config_kwargs={
        }
    )

    gnn_model_manager.set_evasion_attacker(evasion_attack_config=evasion_attack_config)

    # Evaluate on the test mask.
    # NOTE(review): presumably the attached evasion attacker perturbs the
    # graph inside evaluate_model, making this post-attack accuracy —
    # confirm against FrameworkGNNModelManager.evaluate_model.
    acc_test = gnn_model_manager.evaluate_model(gen_dataset=dataset,
                                                metrics=[Metric("Accuracy", mask='test')])['test']['Accuracy']
    print(f"Accuracy on test: {acc_test}")

if __name__ == '__main__':
    # Fixed seed keeps training and the genetic attack reproducible.
    torch.manual_seed(5000)
    test_meta()
    test_qattack()
9 changes: 8 additions & 1 deletion metainfo/evasion_attack_parameters.json
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,13 @@
"perturb_structure": ["perturb_structure", "bool", true, {}, "Indicates whether the structure can be changed"],
"direct": ["direct", "bool", true, {}, "Indicates whether to directly modify edges/features of the node attacked or only those of influencers"],
"n_influencers": ["n_influencers", "int", 0, {"min": 0, "step": 1}, "Number of influencing nodes. Will be ignored if direct is True"]
}
},
"QAttack": {
"population_size": ["Population size", "int", 50, {"min": 1, "step": 1}, "Number of genes in population"],
"individual_size": ["Individual size", "int", 30, {"min": 1, "step": 1}, "Number of rewiring operations within one gene"],
    "generations": ["Generations", "int", 50, {"min": 0, "step": 1}, "Number of generations for genetic algorithm"],
"prob_cross": ["Probability for crossover", "float", 0.5, {"min": 0, "max": 1, "step": 0.01}, "Probability of crossover between two genes"],
"prob_mutate": ["Probability for mutation", "float", 0.02, {"min": 0, "max": 1, "step": 0.01}, "Probability of gene mutation"]
}
}

Loading
Loading