
Commit

add def full_pipeline_model_metrics_only
LukyanovKirillML committed Dec 5, 2024
1 parent 1bf8166 commit e055200
Showing 5 changed files with 268 additions and 31 deletions.
92 changes: 73 additions & 19 deletions experiments/attack_defense_metric_test.py
@@ -14,6 +14,15 @@
from src.base.datasets_processing import DatasetManager
from src.models_builder.models_zoo import model_configs_zoo

for pack in [
'defense.GNNGuard.gnnguard',
'defense.JaccardDefense.jaccard_def',
]:
try:
__import__(pack)
except ImportError:
print(f"Couldn't import Explainer from {pack}")


def attack_defense_metrics():
my_device = device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -61,12 +70,22 @@ def attack_defense_metrics():

gnn_model_manager.gnn.to(my_device)

random_poison_attack_config = ConfigPattern(
_class_name="RandomPoisonAttack",
# random_poison_attack_config = ConfigPattern(
# _class_name="RandomPoisonAttack",
# _import_path=POISON_ATTACK_PARAMETERS_PATH,
# _config_class="PoisonAttackConfig",
# _config_kwargs={
# "n_edges_percent": 1.0,
# }
# )

metafull_poison_attack_config = ConfigPattern(
_class_name="MetaAttackFull",
_import_path=POISON_ATTACK_PARAMETERS_PATH,
_config_class="PoisonAttackConfig",
_config_kwargs={
"n_edges_percent": 1.0,
"num_nodes": dataset.dataset.x.shape[0],
"lambda": 0,
}
)

@@ -86,7 +105,7 @@ def attack_defense_metrics():
_import_path=POISON_DEFENSE_PARAMETERS_PATH,
_config_class="PoisonDefenseConfig",
_config_kwargs={
"threshold": 0.05,
"threshold": 0.4,
}
)

@@ -95,7 +114,7 @@ def attack_defense_metrics():
_import_path=EVASION_ATTACK_PARAMETERS_PATH,
_config_class="EvasionAttackConfig",
_config_kwargs={
"epsilon": 0.001 * 12,
"epsilon": 0.001 * 5,
}
)

@@ -104,27 +123,45 @@ def attack_defense_metrics():
_import_path=EVASION_DEFENSE_PARAMETERS_PATH,
_config_class="EvasionDefenseConfig",
_config_kwargs={
"regularization_strength": 0.1 * 1000
"regularization_strength": 0.1 * 500
}
)

fgsm_evasion_attack_config1 = ConfigPattern(
_class_name="FGSM",
_import_path=EVASION_ATTACK_PARAMETERS_PATH,
_config_class="EvasionAttackConfig",
_config_kwargs={
"epsilon": 0.01,
}
)
at_evasion_defense_config = ConfigPattern(
_class_name="AdvTraining",
_import_path=EVASION_DEFENSE_PARAMETERS_PATH,
_config_class="EvasionDefenseConfig",
_config_kwargs={
"attack_name": None,
"attack_config": fgsm_evasion_attack_config1
}
)

gnn_model_manager.set_poison_attacker(poison_attack_config=random_poison_attack_config)
gnn_model_manager.set_poison_defender(poison_defense_config=jaccard_poison_defense_config)
# gnn_model_manager.set_poison_attacker(poison_attack_config=metafull_poison_attack_config)
gnn_model_manager.set_poison_defender(poison_defense_config=gnnguard_poison_defense_config)
gnn_model_manager.set_evasion_attacker(evasion_attack_config=fgsm_evasion_attack_config)
gnn_model_manager.set_evasion_defender(evasion_defense_config=gradientregularization_evasion_defense_config)

warnings.warn("Start training")
dataset.train_test_split()

# try:
# # raise FileNotFoundError()
# gnn_model_manager.load_model_executor()
# dataset = gnn_model_manager.load_train_test_split(dataset)
# raise FileNotFoundError()
# # gnn_model_manager.load_model_executor()
# except FileNotFoundError:
# gnn_model_manager.epochs = gnn_model_manager.modification.epochs = 0
# train_test_split_path = gnn_model_manager.train_model(gen_dataset=dataset, steps=steps_epochs,
# save_model_flag=save_model_flag,
# metrics=[Metric("F1", mask='train', average=None)])
# metrics=[Metric("F1", mask='train', average=None),
# Metric("Accuracy", mask="train")])
#
# if train_test_split_path is not None:
# dataset.save_train_test_mask(train_test_split_path)
@@ -135,10 +172,19 @@ def attack_defense_metrics():
#
# warnings.warn("Training was successful")
#
# # mask_loc = Metric.create_mask_by_target_list(y_true=dataset.labels, target_list=node_idxs)
#
# metric_loc = gnn_model_manager.evaluate_model(
# gen_dataset=dataset, metrics=[Metric("F1", mask='train', average='macro'),
# Metric("Accuracy", mask='train')],
# save_flag=True
# )
# print("TRAIN", metric_loc)
#
# metric_loc = gnn_model_manager.evaluate_model(
# gen_dataset=dataset, metrics=[Metric("F1", mask='test', average='macro'),
# Metric("Accuracy", mask='test')])
# print(metric_loc)
# print("TEST", metric_loc)

adm = FrameworkAttackDefenseManager(
gen_dataset=copy.deepcopy(dataset),
@@ -156,17 +202,25 @@ def attack_defense_metrics():
# metrics_attack=[AttackMetric("ASR")],
# mask='test'
# )
adm.evasion_defense_pipeline(
# adm.evasion_defense_pipeline(
# steps=steps_epochs,
# save_model_flag=save_model_flag,
# metrics_attack=[AttackMetric("ASR"), AttackMetric("AuccAttackDiff"),],
# metrics_defense=[DefenseMetric("AuccDefenseCleanDiff"), DefenseMetric("AuccDefenseAttackDiff"), ],
# mask='test'
# )

adm.full_pipeline_model_metrics_only(
# steps=1,
steps=steps_epochs,
save_model_flag=save_model_flag,
metrics_attack=[AttackMetric("ASR"), AttackMetric("AuccAttackDiff"),],
metrics_defense=[DefenseMetric("AuccDefenseCleanDiff"), DefenseMetric("AuccDefenseAttackDiff"), ],
mask='test'
model_metrics=[Metric("Accuracy", mask="test")],
task="tttttt",
)


if __name__ == '__main__':
import random

random.seed(10)
attack_defense_metrics()
# random.seed(10)
attack_defense_metrics()
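
Editor's note on the call above: `adm.full_pipeline_model_metrics_only(..., task="tttttt")` takes a six-character 't'/'f' string. Below is a minimal sketch of how such a string can be read, assuming the position-to-component order used by `full_pipeline_model_metrics_only` and `start` in attack_defense_manager.py later in this commit; the helper `describe_task` is purely illustrative and not part of the codebase.

# Illustrative sketch only (not part of this commit): decode a 't'/'f' task
# string into named on/off requests. The position order is an assumption
# taken from the flag handling in attack_defense_manager.py below.
TASK_COMPONENTS = (
    "poison_attack", "poison_defense", "evasion_attack",
    "mi_attack", "evasion_defense", "mi_defense",
)

def describe_task(task: str = "tttttt") -> dict:
    """Map each character of the task string to its pipeline component."""
    assert len(task) == len(TASK_COMPONENTS)
    return {name: char == 't' for name, char in zip(TASK_COMPONENTS, task)}

print(describe_task("ttftff"))
# {'poison_attack': True, 'poison_defense': True, 'evasion_attack': False,
#  'mi_attack': True, 'evasion_defense': False, 'mi_defense': False}
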
4 changes: 2 additions & 2 deletions src/attacks/metattack/meta_gradient_attack.py
@@ -216,7 +216,7 @@ def __init__(self, num_nodes=None, feature_shape=None, lambda_=0.5, train_iters=
self.b_velocities = []
self.momentum = momentum

def attack(self, gen_dataset, attack_budget=10, ll_constraint=True, ll_cutoff=0.004):
def attack(self, gen_dataset, attack_budget=0.05, ll_constraint=True, ll_cutoff=0.004):
super().attack(gen_dataset=gen_dataset)

self.hidden_sizes = [16] # FIXME get from model architecture
@@ -264,7 +264,7 @@ def attack(self, gen_dataset, attack_budget=10, ll_constraint=True, ll_cutoff=0.
modified_adj = ori_adj
modified_features = ori_features

for i in tqdm(range(attack_budget), desc="Perturbing graph"):
for i in tqdm(range(int(attack_budget*gen_dataset.dataset.data.edge_index.shape[1])), desc="Perturbing graph"):
if self.attack_structure:
modified_adj = self.get_modified_adj(ori_adj)

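
Editor's note on the hunk above: `attack_budget` is now read as a fraction of the dataset's edges rather than an absolute number of perturbation steps. A rough arithmetic sketch follows, with an edge count chosen only for illustration and edge_index assumed to follow the PyTorch Geometric [2, num_edges] layout.

# Illustrative arithmetic only (not part of this commit): how the new
# fractional default translates into perturbation iterations.
attack_budget = 0.05          # new default: fraction of edges to perturb
num_edges = 10_556            # example value, e.g. a Cora-sized edge_index
n_steps = int(attack_budget * num_edges)   # mirrors int(attack_budget * edge_index.shape[1])
print(n_steps)                # -> 527 iterations of the "Perturbing graph" loop
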
122 changes: 122 additions & 0 deletions src/models_builder/attack_defense_manager.py
@@ -77,6 +77,128 @@ def return_attack_defense_flags(
self.gnn_manager.evasion_defense_flag = self.start_attack_defense_flag_state["evasion_defense"]
self.gnn_manager.mi_defense_flag = self.start_attack_defense_flag_state["mi_defense"]

def full_pipeline_model_metrics_only(
self,
steps: int,
save_model_flag: bool = True,
model_metrics=None,
task: str = "tttttt",
):
if model_metrics is None:
from models_builder.gnn_models import Metric
model_metrics = [Metric("F1", mask='train', average=None),
Metric("Accuracy", mask="train")]

task = list(task)
if not self.available_attacks["poison"]:
task[0] = 'f'
if not self.available_defense["poison"]:
task[1] = 'f'
if not self.available_attacks["evasion"]:
task[2] = 'f'
if not self.available_attacks["mi"]:
task[3] = 'f'
if not self.available_defense["evasion"]:
task[4] = 'f'
if not self.available_defense["mi"]:
task[5] = 'f'
task = "".join(task)
self.run_experiments(
task=task,
steps=steps,
save_model_flag=save_model_flag,
model_metrics=model_metrics,
)

def run_experiments(
self,
steps: int,
save_model_flag: bool = True,
model_metrics=None,
task: str = 'ffffff',
flags=None,
position: int = 0,
):
if flags is None:
flags = []

if position == len(task):
self.start(
flags=flags,
steps=steps,
save_model_flag=save_model_flag,
model_metrics=model_metrics,
)
return

if task[position] == 'f':
self.run_experiments(
steps=steps,
save_model_flag=save_model_flag,
model_metrics=model_metrics,
task=task,
flags=flags + [False],
position=position + 1
)
elif task[position] == 't':
self.run_experiments(
steps=steps,
save_model_flag=save_model_flag,
model_metrics=model_metrics,
task=task,
flags=flags + [False],
position=position + 1
)
self.run_experiments(
steps=steps,
save_model_flag=save_model_flag,
model_metrics=model_metrics,
task=task,
flags=flags + [True],
position=position + 1
)

def start(
self,
steps: int,
save_model_flag: bool = True,
model_metrics=None,
flags: list = None,
):
if flags is None:
flags = [False] * 6
self.set_clear_model()
if flags[0]:
self.gnn_manager.poison_attack_flag = True
if flags[1]:
self.gnn_manager.poison_defense_flag = True
if flags[2]:
self.gnn_manager.evasion_attack_flag = True
if flags[3]:
self.gnn_manager.mi_attack_flag = True
if flags[4]:
self.gnn_manager.evasion_defense_flag = True
if flags[5]:
self.gnn_manager.mi_defense_flag = True
self.gnn_manager.epochs = self.gnn_manager.modification.epochs = 0
from models_builder.gnn_models import Metric
train_test_split_path = self.gnn_manager.train_model(
gen_dataset=self.gen_dataset, steps=steps,
save_model_flag=save_model_flag,
metrics=[Metric("F1", mask='train', average=None),
Metric("Accuracy", mask="train")]
)

if train_test_split_path is not None:
self.gen_dataset.save_train_test_mask(train_test_split_path)
metric_loc = self.gnn_manager.evaluate_model(
gen_dataset=self.gen_dataset,
metrics=model_metrics,
save_flag=save_model_flag,
)
if not save_model_flag:
print(f"Model metrics in a pipeline with task sequence {flags}: {metric_loc}")

def evasion_attack_pipeline(
self,
metrics_attack: List,
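
Editor's note on `run_experiments` above: the recursion visits one `start` call per flag combination, where an 'f' at a position pins that flag to False and a 't' branches into both False and True. A minimal equivalent enumeration is sketched below, only to illustrate the resulting number of runs; it is itertools-based and not the committed implementation.

from itertools import product

# Illustrative sketch only (not part of this commit): which flag vectors the
# recursive run_experiments ends up training. 'f' pins a flag to False,
# 't' tries both values, so "ttffff" gives 4 runs and "tttttt" gives 2**6 = 64.
def enumerate_flag_vectors(task: str):
    options = [(False,) if c == 'f' else (False, True) for c in task]
    return list(product(*options))

print(len(enumerate_flag_vectors("ttffff")))   # -> 4
print(len(enumerate_flag_vectors("tttttt")))   # -> 64
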
