From bc11d9103397d679f65f39139b2305e6de019898 Mon Sep 17 00:00:00 2001
From: Advaith Rao
Date: Mon, 4 Dec 2023 20:30:42 -0500
Subject: [PATCH] Code lint fix for modeler and differential privacy changed to
 DP-SGD

---
 detector/modeler.py            |  6 ++++++
 ethics/differential_privacy.py | 26 +++++++++++++++----------
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/detector/modeler.py b/detector/modeler.py
index f2e0275..688d5a8 100644
--- a/detector/modeler.py
+++ b/detector/modeler.py
@@ -554,6 +554,8 @@ def train(
                 'val_loss': avg_val_loss,
                 'val_accuracy': avg_val_accuracy,
             })
+
+        print(f'{"="*20} Training Done {"="*20}')
 
     def predict(
         self,
@@ -873,6 +875,8 @@ def train(
                 'val_loss': avg_val_loss,
                 'val_accuracy': avg_val_accuracy,
             })
+
+        print(f'{"="*20} Training Done {"="*20}')
 
     def predict(
         self,
@@ -1018,6 +1022,8 @@ def train(
         # Train the SVM model
         self.model.fit(body, label)
 
+        print(f'{"="*20} Training Done {"="*20}')
+
     def predict(
         self,
         body: pd.Series | list[str],
diff --git a/ethics/differential_privacy.py b/ethics/differential_privacy.py
index 97f4c7f..75635de 100644
--- a/ethics/differential_privacy.py
+++ b/ethics/differential_privacy.py
@@ -132,8 +132,11 @@ def train(
         validation_dataloader = DataLoader(val_dataset, batch_size=self.batch_size)
 
         # Initialize the optimizer and learning rate scheduler
-        optimizer = AdamW(list(self.model.parameters()),
-                          lr=self.learning_rate, eps=self.epsilon)
+        # optimizer = AdamW(list(self.model.parameters()),
+        #                   lr=self.learning_rate, eps=self.epsilon)
+        # SGD Optimizer
+        optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.9)
+
         total_steps = len(train_dataloader) * self.num_epochs
         scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
 
@@ -152,15 +155,14 @@ def train(
         )
 
         print(
-            f"""
-            ********
-            Using,
-            sigma(Noise Multiplier) = {optimizer.noise_multiplier}
-            C(Max Grad Norm) = {MAX_GRAD_NORM}
-            Epsilon = {self.epsilon}
-            Delta = {1/total_steps}
-            ********
-            """
+            f"""********
+            Using,
+            sigma(Noise Multiplier) = {optimizer.noise_multiplier}
+            C(Max Grad Norm) = {MAX_GRAD_NORM}
+            Epsilon = {self.epsilon}
+            Target Epsilon = {TARGET_EPSILON}
+            Delta = {1/total_steps}
+            ********"""
         )
 
         # Initialize variables for early stopping
@@ -257,6 +259,8 @@ def train(
                 'val_loss': avg_val_loss,
                 'val_accuracy': avg_val_accuracy,
             })
+
+        print(f'{"="*20} Training Done {"="*20}')
 
     def predict(
         self,
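
The substantive change in ethics/differential_privacy.py is swapping AdamW for plain torch.optim.SGD so the loop follows the standard DP-SGD recipe: per-sample gradients are clipped to a norm bound C and Gaussian noise calibrated to a target (epsilon, delta) is added before each update. The logged sigma(Noise Multiplier) and Target Epsilon fields suggest the optimizer is wrapped by an Opacus PrivacyEngine. A minimal sketch of that wiring, assuming Opacus' make_private_with_epsilon API; the model, data loader, and the MAX_GRAD_NORM / TARGET_EPSILON / TARGET_DELTA values below are illustrative placeholders, not this repository's code:

    # Hypothetical DP-SGD setup sketch (assumes Opacus is installed); all names
    # and constants here are placeholders, not taken from this repository.
    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from opacus import PrivacyEngine

    MAX_GRAD_NORM = 1.0    # per-sample gradient clipping bound C (assumed value)
    TARGET_EPSILON = 8.0   # privacy budget to calibrate the noise to (assumed value)
    TARGET_DELTA = 1e-5    # typically around 1/len(dataset) (assumed value)
    EPOCHS = 3

    model = torch.nn.Linear(16, 2)  # stand-in for the real classifier
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
    train_dataloader = DataLoader(
        TensorDataset(torch.randn(256, 16), torch.randint(0, 2, (256,))),
        batch_size=32,
    )

    # PrivacyEngine replaces the optimizer with a DP-SGD optimizer that clips
    # per-sample gradients to MAX_GRAD_NORM and adds Gaussian noise whose scale
    # (the noise multiplier sigma) is calibrated to reach TARGET_EPSILON at TARGET_DELTA.
    privacy_engine = PrivacyEngine()
    model, optimizer, train_dataloader = privacy_engine.make_private_with_epsilon(
        module=model,
        optimizer=optimizer,
        data_loader=train_dataloader,
        target_epsilon=TARGET_EPSILON,
        target_delta=TARGET_DELTA,
        epochs=EPOCHS,
        max_grad_norm=MAX_GRAD_NORM,
    )
    print(f"sigma (noise multiplier) = {optimizer.noise_multiplier}")

With a setup along these lines the wrapped optimizer exposes the calibrated noise scale directly, which is why the patched log statement can read sigma off optimizer.noise_multiplier.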