Code lint fix for modeler and differential privacy changed to DP-SGD
advaithsrao committed Dec 5, 2023
1 parent 5c3d5bb commit bc11d91
Showing 2 changed files with 21 additions and 11 deletions.
6 changes: 6 additions & 0 deletions detector/modeler.py
@@ -554,6 +554,8 @@ def train(
 'val_loss': avg_val_loss,
 'val_accuracy': avg_val_accuracy,
 })
+
+print(f'{"="*20} Training Done {"="*20}')

 def predict(
 self,
@@ -873,6 +875,8 @@ def train(
 'val_loss': avg_val_loss,
 'val_accuracy': avg_val_accuracy,
 })
+
+print(f'{"="*20} Training Done {"="*20}')

 def predict(
 self,
@@ -1018,6 +1022,8 @@ def train(
 # Train the SVM model
 self.model.fit(body, label)
+
+print(f'{"="*20} Training Done {"="*20}')

 def predict(
 self,
 body: pd.Series | list[str],
26 changes: 15 additions & 11 deletions ethics/differential_privacy.py
@@ -132,8 +132,11 @@ def train(
 validation_dataloader = DataLoader(val_dataset, batch_size=self.batch_size)

 # Initialize the optimizer and learning rate scheduler
-optimizer = AdamW(list(self.model.parameters()),
-lr=self.learning_rate, eps=self.epsilon)
+# optimizer = AdamW(list(self.model.parameters()),
+# lr=self.learning_rate, eps=self.epsilon)
+#SGD Optimizer
+optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.9)
+
 total_steps = len(train_dataloader) * self.num_epochs
 scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

Expand All @@ -152,15 +155,14 @@ def train(
)

print(
f"""
********
Using,
sigma(Noise Multiplier) = {optimizer.noise_multiplier}
C(Max Grad Norm) = {MAX_GRAD_NORM}
Epsilon = {self.epsilon}
Delta = {1/total_steps}
********
"""
f"""********
Using,
sigma(Noise Multiplier) = {optimizer.noise_multiplier}
C(Max Grad Norm) = {MAX_GRAD_NORM}
Epsilon = {self.epsilon}
Target Epsilon = {TARGET_EPSILON}
Delta = {1/total_steps}
********"""
)

# Initialize variables for early stopping
@@ -257,6 +259,8 @@ def train(
 'val_loss': avg_val_loss,
 'val_accuracy': avg_val_accuracy,
 })
+
+print(f'{"="*20} Training Done {"="*20}')

 def predict(
 self,
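Note on the optimizer change in this file: DP-SGD differs from ordinary SGD in that each per-sample gradient is clipped to a norm bound C (MAX_GRAD_NORM) and Gaussian noise scaled by the noise multiplier sigma is added before the update step, which is what the training log printed above reports. Below is a minimal, self-contained sketch of how such a setup is typically wired together. It assumes the project uses Opacus (the `optimizer.noise_multiplier` attribute in the diff suggests an Opacus-wrapped DPOptimizer, but that is an assumption); the model, data, and hyperparameter values are illustrative, not taken from the repository.

import torch
from torch.utils.data import DataLoader, TensorDataset
from opacus import PrivacyEngine  # assumption: Opacus provides the DP-SGD wrapper

# Toy stand-ins for the repository's model and data loader (illustrative only).
model = torch.nn.Linear(16, 2)
data = TensorDataset(torch.randn(64, 16), torch.randint(0, 2, (64,)))
train_dataloader = DataLoader(data, batch_size=8)

# Plain SGD with momentum, as in the commit; DP-SGD adds per-sample gradient
# clipping and Gaussian noise on top of the ordinary SGD update.
optimizer = torch.optim.SGD(model.parameters(), lr=0.05, momentum=0.9)

privacy_engine = PrivacyEngine()
model, optimizer, train_dataloader = privacy_engine.make_private_with_epsilon(
    module=model,
    optimizer=optimizer,
    data_loader=train_dataloader,
    target_epsilon=1.0,   # illustrative privacy budget (cf. TARGET_EPSILON)
    target_delta=1e-5,    # illustrative delta
    epochs=3,
    max_grad_norm=1.0,    # per-sample clipping bound (cf. MAX_GRAD_NORM)
)

# The wrapped DPOptimizer exposes the calibrated noise multiplier (sigma),
# which is what the training log in the diff prints.
print(optimizer.noise_multiplier)

Plain SGD with momentum is the conventional pairing for this algorithm, which is why the commit swaps out AdamW, although Opacus can also wrap adaptive optimizers such as AdamW if desired.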
