Increase early-stopping patience for modeler and differential_privacy
advaithsrao committed Dec 5, 2023
1 parent bc11d91 commit 6581734
Showing 2 changed files with 23 additions and 23 deletions.
detector/modeler.py: 2 changes (1 addition & 1 deletion)
@@ -453,7 +453,7 @@ def train(

# Initialize variables for early stopping
best_validation_loss = float("inf")
-patience = 5  # Number of epochs to wait for improvement
+patience = 10  # Number of epochs to wait for improvement
wait = 0

for epoch in range(self.num_epochs):
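
The hunk above only touches the patience initialization. For context, here is a minimal, self-contained sketch of how a patience counter typically drives early stopping; the variable names mirror the diff, while the per-epoch "training" and validation loss are random stand-ins for illustration, not the repo's code:

import random

# Minimal sketch of patience-based early stopping (assumed structure;
# only `patience`, `wait`, and `best_validation_loss` come from the diff).
num_epochs = 50
best_validation_loss = float("inf")
patience = 10  # Number of epochs to wait for improvement
wait = 0

for epoch in range(num_epochs):
    # Stand-in for one epoch of training followed by validation.
    validation_loss = random.random()

    if validation_loss < best_validation_loss:
        best_validation_loss = validation_loss
        wait = 0  # Improvement: reset the counter.
    else:
        wait += 1  # No improvement this epoch.
        if wait >= patience:
            print(f"Early stopping at epoch {epoch}")
            break

Raising patience from 5 to 10 simply lets the model go twice as many epochs without a new best validation loss before training is stopped.
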
ethics/differential_privacy.py: 44 changes (22 additions & 22 deletions)
@@ -167,7 +167,7 @@ def train(

# Initialize variables for early stopping
best_validation_loss = float("inf")
-patience = 5  # Number of epochs to wait for improvement
+patience = 10  # Number of epochs to wait for improvement
wait = 0

for epoch in range(self.num_epochs):
@@ -178,35 +178,35 @@ def train(
total_train_loss = 0

for step, batch in enumerate(train_dataloader):
-optimizer.zero_grad()
-b_input_ids = batch[0].to(self.device)
-b_input_mask = batch[1].to(self.device)
-b_labels = batch[2].to(self.device)
+optimizer.zero_grad()

+b_input_ids = batch[0].to(self.device)
+b_input_mask = batch[1].to(self.device)
+b_labels = batch[2].to(self.device)

-# Forward pass
-logits = self.model(b_input_ids, attention_mask=b_input_mask)
-loss = F.cross_entropy(logits, b_labels)
+# Forward pass
+logits = self.model(b_input_ids, attention_mask=b_input_mask)

+loss = F.cross_entropy(logits, b_labels)

-total_train_loss += loss.item()
+total_train_loss += loss.item()

-# Backward pass
-loss.backward()
+# Backward pass
+loss.backward()

-# torch.nn.utils.clip_grad_norm_(list(self.model.parameters()), 1.0)
+torch.nn.utils.clip_grad_norm_(list(self.model.parameters()), 1.0)

-# Update the model parameters
-optimizer.step()
+# Update the model parameters
+optimizer.step()

-# Update the learning rate
-scheduler.step()
+# Update the learning rate
+scheduler.step()

-if step % 100 == 0 and step != 0:
-    avg_train_loss = total_train_loss / 100
-    print(f'Step {step}/{len(train_dataloader)} - Average training loss: {avg_train_loss:.4f}')
+if step % 100 == 0 and step != 0:
+    avg_train_loss = total_train_loss / 100
+    print(f'Step {step}/{len(train_dataloader)} - Average training loss: {avg_train_loss:.4f}')

-    total_train_loss = 0
+    total_train_loss = 0

avg_train_loss = total_train_loss / len(train_dataloader)
print(f'Training loss: {avg_train_loss:.4f}')
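
For context, a minimal runnable sketch of the per-batch step shape shown in this hunk, with the gradient-clipping line enabled as in the new version. The toy linear model, random batches, AdamW optimizer, and LinearLR scheduler are illustrative assumptions only, not the repo's actual model or training setup:

import torch
import torch.nn.functional as F

# Toy stand-ins for the real model, optimizer, scheduler, and data.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(16, 2).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, total_iters=10)

for step in range(10):
    inputs = torch.randn(8, 16, device=device)          # stand-in for b_input_ids
    labels = torch.randint(0, 2, (8,), device=device)   # stand-in for b_labels

    optimizer.zero_grad()

    # Forward pass
    logits = model(inputs)
    loss = F.cross_entropy(logits, labels)

    # Backward pass
    loss.backward()

    # Gradient clipping (the line this commit un-comments)
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

    # Update the model parameters, then the learning rate
    optimizer.step()
    scheduler.step()

Clipping the gradient norm to 1.0 between loss.backward() and optimizer.step() bounds the size of each parameter update, which is the same ordering the diff restores.
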
