From bae28160bd90cc0881e378dcb3f66de774238ff6 Mon Sep 17 00:00:00 2001
From: "sweep-nightly[bot]" <131841235+sweep-nightly[bot]@users.noreply.github.com>
Date: Sat, 25 Nov 2023 22:22:53 +0000
Subject: [PATCH] Sandbox run src/main.py

---
 src/main.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main.py b/src/main.py
index 5f93d03..aa33da9 100644
--- a/src/main.py
+++ b/src/main.py
@@ -7,14 +7,14 @@ from torchvision import datasets, transforms
 
 
 # Step 1: Load MNIST Data and Preprocess
-transform = transforms.Compose([
-    transforms.ToTensor(),
-    transforms.Normalize((0.5,), (0.5,))
-])
+transform = transforms.Compose(
+    [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
+)
 
-trainset = datasets.MNIST('.', download=True, train=True, transform=transform)
+trainset = datasets.MNIST(".", download=True, train=True, transform=transform)
 trainloader = DataLoader(trainset, batch_size=64, shuffle=True)
 
+
 # Step 2: Define the PyTorch Model
 class Net(nn.Module):
     def __init__(self):
@@ -22,14 +22,14 @@ def __init__(self):
         self.fc1 = nn.Linear(28 * 28, 128)
         self.fc2 = nn.Linear(128, 64)
         self.fc3 = nn.Linear(64, 10)
-    
+
     def forward(self, x):
-
         x = nn.functional.relu(self.fc1(x))
         x = nn.functional.relu(self.fc2(x))
         x = self.fc3(x)
         return nn.functional.log_softmax(x, dim=1)
 
+
 # Step 3: Train the Model
 model = CNN()
 optimizer = optim.SGD(model.parameters(), lr=0.01)
@@ -45,4 +45,4 @@ def forward(self, x):
         loss.backward()
         optimizer.step()
 
-torch.save(model.state_dict(), "mnist_model.pth")
\ No newline at end of file
+torch.save(model.state_dict(), "mnist_model.pth")