neural_nets.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import numpy as np

# Model definitions: logistic regression, a row-wise LSTM classifier, a small
# CNN, and VGG variants built from a layer-configuration list.


class logistic(nn.Module):
    def __init__(self, in_size=32 * 32 * 1, num_classes=10):
        super(logistic, self).__init__()
        self.linear = nn.Linear(in_size, num_classes)

    def forward(self, x):
        # Flatten each sample to a vector, then apply a single linear layer
        out = x.view(x.size(0), -1)
        out = self.linear(out)
        return out


class lstm(nn.Module):
    def __init__(self, input_size=32, hidden_size=128, num_layers=2, num_classes=10):
        super(lstm, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.input_size = input_size
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Treat each input_size-pixel row of the image as one timestep
        x = x.reshape(-1, self.input_size, self.input_size)
        # Set initial hidden and cell states on the same device as the input
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        # Forward propagate LSTM; out has shape (batch_size, seq_length, hidden_size)
        out, _ = self.lstm(x, (h0, c0))
        # Decode the hidden state of the last time step
        out = self.fc(out[:, -1, :])
        return out


class cnn(nn.Module):
    def __init__(self):
        super(cnn, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
        self.fc1 = nn.Linear(800, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
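

# Shape check for cnn (assuming 1x32x32 inputs, which fc1's sizing implies):
#   conv1 (k=5): 32 -> 28, pool -> 14
#   conv2 (k=5): 14 -> 10, pool -> 5
#   flatten: 32 channels * 5 * 5 = 800, matching nn.Linear(800, 256)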


class VGG(nn.Module):
    '''
    VGG model
    '''
    def __init__(self, features, size=512, out=10):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            # nn.Dropout(),
            nn.Linear(size, size),
            nn.ReLU(True),
            # nn.Dropout(),
            nn.Linear(size, size),
            nn.ReLU(True),
            nn.Linear(size, out),
        )
        # Initialize conv weights with He initialization: normal, std sqrt(2/n)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, np.sqrt(2. / n))
                m.bias.data.zero_()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x


def make_layers(cfg):
    # Build a conv stack from a config list: integers are conv output channel
    # counts, 'M' inserts a 2x2 max-pooling layer
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)


def vgg11s():
    # Slimmed VGG-11 variant with narrower layers and a 128-wide classifier
    return VGG(make_layers([32, 'M', 64, 'M', 128, 128, 'M', 128, 128, 'M', 128, 128, 'M']), size=128)


def vgg11():
    return VGG(make_layers([64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']))
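

if __name__ == "__main__":
    # Minimal smoke test (a sketch: the batch size and input shapes below are
    # assumptions inferred from each model's layer sizes, not fixed by the models)
    for model, shape in [
        (logistic(), (4, 1, 32, 32)),   # flattened to 32*32*1 = 1024 features
        (lstm(), (4, 1, 32, 32)),       # reshaped to (batch, 32, 32), one row per timestep
        (cnn(), (4, 1, 32, 32)),        # fc1's 800 inputs imply 1x32x32 images
        (vgg11s(), (4, 3, 32, 32)),     # make_layers assumes 3-channel input
        (vgg11(), (4, 3, 32, 32)),
    ]:
        out = model(torch.randn(*shape))
        print(type(model).__name__, tuple(out.shape))  # each should print (4, 10)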