-
Notifications
You must be signed in to change notification settings - Fork 0
/
client1lstm.py
86 lines (74 loc) · 3.7 KB
/
client1lstm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
from keras_preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from tensorflow.keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, MaxPooling2D, Conv2D, MaxPool2D, BatchNormalization, \
AveragePooling2D, RepeatVector
import cv2, pathlib, splitfolders
from tensorflow.keras.models import load_model
from tensorflow.python.keras.saving.saved_model.load import recurrent
# CNN feature extractor followed by an LSTM head, classifying
# 150x150 RGB images into two classes (softmax over 2 units).
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(150, 150, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(Activation("relu"))
model.add(Dropout(0.4))
# Repeat the 512-dim feature vector 13 times to build a sequence the LSTM
# layers can consume.
model.add(RepeatVector(13))
# NOTE(review): `recurrent` is imported from a private TF path
# (tensorflow.python.keras...); prefer `from tensorflow.keras.layers import
# LSTM` when the import block is next touched.
# BUG FIX: dropped the original `input_shape=(150,150,3)` argument here —
# Keras ignores input_shape on non-first layers, and the actual input to
# this layer is the (13, 512) sequence from RepeatVector, so the argument
# was dead and misleading.
model.add(recurrent.LSTM(128, return_sequences=True, activation='relu'))
model.add(recurrent.LSTM(64))
model.add(Dropout(0.25))
model.add(Dense(2))
model.add(Activation('softmax'))
model.summary()
# BUG FIX: the generators use class_mode='categorical' (one-hot labels) and
# the head is a 2-unit softmax, so the matching loss is
# categorical_crossentropy; the original binary_crossentropy produced
# misleading loss/accuracy values for this label encoding.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
epochs = 30
batch_size = 64
img_height, img_width = 150, 150
input_shape = (img_height, img_width, 3)
def create_data(data_base):
    """Split the raw dataset on disk and build train/val/test generators.

    Parameters
    ----------
    data_base : str or pathlib.Path
        Directory containing one sub-folder per class.

    Returns
    -------
    tuple
        (train_generator, valid_generator, test_generator) — Keras directory
        iterators yielding images rescaled to [0, 1] with one-hot labels.
    """
    data_base = pathlib.Path(data_base)
    # 70% train / 10% validation / 20% test, written under images/.
    splitfolders.ratio(data_base, output='images/', seed=1234,
                       ratio=(0.7, 0.1, 0.2), group_prefix=None)
    data_generator = ImageDataGenerator(rescale=1.0 / 255)
    # BUG FIX: the original passed subset='training' here although no
    # validation_split was configured on the ImageDataGenerator and the data
    # is already split on disk by splitfolders above — the argument was at
    # best redundant and at worst a source of errors, so it is removed.
    train_generator = data_generator.flow_from_directory(
        'images/train/', target_size=(img_height, img_width),
        class_mode='categorical', batch_size=batch_size)
    valid_generator = data_generator.flow_from_directory(
        'images/val/', target_size=(img_height, img_width),
        class_mode='categorical', batch_size=batch_size, shuffle=False)
    test_generator = data_generator.flow_from_directory(
        'images/test/', target_size=(img_height, img_width),
        class_mode='categorical', batch_size=batch_size, shuffle=False)
    return train_generator, valid_generator, test_generator
# NOTE(review): hard-coded absolute Windows path — move to a CLI argument or
# config file before running this client on another machine.
train_data, valid_data, test_data = create_data('C:/Users/amanz/PycharmProjects/stage/data/data_augm_clt')
import flwr as fl
class FlowerClient(fl.client.NumPyClient):
    """Flower federated-learning client wrapping the module-level Keras model.

    Relies on the globals defined above: `model`, `train_data`, `valid_data`
    and `test_data`.
    """

    def get_parameters(self, config=None):
        """Return the current local model weights as a list of ndarrays."""
        # `config=None` stays backward-compatible with the original
        # zero-argument call while matching newer Flower versions, which
        # invoke get_parameters(config).
        return model.get_weights()

    def fit(self, parameters, config):
        """Train locally for 5 epochs starting from the server weights.

        Returns (updated weights, number of training batches, metrics dict).
        """
        model.set_weights(parameters)
        # fit_generator is deprecated (and removed in recent TF releases);
        # Model.fit accepts generators directly.
        history = model.fit(train_data, epochs=5, validation_data=valid_data)
        print("Fit history : ", history.history)
        # BUG FIX: the original indexed [0], reporting the FIRST of the 5
        # local epochs; the metrics sent back to the server should describe
        # the final state, so use the last epoch ([-1]).
        results = {
            "loss": history.history["loss"][-1],
            "accuracy": history.history["accuracy"][-1],
            "val_loss": history.history["val_loss"][-1],
            "val_accuracy": history.history["val_accuracy"][-1],
        }
        return model.get_weights(), len(train_data), results

    def evaluate(self, parameters, config):
        """Evaluate the server weights on the local test split.

        Returns (loss, number of test batches, {"accuracy": ...}).
        """
        model.set_weights(parameters)
        # evaluate_generator is deprecated; Model.evaluate handles
        # generators. steps=16 preserved from the original (evaluates 16
        # batches, not necessarily the whole test set).
        loss, accuracy = model.evaluate(test_data, steps=16)
        print('Eval accuracy : ', accuracy)
        return loss, len(test_data), {"accuracy": accuracy}
# Connect this client to the federation server on localhost:8080; the gRPC
# message cap is raised to 1 GiB so full model weight sets fit in one message.
fl.client.start_numpy_client(server_address="localhost:8080",
client=FlowerClient(),
grpc_max_message_length=1024*1024*1024)