-
Notifications
You must be signed in to change notification settings - Fork 8
/
runner.py
79 lines (62 loc) · 3.09 KB
/
runner.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from pydeepflow.model import Multi_Layer_ANN
from pydeepflow.early_stopping import EarlyStopping
from pydeepflow.checkpoints import ModelCheckpoint
from pydeepflow.learning_rate_scheduler import LearningRateScheduler
from pydeepflow.model import Plotting_Utils
from pydeepflow.cross_validator import CrossValidator
if __name__ == "__main__":
    # Demo script: k-fold cross-validation of a small pydeepflow ANN on Iris.

    # Load Iris dataset from sklearn (150 samples, 4 features, 3 classes).
    iris = load_iris()
    X = iris.data
    y = iris.target

    print("First five rows of the dataset:")
    print(pd.DataFrame(X, columns=iris.feature_names).head())

    # Convert integer labels to one-hot encoding for categorical cross-entropy.
    y_one_hot = np.eye(len(np.unique(y)))[y]

    # GPU is disabled for this example.
    # BUG FIX: the original computed `True if use_gpu_input == 'y' else False`
    # with use_gpu_input = False — a bool compared to the string 'y', which is
    # always False. State the intent directly instead of via a dead comparison.
    use_gpu = False

    # Define the architecture of the network.
    hidden_layers = [5, 5]            # two hidden layers with 5 neurons each
    activations = ['relu', 'relu']    # ReLU activations for the hidden layers

    # Initialize the CrossValidator.
    k_folds = 10                      # number of folds for cross-validation
    cross_validator = CrossValidator(n_splits=k_folds)

    # Perform k-fold cross-validation.
    fold_accuracies = []              # accuracy of each fold
    for fold, (train_index, val_index) in enumerate(cross_validator.split(X, y_one_hot)):
        print(f"Training on fold {fold + 1}/{k_folds}")

        # Split data into training and validation sets for the current fold.
        X_train, X_val = X[train_index], X[val_index]
        y_train, y_val = y_one_hot[train_index], y_one_hot[val_index]

        # BUG FIX (data leakage): the original fit the StandardScaler on the
        # FULL dataset before splitting, leaking validation statistics into
        # training. Fit on the training fold only, then apply to both splits.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_val = scaler.transform(X_val)

        # Initialize a fresh ANN for each fold (no batch normalization).
        ann = Multi_Layer_ANN(X_train, y_train, hidden_layers, activations,
                              loss='categorical_crossentropy', use_gpu=use_gpu)

        # Callback: cyclic learning-rate schedule.
        lr_scheduler = LearningRateScheduler(initial_lr=0.01, strategy="cyclic")

        # Train the model; validation data lets fit() track val metrics.
        ann.fit(epochs=1000, learning_rate=0.01,
                lr_scheduler=lr_scheduler,
                X_val=X_val,
                y_val=y_val,
                verbose=True)

        # Evaluate the model on the validation set.
        y_pred_val = ann.predict(X_val)
        y_val_labels = np.argmax(y_val, axis=1)
        y_pred_val_labels = np.argmax(y_pred_val, axis=1)  # multi-class argmax

        # Calculate and store the accuracy for this fold.
        fold_accuracy = np.mean(y_pred_val_labels == y_val_labels)
        fold_accuracies.append(fold_accuracy)
        print(f"Fold {fold + 1} Accuracy: {fold_accuracy * 100:.2f}%")

    # FIX: the original collected fold accuracies but never summarized them.
    # Report mean and standard deviation across all folds.
    print(f"Cross-validation accuracy: "
          f"{np.mean(fold_accuracies) * 100:.2f}% "
          f"(+/- {np.std(fold_accuracies) * 100:.2f}%)")

    # Plot the training history of the last fold.
    plot_utils = Plotting_Utils()
    plot_utils.plot_training_history(ann.history)