class_nn_standard.py
# -*- coding: utf-8 -*-
from keras.models import Model
from class_nn_base import NN_base
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.layers import Input, Dense, LeakyReLU, BatchNormalization
###########################################################################################
# Standard NN class #
###########################################################################################
class NN_standard(NN_base):
    ###############
    # Constructor #
    ###############
    def __init__(
        self,
        layer_dims,          # [n_x, n_h1, n_h2, ..., n_hL, n_y]
        learning_rate,
        output_activation,
        loss_function,
        weight_init,
        num_epochs,
        class_weights,
        minibatch_size,
        L2_lambda=0.0,
        flag_batchnorm=False,
        seed=0
    ):
        NN_base.__init__(
            self,
            learning_rate=learning_rate,
            output_activation=output_activation,
            loss_function=loss_function,
            weight_init=weight_init,
            num_epochs=num_epochs,
            class_weights=class_weights,
            minibatch_size=minibatch_size,
            L2_lambda=L2_lambda,
            flag_batchnorm=flag_batchnorm,
            seed=seed
        )
        # parameters
        self.layer_dims = layer_dims
        # model
        self.model = self.create_standard_model()
        # configure model for training
        # NOTE: `lr` was renamed to `learning_rate` in Keras 2.3+ / tf.keras
        self.model.compile(
            optimizer=Adam(lr=self.learning_rate),
            loss=self.loss_function,
            metrics=['accuracy']
        )
    ##################
    # Standard Model #
    ##################
    def create_standard_model(self):
        # Input and output dims
        n_x = self.layer_dims[0]
        n_y = self.layer_dims[-1]
        # Input layer
        X_input = Input(shape=(n_x,), name='input')
        # First hidden layer
        X = Dense(
            units=self.layer_dims[1],
            activation=None,
            use_bias=True,
            kernel_initializer=self.weight_init,
            bias_initializer='zeros',
            kernel_regularizer=l2(self.L2_lambda),
            bias_regularizer=None,
            activity_regularizer=None
        )(X_input)
        if self.flag_batchnorm:
            X = BatchNormalization()(X)
        X = LeakyReLU(alpha=0.01)(X)
        # Other hidden layers (if any)
        for n_h in self.layer_dims[2:-1]:
            X = Dense(
                units=n_h,
                activation=None,
                use_bias=True,
                kernel_initializer=self.weight_init,
                bias_initializer='zeros',
                kernel_regularizer=l2(self.L2_lambda),
                bias_regularizer=None,
                activity_regularizer=None
            )(X)
            if self.flag_batchnorm:
                X = BatchNormalization()(X)
            X = LeakyReLU(alpha=0.01)(X)
        # Output layer
        y_out = Dense(
            units=n_y,
            activation=self.output_activation,
            use_bias=True,
            kernel_initializer=self.weight_init,
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            name='output'
        )(X)
        # Model
        return Model(inputs=X_input, outputs=y_out)
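###########################################################################################
# Usage sketch (illustrative only)                                                        #
###########################################################################################
# A minimal, hedged example of building and training the standard network directly through
# the underlying Keras model. The data arrays, layer sizes, and hyperparameter values below
# are hypothetical, and training in this repo may instead go through a fit routine defined
# on NN_base rather than calling model.fit directly.
if __name__ == '__main__':
    import numpy as np

    # Toy data: 100 samples, 20 features, binary labels (hypothetical shapes)
    X_train = np.random.rand(100, 20)
    y_train = np.random.randint(0, 2, size=(100, 1))

    nn = NN_standard(
        layer_dims=[20, 16, 8, 1],        # [n_x, n_h1, n_h2, n_y]
        learning_rate=1e-3,
        output_activation='sigmoid',
        loss_function='binary_crossentropy',
        weight_init='glorot_uniform',
        num_epochs=10,
        class_weights=None,
        minibatch_size=32,
        L2_lambda=1e-4,
        flag_batchnorm=True,
        seed=0
    )
    nn.model.summary()
    nn.model.fit(X_train, y_train, epochs=10, batch_size=32, verbose=1)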