test.py (forked from ya332/Adversarial-Attacks-Neural-Networks)
"""
from l2_attack import CarliniL2
CarliniL2(None, model).attack(inputs, targets)
"""
import numpy as np
from sklearn.datasets import load_wine
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from keras.layers import Dense, Input, concatenate, Dropout
from keras.models import Model
dataset = load_wine()
ensemble_num = 10 # number of sub-networks
bootstrap_size = 0.8 # 80% bootstrap sample size relative to the training set (declared but not used below)
training_size = 0.8 # 80% for training, 20% for test
num_hidden_neurons = 10 # number of neurons in each hidden layer
dropout = 0.25 # fraction of units dropped before the softmax output (helps prevent overfitting)
epochs = 200 # number of epochs (complete passes over the training set) to run
batch = 10 # mini-batch size for better convergence
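# `bootstrap_size` suggests each sub-network was meant to see its own bootstrap
# sample (bagging), but the script below feeds every sub-network the same arrays.
# A minimal sketch of drawing such a sample, assuming plain resampling with
# replacement (the helper name `bootstrap_sample` is illustrative, not from this repo):
def bootstrap_sample(X, Y, fraction=bootstrap_size, rng=np.random):
    """Return a bootstrap sample (with replacement) of fraction * len(X) rows."""
    n = int(fraction * len(X))
    idx = rng.randint(0, len(X), size=n)  # indices drawn with replacement
    return X[idx], Y[idx]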
# get the holdout training and test set
temp = []
scaler = MinMaxScaler()
one_hot = OneHotEncoder() # one hot encode the target classes
dataset['data'] = scaler.fit_transform(dataset['data'])
dataset['target'] = one_hot.fit_transform(np.reshape(dataset['target'], (-1, 1))).toarray()
for i in range(len(dataset.data)):
    temp.append([dataset['data'][i], np.array(dataset['target'][i])])
# shuffle the row of data and targets
temp = np.array(temp, dtype=object)  # object array: each row holds (features, one-hot target)
np.random.shuffle(temp)
# holdout training and test stop index
stop = int(training_size*len(dataset.data))
train_X = np.array([x for x in temp[:stop,0]])
train_Y = np.array([x for x in temp[:stop,1]])
test_X = np.array([x for x in temp[stop:,0]])
test_Y = np.array([x for x in temp[stop:,1]])
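# Equivalently, the shuffle-and-slice above could use scikit-learn's train_test_split
# (a sketch, left commented out so it does not override the split already made;
# random_state is an illustrative choice):
# from sklearn.model_selection import train_test_split
# train_X, test_X, train_Y, test_Y = train_test_split(
#     dataset['data'], dataset['target'], train_size=training_size, random_state=0)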
# now build the ensemble neural network
# first, build the individual sub-networks as parallel
# branches of a single Keras functional model.
sub_net_outputs = []
sub_net_inputs = []
for i in range(ensemble_num):
    # two hidden layers to keep it simple
    # input shape matches the training set's feature dimension
    net_input = Input(shape=(train_X.shape[1],))
    sub_net_inputs.append(net_input)
    y = Dense(num_hidden_neurons)(net_input)  # note: no activation specified, so these layers are linear
    y = Dense(num_hidden_neurons)(y)
    y = Dropout(dropout)(y)
    sub_net_outputs.append(y)  # collect each sub-network's output tensor
# now concatenate the output tensors
y = concatenate(sub_net_outputs)
# final softmax output layer
y = Dense(train_Y[0].shape[0], activation='softmax')(y)
# now build the whole functional model
model = Model(inputs=sub_net_inputs, outputs=y)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
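# Design note: concatenating hidden features and fitting one shared softmax is one
# way to combine the sub-networks; a more classical ensemble averages independent
# softmax heads instead. A minimal sketch using keras.layers.average, left commented
# out so it does not replace the model built above:
# from keras.layers import average
# head_outputs = [Dense(train_Y[0].shape[0], activation='softmax')(h)
#                 for h in sub_net_outputs]
# avg_model = Model(inputs=sub_net_inputs, outputs=average(head_outputs))
# avg_model.compile(optimizer='rmsprop', loss='categorical_crossentropy')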
print("Begin training...")
# train the model
model.fit([train_X] * ensemble_num, train_Y,
          validation_data=([test_X] * ensemble_num, test_Y),
          epochs=epochs, batch_size=batch)
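# The training log below shows val_loss still swinging late in training; if
# generalization matters more than a fixed epoch budget, Keras's EarlyStopping
# callback can stop on a val_loss plateau (a sketch; the patience value is an
# illustrative choice):
# from keras.callbacks import EarlyStopping
# model.fit([train_X] * ensemble_num, train_Y,
#           validation_data=([test_X] * ensemble_num, test_Y),
#           epochs=epochs, batch_size=batch,
#           callbacks=[EarlyStopping(monitor='val_loss', patience=20)])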
"""
Using TensorFlow backend.
Begin training...
Train on 142 samples, validate on 36 samples
Epoch 1/200
142/142 [==============================] - 0s - loss: 1.2639 - val_loss: 0.9777
Epoch 2/200
142/142 [==============================] - 0s - loss: 1.0023 - val_loss: 0.8191
Epoch 3/200
142/142 [==============================] - 0s - loss: 0.7580 - val_loss: 0.6661
... (epochs 4-199 trimmed; training loss falls steadily into the 1e-05 to 1e-04 range while val_loss fluctuates between roughly 0.01 and 0.17) ...
Epoch 200/200
142/142 [==============================] - 0s - loss: 7.5447e-05 - val_loss: 0.0095
Out[1]:
<keras.callbacks.History at 0x2d9142dfbe0>
"""
print("Training complete...")
np.set_printoptions(precision=2,suppress=True)
for i in range(len(test_X)):
print("Prediction: " + str(model.predict([test_X[i].reshape(1,test_X[i].shape[0])] * ensemble_num)) + " | True: " + str(test_Y[i]))