Skip to content

Commit

Permalink
MLP and RNN and LSTM tests
Browse files Browse the repository at this point in the history
  • Loading branch information
OrestisAlpos committed Apr 30, 2017
1 parent fbc427c commit 547720c
Show file tree
Hide file tree
Showing 15 changed files with 121 additions and 3 deletions.
File renamed without changes.
File renamed without changes
69 changes: 69 additions & 0 deletions MLP_train_vs_test_error.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
import os
from keras.models import model_from_json, Sequential
from keras.layers import Activation, Dense, Dropout
from keras.engine import Input, Model
import keras.utils
from keras.utils.vis_utils import plot_model
import numpy as np
import datetime
import random
from reader import Reader
import matplotlib.pyplot as plt


# Output directory for the per-epoch train-vs-test error logs written by fit_and_eval().
results_directory = './MLPresults/train_vs_test_error/'
# Directory where get_model() saves each model's JSON definition and architecture plot.
models_directory = './MLPmodels/'


def get_model(num_hid_layers, cells_per_layer, dropout_rate):
    """Build, plot, and serialize a binary-classification MLP.

    Architecture: Dense(relu) + Dropout on the input, then `num_hid_layers`
    additional Dense(relu) + Dropout pairs, and a single sigmoid output unit.

    Parameters
    ----------
    num_hid_layers : int
        Number of extra hidden Dense/Dropout pairs after the input layer.
    cells_per_layer : int
        Units in every hidden Dense layer.
    dropout_rate : float
        Dropout rate applied after each hidden Dense layer.

    Returns
    -------
    Sequential
        The uncompiled model. As a side effect, a PNG plot and the JSON
        definition are written under `models_directory`.
    """
    length = Reader.getInputShape()
    model = Sequential()
    model.add(Dense(cells_per_layer, input_shape=(length,), activation='relu'))
    model.add(Dropout(dropout_rate))
    for _ in range(num_hid_layers):
        model.add(Dense(cells_per_layer, activation='relu'))
        model.add(Dropout(dropout_rate))
    # softmax for the multiclass case, sigmoid for the 2-class case
    model.add(Dense(1, activation='sigmoid'))
    model_name = (models_directory + 'MLP.hidlay' + str(num_hid_layers)
                  + '.cells' + str(cells_per_layer) + '.drop' + str(dropout_rate))
    plot_model(model, to_file=model_name + '.2class' + '.png', show_shapes=True)
    # Context manager guarantees the handle is closed even if write() raises
    # (the original open()/close() pair leaked the handle on error).
    with open(model_name + '.2class' + '.json', 'w+') as fp_model:
        fp_model.write(model.to_json())
    return model


def fit_and_eval(dataset_id, num_hid_layers, cells_per_layer, dropout_rate):
    """Train an MLP on one dataset and log per-epoch train/test curves.

    Loads the dataset via Reader, builds the model with get_model(), trains
    for 500 epochs with MSE loss and SGD, and appends four lines (train loss,
    train accuracy, test loss, test accuracy histories) to a results file
    named after the configuration under `results_directory`.

    Parameters
    ----------
    dataset_id : int
        Identifier passed to Reader.getDataset().
    num_hid_layers, cells_per_layer, dropout_rate
        Forwarded to get_model(); also encoded in the results file name.
    """
    dataset_name = 'dataset' + str(dataset_id)
    (x_train, y_train), (x_test, y_test) = Reader.getDataset(dataset_id)
    nb_epoch = 500
    optimizer = 'sgd'
    loss_function = 'mse'
    # NOTE: file-name layout (no '.' before 'Layers') is kept as-is so the
    # downstream plotting scripts keep finding these files.
    results_file = (results_directory + dataset_name + '.' + loss_function
                    + '.' + optimizer + '.Dropout' + str(dropout_rate)
                    + 'Layers' + str(num_hid_layers)
                    + '.Cells' + str(cells_per_layer))
    write_results(results_file, 'loss|acc|val_loss|val_acc')
    model = get_model(num_hid_layers, cells_per_layer, dropout_rate)
    model.compile(optimizer=optimizer, loss=loss_function, metrics=['accuracy'])
    # validation_data makes Keras record val_loss/val_acc each epoch, giving
    # the train-vs-test curves this script exists to produce.
    hist = model.fit(x_train, y_train, validation_data=(x_test, y_test),
                     epochs=nb_epoch, batch_size=128, shuffle=True)
    # One line per metric, in the same order as the header written above.
    # ('acc'/'val_acc' are the history keys used by Keras 2.0.x.)
    for metric in ('loss', 'acc', 'val_loss', 'val_acc'):
        write_results(results_file, hist.history[metric])

def write_results(results_file, text):
    """Append `text` (stringified via str()) plus a newline to `results_file`.

    Parameters
    ----------
    results_file : str
        Path of the log file; created on first write, appended to afterwards.
    text : object
        Anything convertible with str() (header strings, history lists, ...).
    """
    # Context manager flushes and closes the handle per call, even on error.
    with open(results_file, 'a') as fp_results:
        fp_results.write(str(text))
        fp_results.write('\n')

if __name__ == '__main__':
    # Entry-point guard: run the two dataset-3 configurations (1 and 4 hidden
    # layers, 40 cells, 0.5 dropout) only when executed as a script, so merely
    # importing this module no longer kicks off 500-epoch training runs.
    fit_and_eval(3, 1, 40, 0.5)
    fit_and_eval(3, 4, 40, 0.5)

1 change: 1 addition & 0 deletions MLPmodels/MLP.hidlay1.cells40.drop0.5.2class.json
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"config": [{"config": {"activation": "relu", "use_bias": true, "kernel_regularizer": null, "kernel_constraint": null, "name": "dense_1", "dtype": "float32", "batch_input_shape": [null, 79], "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "trainable": true, "activity_regularizer": null, "bias_constraint": null, "bias_regularizer": null, "units": 40}, "class_name": "Dense"}, {"config": {"name": "dropout_1", "trainable": true, "rate": 0.5}, "class_name": "Dropout"}, {"config": {"activation": "relu", "use_bias": true, "kernel_constraint": null, "name": "dense_2", "bias_constraint": null, "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_regularizer": null, "trainable": true, "activity_regularizer": null, "kernel_regularizer": null, "units": 40}, "class_name": "Dense"}, {"config": {"name": "dropout_2", "trainable": true, "rate": 0.5}, "class_name": "Dropout"}, {"config": {"activation": "sigmoid", "use_bias": true, "kernel_constraint": null, "name": "dense_3", "bias_constraint": null, "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_regularizer": null, "trainable": true, "activity_regularizer": null, "kernel_regularizer": null, "units": 1}, "class_name": "Dense"}], "backend": "theano", "class_name": "Sequential", "keras_version": "2.0.3"}
Binary file added MLPmodels/MLP.hidlay1.cells40.drop0.5.2class.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
1 change: 1 addition & 0 deletions MLPmodels/MLP.hidlay4.cells40.drop0.5.2class.json
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"config": [{"config": {"activation": "relu", "use_bias": true, "kernel_regularizer": null, "kernel_constraint": null, "name": "dense_4", "dtype": "float32", "batch_input_shape": [null, 79], "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "trainable": true, "activity_regularizer": null, "bias_constraint": null, "bias_regularizer": null, "units": 40}, "class_name": "Dense"}, {"config": {"name": "dropout_3", "trainable": true, "rate": 0.5}, "class_name": "Dropout"}, {"config": {"activation": "relu", "use_bias": true, "kernel_constraint": null, "name": "dense_5", "bias_constraint": null, "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_regularizer": null, "trainable": true, "activity_regularizer": null, "kernel_regularizer": null, "units": 40}, "class_name": "Dense"}, {"config": {"name": "dropout_4", "trainable": true, "rate": 0.5}, "class_name": "Dropout"}, {"config": {"activation": "relu", "use_bias": true, "kernel_constraint": null, "name": "dense_6", "bias_constraint": null, "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_regularizer": null, "trainable": true, "activity_regularizer": null, "kernel_regularizer": null, "units": 40}, "class_name": "Dense"}, {"config": {"name": "dropout_5", "trainable": true, "rate": 0.5}, "class_name": "Dropout"}, {"config": {"activation": "relu", "use_bias": true, "kernel_constraint": null, "name": "dense_7", "bias_constraint": null, "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": 
"VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_regularizer": null, "trainable": true, "activity_regularizer": null, "kernel_regularizer": null, "units": 40}, "class_name": "Dense"}, {"config": {"name": "dropout_6", "trainable": true, "rate": 0.5}, "class_name": "Dropout"}, {"config": {"activation": "relu", "use_bias": true, "kernel_constraint": null, "name": "dense_8", "bias_constraint": null, "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_regularizer": null, "trainable": true, "activity_regularizer": null, "kernel_regularizer": null, "units": 40}, "class_name": "Dense"}, {"config": {"name": "dropout_7", "trainable": true, "rate": 0.5}, "class_name": "Dropout"}, {"config": {"activation": "sigmoid", "use_bias": true, "kernel_constraint": null, "name": "dense_9", "bias_constraint": null, "kernel_initializer": {"config": {"distribution": "uniform", "mode": "fan_avg", "scale": 1.0, "seed": null}, "class_name": "VarianceScaling"}, "bias_initializer": {"config": {}, "class_name": "Zeros"}, "bias_regularizer": null, "trainable": true, "activity_regularizer": null, "kernel_regularizer": null, "units": 1}, "class_name": "Dense"}], "backend": "theano", "class_name": "Sequential", "keras_version": "2.0.3"}
Binary file added MLPmodels/MLP.hidlay4.cells40.drop0.5.2class.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions RNNHandler.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ def fit_and_eval(self, x_train, y_train, x_test, y_test, nb_epoch, dataset_name)
res_fscore.append(fscore)
self.write_result(str(i) +'|'+ str(loss) +'|'+ str(accuracy) +'|'+ str(precision) +'|'+ str(recall) +'|'+ str(fscore))

self.model.save(self.models_directory + '/full_models/' + dataset_name + '.' + self.model_name + '.h5')
return (res_loss, res_accuracy, res_precision, res_recall, res_fscore)


Expand Down
4 changes: 2 additions & 2 deletions RNN_plot_all.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,9 @@
#x_test = x_test[0:1000,:]
#y_test = y_test[0:1000]

for RNN_name in ['RNN_1.2A', 'RNN_2.2A', 'RNN_3.2A']:
for RNN_name in ['RNN_2A']:
results = {}
for loss,optimizer in [('binary_crossentropy','rmsprop')]:#, ('binary_crossentropy','rmsprop')]: #categorical_crossentropy
for loss,optimizer in [('mse','sgd')]:#, ('binary_crossentropy','rmsprop')]: #categorical_crossentropy
#for optimizer in ['sgd', 'rmsprop']:
RNNmodel = RNNHandler.RNNHandler(RNN_name, num_classes, loss, optimizer)
(res_loss, res_accuracy, res_precision, res_recall, res_fscore) = RNNmodel.fit_and_eval(x_train, y_train, x_test, y_test, num_epochs, dataset_name)
Expand Down
36 changes: 36 additions & 0 deletions RNNmodels/full_models/RNN_2A_try1/Dataset3.RNN_2A
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
RNN_2A Dataset3 Loss:mse Optimizer:sgd Dropout:No
Epoch|Loss|Accuracy|Precision|Recall|Fscore
1|0.249393068135|0.5|0.500500500501|1.0|0.667111407605
2|0.248312153488|0.514|0.507099391481|1.0|0.672947510094
3|0.244004927114|0.972|0.94696969697|1.0|0.972762645914
4|0.210154790625|0.908|0.983412322275|0.83|0.90021691974
5|0.0803285433103|0.902|0.985507246377|0.816|0.892778993435
6|0.0507295946019|0.923|0.981776765376|0.862|0.917997870075
7|0.0475076762441|0.924|0.98401826484|0.862|0.918976545842
8|0.0490682174755|0.926|0.988532110092|0.862|0.92094017094
9|0.051329900913|0.924|0.988479262673|0.858|0.918629550321
10|0.0535065157635|0.922|0.988425925926|0.854|0.916309012876
RNN_2A Dataset3 Loss:mse Optimizer:sgd Dropout:No
Epoch|Loss|Accuracy|Precision|Recall|Fscore
1|0.249481226876|0.5|0.500500500501|1.0|0.667111407605
2|0.248582044005|0.595|0.552486187845|1.0|0.711743772242
3|0.245551700696|0.927|0.881932021467|0.986|0.931067044381
4|0.225288330287|0.897|0.98533007335|0.806|0.886688668867
5|0.101098144269|0.898|0.987745098039|0.806|0.887665198238
6|0.0592786783102|0.912|0.979069767442|0.842|0.905376344086
7|0.0538275055143|0.92|0.988372093023|0.85|0.913978494624
8|0.0554141971436|0.919|0.988344988345|0.848|0.912809472551
9|0.0578174612486|0.916|0.988262910798|0.842|0.909287257019
10|0.0601488883879|0.911|0.988123515439|0.832|0.903365906623
RNN_2A Dataset3 Loss:mse Optimizer:sgd Dropout:No
Epoch|Loss|Accuracy|Precision|Recall|Fscore
1|0.109936682961|0.833983333333|0.989289144271|0.675277777778|0.802665205074
2|0.147271024661|0.833194444444|0.989999836599|0.673188888889|0.801420644316
3|0.155582350312|0.833238888889|0.990113248247|0.6732|0.801465676321
4|0.158335090967|0.83325|0.990129589986|0.673211111111|0.801478904447
5|0.159589471921|0.833266666667|0.990130073861|0.673244444444|0.801502685256
6|0.160230895648|0.833272222222|0.990146253779|0.673244444444|0.801507986375
7|0.160564807812|0.833277777778|0.990162434226|0.673244444444|0.801513287564
8|0.16073671819|0.833277777778|0.990162434226|0.673244444444|0.801513287564
9|0.161066646665|0.833277777778|0.9901784547|0.673233333333|0.80151066194
10|0.160675322958|0.833288888889|0.990178775697|0.673255555556|0.801526515602
Binary file not shown.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 1 addition & 1 deletion reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ def getDataset(DataSetType):
fp_att.readline() #read away the first line (title)
#Train Dataset: 10 legitimate and 10 attack samples, loop 30,000 times.
#TOT 300,000 leg, 300,000 att.
for i in range(10000): ############ CORRECTION: TOTAL 100,000 legitimate, 100,000 attack
for i in range(1000): ############ CORRECTION: TOTAL 10,000 legitimate, 10,000 attack
for j in range(10):
Reader.write_it(x_train, y_train, fp_leg, 0)
for j in range(10):
Expand Down

0 comments on commit 547720c

Please sign in to comment.