-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathinceptionv3_highfive_train.py
102 lines (77 loc) · 3.05 KB
/
inceptionv3_highfive_train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
from keras import activations
from keras.preprocessing import image
from keras.layers import Input,Flatten,Dense
from keras.models import Model
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
from keras.applications.inception_v3 import preprocess_input
import numpy as np
from inceptionv3_highfive_model import model_inceptionv3
from sklearn.model_selection import train_test_split
import os
from skimage import exposure,io, data, img_as_float
import tensorflow as tf
from keras import backend as K
from keras.preprocessing import image
from tempfile import mktemp
import time
# TF1-style session setup (ConfigProto/Session imply TensorFlow 1.x):
# allocate GPU memory on demand instead of reserving the whole card up front.
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True
sess = tf.Session(config=gpu_config)
# --- Load images ---------------------------------------------------------
# Walk the frames directory: each sub-directory holds frames for one action
# class; every frame filename starts with "<integer label>_".
fileslist = []
number_classes = 0
# Renamed from `classes` — the original shadowed this loop variable with the
# label list built below, which was confusing and error-prone.
for class_dir in os.listdir("tv_human_interactions_videos/frames/"):
    number_classes = number_classes + 1
    sd = "tv_human_interactions_videos/frames/" + class_dir + "/"
    for files in os.listdir(sd):
        fileslist.append(sd + files)
np.random.shuffle(fileslist)
classes = []
X = []
length = len(fileslist)
for i, f in enumerate(fileslist, start=1):
    # Label is the integer prefix of the filename, e.g. "3_0017.jpg" -> 3.
    img_class = int((f.split("/")[-1]).split("_")[0])
    # Default input size for Inception v3 is 299x299.
    img = image.load_img(f, target_size=(299, 299))
    # float() guards against Python-2 integer division (TF1-era code), where
    # i/length would print 0 for every frame except the last; identical
    # output under Python 3.
    print("Processed: " + str(float(i) / length))
    img_h = image.img_to_array(img)
    # NOTE(review): pixels are scaled to [0, 1] here, but Inception v3's
    # preprocess_input (imported above, never used) expects [-1, 1] —
    # confirm the model in inceptionv3_highfive_model compensates.
    img_h /= 255
    X.append(img_h)
    classes.append(img_class)
X = np.array(X, dtype='float32')
# One-hot encode the integer labels.
Y = np.eye(number_classes, dtype='uint8')[classes]
# Hold out 25% of the frames for validation.
x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.25)
K.clear_session()

# --- Model ---------------------------------------------------------------
# Inception-v3-based classifier: 299x299 RGB in, one softmax unit per class.
model = model_inceptionv3(299, 299, 3, number_classes)

# --- Training ------------------------------------------------------------
batch_size = 16
nb_epochs = 100

# Augment the training frames on the fly (small rotations, shifts, zooms,
# horizontal mirroring) so the limited dataset stretches further.
augmenter = image.ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
augmenter.fit(x_train)

# Stop once validation loss stops improving for 12 epochs; keep on disk only
# the weights of the epoch with the best validation accuracy.
stop_on_plateau = EarlyStopping(
    monitor='val_loss', min_delta=0, patience=12, verbose=0, mode='min')
weights_path = "weights.best.hdf5"
save_best = ModelCheckpoint(
    weights_path, monitor='val_acc', verbose=1, save_best_only=True,
    mode='max')
training_callbacks = [save_best, stop_on_plateau]

history = model.fit_generator(
    augmenter.flow(x_train, y_train, batch_size=batch_size),
    steps_per_epoch=x_train.shape[0] // batch_size,
    epochs=nb_epochs,
    callbacks=training_callbacks,
    shuffle=True,
    validation_data=(x_val, y_val))

# Report final metrics on the validation split.
score = model.evaluate(x_val, y_val, verbose=0)
print('Final score:', score[0])
print('Final accuracy:', score[1])
# --- Serialisation -------------------------------------------------------
# Persist only the architecture as JSON; the weights are written separately
# by the ModelCheckpoint callback during training.
architecture_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(architecture_json)

# --- Evaluation ----------------------------------------------------------
# NOTE(review): both "test" halves below are drawn from the full dataset X,
# which includes the training frames, so these accuracies are inflated by
# train/test leakage — confirm whether a disjoint hold-out was intended.
x_test, x_test_2, y_test, y_test_2 = train_test_split(X, Y, test_size=0.5)

first_half_eval = model.evaluate(x_test, y_test, verbose=0)
print("------------------------------")
print('Test score:', first_half_eval[0])
print('Test accuracy:', first_half_eval[1])
print("------------------------------")

second_half_eval = model.evaluate(x_test_2, y_test_2, verbose=0)
print('Test 2 score:', second_half_eval[0])
print('Test 2 accuracy:', second_half_eval[1])
print("------------------------------")