AWS-DL-Challenge-model-training.py
import itertools
import os
import random
# import keras
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from IPython.display import Image, display
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dense, Dropout, Flatten, MaxPooling2D,
                                     SeparableConv2D)
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from habana_frameworks.tensorflow import load_habana_module
# run Keras in graph (non-eager) mode
tf.compat.v1.disable_eager_execution()
load_habana_module()
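# Sketch (not in the original file): load_habana_module() registers the Habana Gaudi
# device with TensorFlow; listing the visible devices is a quick sanity check. The
# exact device string depends on the SynapseAI / TensorFlow versions in use.
print("Visible devices:", tf.config.list_physical_devices())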
real = "Dataset/real_and_fake_face/training_real"
fake = "Dataset/real_and_fake_face/training_fake"
datadir = "Dataset/real_and_fake_face"
real_path = os.listdir(real)
fake_path = os.listdir(fake)
def load_img(path):
    # read an image, resize to 224x224 and convert BGR (OpenCV default) to RGB
    image = cv2.imread(path)
    image = cv2.resize(image, (224, 224))
    return image[..., ::-1]
categories = ["training_real", "training_fake"]
# peek at a single sample: read the first file of the first category and stop
for category in categories:
    path = os.path.join(datadir, category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
        break
    break
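# Sketch (not in the original excerpt): matplotlib is imported above, so the peeked
# grayscale sample could be displayed like this; purely illustrative.
plt.imshow(img_array, cmap="gray")
plt.title(category)
plt.axis("off")
plt.show()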
training_data = []
IMG_SIZE = 224
categories = ["training_real" , "training_fake"]
def create_training_data():
    for category in categories:
        path = os.path.join(datadir, category)
        class_num = categories.index(category)  # 0 = real, 1 = fake
        for img in os.listdir(path):
            try:
                # read as 3-channel colour so every sample matches the (224, 224, 3) shape used below
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception:
                # skip unreadable files
                pass
create_training_data()
# shuffle the (image, label) pairs in place; keeping the Python list avoids having to
# build an object-dtype NumPy array just for shuffling
random.shuffle(training_data)
X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
y = np.array(y)
X = X/255.0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# per-row normalised copies; note X has already been scaled to [0, 1] above and these
# variables are not used in the rest of this excerpt
train_x = tf.keras.utils.normalize(X_train, axis=1)
test_x = tf.keras.utils.normalize(X_test, axis=1)
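# Sketch (not in the original excerpt): ImageDataGenerator is imported above but unused
# here; a minimal augmentation pipeline could look like this. The parameter values are
# illustrative assumptions, not taken from the original script.
datagen = ImageDataGenerator(rotation_range=10, zoom_range=0.1, horizontal_flip=True)
# datagen.flow(X_train, y_train, batch_size=32) could then be passed to model.fit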
# simple CNN built from scratch for the 2-class (real vs. fake) problem
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=X.shape[1:]),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(2, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
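# Sketch (not in the original excerpt): the file as shown never trains this model.
# A minimal training/evaluation run might look like this; the epoch count, batch size
# and validation split are illustrative assumptions.
model.fit(X_train, y_train, validation_split=0.1, epochs=5, batch_size=32)
model.evaluate(X_test, y_test)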
# transfer learning: reuse the ImageNet-trained VGG16 network
vgg16_model = keras.applications.vgg16.VGG16()
model = Sequential()
for layer in vgg16_model.layers[:-1]:  # copy everything except the 1000-way ImageNet head
    model.add(layer)
for layer in model.layers:             # freeze the copied layers
    layer.trainable = False
model.add(Dense(2, activation='softmax'))  # new 2-class head (real vs. fake)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.save('final_model')
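# Sketch (not in the original excerpt): the transfer-learning model is never trained in
# this file either; a minimal fine-tuning run plus the confusion matrix suggested by the
# sklearn import above could look like this. Epoch count and batch size are assumptions.
model.fit(X_train, y_train, validation_split=0.1, epochs=5, batch_size=32)
y_pred = np.argmax(model.predict(X_test), axis=1)
print(confusion_matrix(y_test, y_pred))
# re-saving here (model.save('final_model')) would capture the fine-tuned weights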
def load_img(path):
    # note: this redefines load_img above; read from disk, resize, convert BGR to RGB
    image = cv2.imread(path)
    image = cv2.resize(image, (224, 224))
    return image[..., ::-1]
def prepare(image):
    # resize a single image and add the batch dimension expected by model.predict
    IMG_SIZE = 224
    new_array = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
    return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
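# Sketch (not in the original excerpt): how load_img/prepare could be used for a
# single-image prediction. The file name below is a placeholder, and the /255.0
# mirrors the scaling applied to the training data above.
# img = load_img("Dataset/real_and_fake_face/training_real/<some_image>.jpg")
# pred = model.predict(prepare(img) / 255.0)
# print(categories[int(np.argmax(pred))])  # "training_real" or "training_fake"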