- python=3.6
- pip=19.0.3
- tensorflow==1.13.1
- keras==2.2.4
- coremltools==2.1.0
- Layers: 11
- Epochs: 50
- Batch size: 610
- Accuracy: 0.8489 as reported during training (the accuracy observed in practice appears to be much lower)
# Build, compile, and persist the CNN classifier (Keras Sequential API).
# NOTE(review): `input_shape` and `classes_count` are defined elsewhere in the script.
model = Sequential([
    # Feature extractor: two conv blocks, each followed by pooling and dropout.
    Conv2D(48, (3, 3), strides=(1, 1), activation='relu', input_shape=input_shape),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Dropout(0.3),
    Conv2D(32, (5, 5), activation='relu'),
    MaxPooling2D(pool_size=(3, 3)),
    Dropout(0.2),
    # Classifier head: flatten, one hidden layer, softmax over character classes.
    Flatten(),
    Dense(128, activation='relu'),
    Dense(classes_count, activation='softmax'),
])
model.summary()
# Multi-class setup: one-hot labels -> categorical cross-entropy + softmax output.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
...
# Persist weights + architecture in a single HDF5 file for later conversion.
model.save('simpsons_model.h5')
Convert the Keras model (.h5) to a Core ML model (.mlmodel) using coremltools:
# Convert the trained Keras model into a Core ML model and attach metadata.
keras_converter = coremltools.converters.keras
coreml_model = keras_converter.convert(
    model,
    input_names='image',
    image_input_names='image',  # treat the 'image' input as an image, not a raw MLMultiArray
    class_labels=labels,
)

# Human-readable metadata shown in Xcode's model inspector.
coreml_model.author = 'Makarov Anton'
coreml_model.short_description = 'The Simpsons Classifier'
coreml_model.input_description['image'] = 'Image of one of the Simpsons characters'
# 'output1' is presumably the converter's default name for the probability output
# — verify against the generated .mlmodel if the name is changed upstream.
coreml_model.output_description['output1'] = 'Character recognition probability'
coreml_model.save('simpsons_model.mlmodel')
- Swift
- import CoreML, Vision, and AVKit (AVKit provides the live camera feed)
/// Wraps the generated `simpsons_model` Core ML model for use with Vision.
/// - Returns: A `VNCoreMLModel` ready for `VNCoreMLRequest`, or `nil` if the
///   conversion throws (e.g. the bundled model is missing or incompatible).
private func getCoreMLModel() -> VNCoreMLModel? {
    // `try?` already maps a thrown error to nil, so no guard is needed.
    return try? VNCoreMLModel(for: simpsons_model().model)
}