My Keras to CoreML conversion fails at this point. I'm using standalone Keras and everything works fine during training, but the conversion step does not.
```python
import time

import coremltools
import coremltools.proto.FeatureTypes_pb2 as ft
from keras.callbacks import TensorBoard
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential, load_model

# (dense_layers, layer_sizes, conv_layers, i, X, Y, X_val, Y_val and
#  CLASSNAME_SIZE are defined earlier in the script)

# Grid search over network configurations and train each one.
for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
            print(NAME)
            i = i + 1
            print(i)

            model = Sequential()
            model.add(Conv2D(8, (5, 5), padding='same', activation='relu', input_shape=X.shape[1:]))
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
            model.add(Dropout(0.2))

            model.add(Conv2D(layer_size, (3, 3), padding='same', activation='relu'))
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
            model.add(Dropout(0.2))

            for l in range(conv_layer - 1):
                model.add(Conv2D(layer_size, (3, 3), padding='same', activation='relu'))
                model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
                model.add(Dropout(0.2))

            model.add(Flatten())
            for _ in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation('relu'))

            model.add(Dense(CLASSNAME_SIZE))
            model.add(Activation('softmax'))

            tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
            model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
            model.fit(X, Y, batch_size=32, epochs=1,
                      validation_data=(X_val, Y_val), shuffle=True, callbacks=[tensorboard])

            model.save('traffic_signsv8.model')
            model.save('traffic_signsv8.h5')

# Convert the last saved model to Core ML -- this is where it fails.
model = load_model('traffic_signsv8.h5')
coremlModel = coremltools.converters.keras.convert(model,
                                                   input_names='image',
                                                   image_input_names='image',
                                                   input_name_shape_dict={'input_1:0': [3, 48, 48, 1]},
                                                   image_scale=1.0 / 255.0)
coremlModel.save('traffic_signsv8.mlmodel')

# Patch the saved spec so the input is treated as a 48x48 RGB image.
spec = coremltools.utils.load_spec("traffic_signsv8.mlmodel")
input = spec.description.input[0]
input.type.imageType.colorSpace = ft.ImageFeatureType.RGB
input.type.imageType.height = 48
input.type.imageType.width = 48
coremltools.utils.save_spec(spec, "try.mlmodel")
```
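For reference, this is a minimal sketch of how I check what input name and shape the saved model actually reports, since that is what the `input_name_shape_dict` key ('input_1:0' above) has to match; the tensor name and shape in the comments are only examples, not output from my run:

```python
from keras.models import load_model

# Load the model saved above and print what Keras reports for its input;
# the key passed to input_name_shape_dict should correspond to this name.
model = load_model('traffic_signsv8.h5')
print(model.input)        # e.g. Tensor("conv2d_1_input:0", shape=(?, 48, 48, ?), dtype=float32)
print(model.input_shape)  # e.g. (None, 48, 48, 1) -- channels depend on X.shape[1:]
```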