NaN values when predicting with a ResNet model

coremltools version: 6.2.0

When I run the Core ML model in Python, the result is good: {'var_840': array([[-8.15439941e+02, 2.88793579e+02, -3.83110474e+02, -8.95208740e+02, -3.53131561e+02, -3.65339783e+02, -4.94590851e+02, 6.24686813e+01, -5.92614822e+01, -9.67470627e+01, -4.30247498e+02, -9.27047348e+01, 2.19661942e+01, -2.96691345e+02, -4.26566772e+02........

But when I run it in Xcode, the result looks like: [-inf,inf,nan,-inf,nan,nan,nan,nan,nan,-inf,-inf,-inf,-inf,-inf,-inf,nan,-inf,-inf,nan,-inf,nan,nan,-inf,nan,-inf,-inf,-inf,nan,nan,nan,nan,nan,nan,nan,nan,nan,nan,-inf,nan,nan,nan,nan,-inf,nan,-inf .......

Step 1: Convert ResNet-50 to Core ML:


import torch
import torchvision

# Load a pre-trained ResNet-50 model.
torch_model = torchvision.models.resnet50(pretrained=True)
# Set the model in evaluation mode.
torch_model.eval()
# Trace the model with random data.
example_input = torch.rand(1, 3, 224, 224) 
traced_model = torch.jit.trace(torch_model, example_input)
out = traced_model(example_input)
# Define the model input as an image.
# Note: no scale or bias is set here, so no input normalization is applied.
import coremltools as ct
image_input = ct.ImageType(shape=example_input.shape)
# Using image_input in the inputs parameter:
# Convert to Core ML using the Unified Conversion API.
model = ct.convert(
    traced_model,
    inputs=[image_input],
    compute_units=ct.ComputeUnit.CPU_ONLY,
)
# Save the converted model.
model.save("resnettest.mlmodel")
# Print a confirmation message.
print('model converted and saved')
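
Since the ImageType above is created without scale or bias, the converted model receives raw 0-255 pixel values. If the PyTorch model expects the usual torchvision ImageNet normalization, a minimal sketch of what the ImageType could look like instead is shown below (the mean/std values are the standard ImageNet ones and are an assumption about the training pipeline, not something taken from this post):

import coremltools as ct

# Sketch only: fold ImageNet mean/std normalization into the Core ML image input.
# scale = 1 / (std * 255) and bias = -mean / std, where 0.226 approximates the
# per-channel stds (0.229, 0.224, 0.225) used by torchvision.
image_input = ct.ImageType(
    shape=example_input.shape,
    scale=1.0 / (0.226 * 255.0),
    bias=[-0.485 / 0.226, -0.456 / 0.226, -0.406 / 0.226],
)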

Step 2: Test the Core ML model in Python:

import coremltools as ct
import PIL
import numpy as np
# Load the model
model = ct.models.MLModel('/Users/ngoclh/Downloads/resnettest.mlmodel')
print(model)
img_path = "/Users/ngoclh/gitlocal/DetectCirtochApp/DetectCirtochApp/resources/image.jpg"
img = PIL.Image.open(img_path)
img = img.resize([224, 224], PIL.Image.LANCZOS)  # ANTIALIAS was removed in newer Pillow; LANCZOS is the same filter
coreml_out_dict = model.predict({"input_1": img})
print(coreml_out_dict)

Step 3: Test the Core ML model in Xcode:

func getFeature() {
    do {
        let deepLab = try VGG_emb.init() //mobilenet_emb.init()//cirtorch_emb.init()
        let image = UIImage(named: "image.jpg")
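        // Note: pixelBuffer(width:height:) is not a UIKit API; it is assumed here to come from a helper extension (e.g. CoreMLHelpers).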
        let pixBuf = image!.pixelBuffer(width: 224, height: 224)!
        guard let output = try? deepLab.prediction(input_1: pixBuf) else {
            return
        }
        let names = output.featureNames
        print("ngoc names: ", names)
        for name in names {
            let feature = output.featureValue(for: name)
            print("ngoc feature: ", feature)
        }
    } catch {
        print(error)
    }
}

Replies

Hello! I reproduced the model that you've included here and got similar predictions with the same model and image from both Python and Swift. One thing you might try is to load the .mlmodel and compile it within Swift. Here's some code that does this successfully:

import CoreImage
import CoreML

func convertCIImageToCGImage(inputImage: CIImage) -> CGImage? {
    // CIContext(options:) never returns nil, so no nil check is needed here.
    let context = CIContext(options: nil)
    return context.createCGImage(inputImage, from: inputImage.extent)
}

let modelURL = URL(fileURLWithPath: "resnettest.mlmodel") // Local URL
let compiledURL = try MLModel.compileModel(at: modelURL)
let mlmodel = try MLModel(contentsOf: compiledURL)
let imageURL = URL(fileURLWithPath: "image.jpg") // Local URL
let image = CIImage(contentsOf: imageURL)
let cgImage = convertCIImageToCGImage(inputImage: image!)
let inputName = "x_1" // Model input name
let imageConstraint = mlmodel.modelDescription.inputDescriptionsByName[inputName]!.imageConstraint!
let imageValue = try MLFeatureValue(cgImage: cgImage!, constraint: imageConstraint)
let imageFeatureProvider = try MLDictionaryFeatureProvider(dictionary: [inputName: imageValue])
let output = try mlmodel.prediction(from: imageFeatureProvider)

One note: when I converted the model, the input layer was named x_1, not input_1.
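
If you want to double-check the input name of your converted model from Python, a minimal sketch (assuming the file is the resnettest.mlmodel saved above) is:

import coremltools as ct

# Print the input feature names and types from the model spec.
spec = ct.models.MLModel("resnettest.mlmodel").get_spec()
for inp in spec.description.input:
    print(inp.name, inp.type.WhichOneof("Type"))

You can also pass name="input_1" to ct.ImageType during conversion if you want to fix the input name yourself.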