I am trying to create a Pipeline with 3 sub-models: a Feature Vectorizer -> an NN regressor converted from PyTorch -> a Feature Extractor (to convert the output tensor to a Double value).
The pipeline works fine when I use just a Vectorizer and an Extractor; this is the code:
import coremltools as ct
from coremltools import models
from coremltools.models import datatypes, pipeline

# Vectorizer: packs the three scalar inputs into a single "input" array
vectorizer = models.feature_vectorizer.create_feature_vectorizer(
    input_features=["windSpeed", "theoreticalPowerCurve", "windDirection"],
    output_feature_name="input"
)
preProc_spec = vectorizer[0]  # create_feature_vectorizer returns (spec, dimension)
ct.utils.convert_double_to_float_multiarray_type(preProc_spec)

# Extractor: pulls a single Double value out of the array
extractor = models.array_feature_extractor.create_array_feature_extractor(
    input_features=[("input", datatypes.Array(3,))],
    output_name="output",
    extract_indices=1
)
ct.utils.convert_double_to_float_multiarray_type(extractor)

pipeline_network = pipeline.PipelineRegressor(
    input_features=["windSpeed", "theoreticalPowerCurve", "windDirection"],
    output_features=["output"]
)
pipeline_network.add_model(preProc_spec)
pipeline_network.add_model(extractor)
ct.utils.convert_double_to_float_multiarray_type(pipeline_network.spec)
ct.utils.save_spec(pipeline_network.spec, "Final.mlpackage")
This model works fine. I created a regression NN using PyTorch and converted it to Core ML as well:
import torch
import torch.nn as nn

class TurbinePowerModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.activation1 = nn.ReLU()
        #self.linear2 = nn.Linear(5, 4)
        #self.activation2 = nn.ReLU()
        self.output = nn.Linear(4, 1)

    def forward(self, x):
        #x = F.normalize(x, dim=0)
        x = self.linear1(x)
        x = self.activation1(x)
        #x = self.linear2(x)
        #x = self.activation2(x)
        x = self.output(x)
        return x

    def forward_inference(self, windSpeed, theoreticalPowerCurve, windDirection):
        input_tensor = torch.tensor([windSpeed,
                                     theoreticalPowerCurve,
                                     windDirection], dtype=torch.float32)
        return self.forward(input_tensor)

model = torch.load('TurbinePowerRegression-1layer.pt', weights_only=False)
import coremltools as ct
print(ct.__version__)

import pandas as pd
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('T1_clean.csv', delimiter=';')
X = df[['WindSpeed', 'TheoreticalPowerCurve', 'WindDirection']]
y = df[['ActivePower']]

scaler = StandardScaler()
X = scaler.fit_transform(X)
y = scaler.fit_transform(y)

X_tensor = torch.tensor(X, dtype=torch.float32)
y_tensor = torch.tensor(y, dtype=torch.float32)

traced_model = torch.jit.trace(model, X_tensor[0])
mlmodel = ct.convert(
    traced_model,
    inputs=[ct.TensorType(name="input", shape=X_tensor[0].shape)],
    classifier_config=None  # Optional, for classification tasks
)
mlmodel.save("TurbineBase.mlpackage")
This model has a MultiArray (Float32 3) as input and a MultiArray (Float32 1) as output.
When I try to include it in the middle of the pipeline (adjusting the output and input types of the other models accordingly), the process runs fine, but I get an error when opening the generated model in Xcode.
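For reference, this is roughly how I splice the converted model between the vectorizer and the extractor. The output name I give the regressor ("regressorOutput") and the rebuilt extractor are my own choices, so the mistake may well be in this part:

# Sketch: vectorizer -> NN regressor -> extractor
nn_spec = mlmodel.get_spec()  # the converted PyTorch regressor

# Rebuild the extractor so it reads the regressor's single-element output
extractor = models.array_feature_extractor.create_array_feature_extractor(
    input_features=[("regressorOutput", datatypes.Array(1,))],  # assumed output name of the NN
    output_name="output",
    extract_indices=0
)
ct.utils.convert_double_to_float_multiarray_type(extractor)

pipeline_network = pipeline.PipelineRegressor(
    input_features=["windSpeed", "theoreticalPowerCurve", "windDirection"],
    output_features=["output"]
)
pipeline_network.add_model(preProc_spec)  # vectorizer
pipeline_network.add_model(nn_spec)       # NN regressor
pipeline_network.add_model(extractor)     # array -> Double
ct.utils.convert_double_to_float_multiarray_type(pipeline_network.spec)
ct.utils.save_spec(pipeline_network.spec, "Final.mlpackage")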
What's is missing on the models. How can I set or adjust this metadata properly?
Thanks!!!
After updating to macOS 15.2 beta, the YOLO11 object detection model exported to Core ML outputs incorrect, abnormal bounding boxes.
It also doesn't work in iOS apps built on a macOS 15.2 Mac.
The same model worked fine on macOS 14.1.
When we train a custom YOLO11 model in Python, export it to Core ML, and test it in the preview tab of the mlpackage on macOS 15.2 with Xcode 16.0, we get the result described above.
We use MLModel in our app with two file formats: mlmodel and mlpackage. We find that when the model is released, models in the mlmodel format crash with some probability, and the majority of these crashes (over 85%) occur on iOS 16.x. Here is the crash stack:
Exception Type: SIGTRAP
Exception Codes: TRAP_BRKPT at 0x1b48e855c
Crashed Thread: 5
Thread 5 Crashed:
0 libdispatch.dylib 0x00000001b48e855c _dispatch_semaphore_dispose.cold.1 + 40
1 libdispatch.dylib 0x00000001b48b2b28 _dispatch_semaphore_signal_slow
2 libdispatch.dylib 0x00000001b48b0e58 _dispatch_dispose + 208
3 AppleNeuralEngine 0x00000001ef07b51c -[_ANEProgramForEvaluation .cxx_destruct] + 32
4 libobjc.A.dylib 0x00000001a67ed4a4 object_cxxDestructFromClass(objc_object*, objc_class*) + 116
5 libobjc.A.dylib 0x00000001a67f221c objc_destructInstance + 80
6 libobjc.A.dylib 0x00000001a67fb9d0 _objc_rootDealloc + 80
7 AppleNeuralEngine 0x00000001ef079e04 -[_ANEProgramForEvaluation dealloc] + 72
8 AppleNeuralEngine 0x00000001ef07ca70 -[_ANEModel .cxx_destruct] + 44
9 libobjc.A.dylib 0x00000001a67ed4a4 object_cxxDestructFromClass(objc_object*, objc_class*) + 116
10 libobjc.A.dylib 0x00000001a67f221c objc_destructInstance + 80
11 libobjc.A.dylib 0x00000001a67fb9d0 _objc_rootDealloc + 80
12 AppleNeuralEngine 0x00000001ef07bd7c -[_ANEModel dealloc] + 136
13 CoreFoundation 0x00000001ad4563cc cow_cleanup + 168
14 CoreFoundation 0x00000001ad49044c -[__NSDictionaryM dealloc] + 148
15 Espresso 0x00000001bb19c7a4 Espresso::ANERuntimeEngine::compiler::reset() + 1340
16 Espresso 0x00000001bb19cac8 Espresso::ANERuntimeEngine::compiler::~compiler() + 108
17 Espresso 0x00000001bacd69e4 std::__1::__shared_weak_count::__release_shared() + 84
18 Espresso 0x00000001ba944d00 std::__1::__hash_table<std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>, std::__1::__unordered_map_hasher<Espresso::platform, std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>, std::__1::hash<Espresso::platform>, std::__1::equal_to<Espresso::platform>, true>, std::__1::__unordered_map_equal<Espresso::platform, std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>, std::__1::equal_to<Espresso::platform>, std::__1::hash<Espresso::platform>, true>, std::__1::allocator<std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>>>::__deallocate_node(std::__1::__hash_node_base<std::__1::__hash_node<std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>, void*>*>*) + 40
19 Espresso 0x00000001ba8ea640 std::__1::__hash_table<std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>, std::__1::__unordered_map_hasher<Espresso::platform, std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>, std::__1::hash<Espresso::platform>, std::__1::equal_to<Espresso::platform>, true>, std::__1::__unordered_map_equal<Espresso::platform, std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>, std::__1::equal_to<Espresso::platform>, std::__1::hash<Espresso::platform>, true>, std::__1::allocator<std::__1::__hash_value_type<Espresso::platform, std::__1::shared_ptr<Espresso::net_compiler>>>>::~__hash_table() + 28
20 Espresso 0x00000001ba8e5750 Espresso::net::~net() + 396
21 Espresso 0x00000001bacd69e4 std::__1::__shared_weak_count::__release_shared() + 84
22 Espresso 0x00000001bad750e4 std::__1::__vector_base<std::__1::shared_ptr<Espresso::net>, std::__1::allocator<std::__1::shared_ptr<Espresso::net>>>::clear() + 52
23 Espresso 0x00000001ba902448 std::__1::__vector_base<std::__1::shared_ptr<Espresso::net>, std::__1::allocator<std::__1::shared_ptr<Espresso::net>>>::~__vector_base() + 36
24 Espresso 0x00000001ba8ed99c std::__1::unique_ptr<EspressoLight::espresso_plan::priv_t, std::__1::default_delete<EspressoLight::espresso_plan::priv_t>>::reset(EspressoLight::espresso_plan::priv_t*) + 188
25 Espresso 0x00000001ba95b7fc EspressoLight::espresso_plan::~espresso_plan() + 72
26 Espresso 0x00000001ba902078 EspressoLight::espresso_plan::~espresso_plan() + 16
27 Espresso 0x00000001ba8e690c espresso_plan_destroy + 372
28 CoreML 0x00000001c48c45cc -[MLNeuralNetworkEngine _deallocContextAndPlan] + 40
29 CoreML 0x00000001c48c43bc -[MLNeuralNetworkEngine dealloc] + 40
30 libobjc.A.dylib 0x00000001a67ed4a4 object_cxxDestructFromClass(objc_object*, objc_class*) + 116
31 libobjc.A.dylib 0x00000001a67f221c objc_destructInstance + 80
32 libobjc.A.dylib 0x00000001a67fb9d0 _objc_rootDealloc + 80
~~~~ Our code that releases the MLModel object ~~~~
Moreover, we use a synchronization mechanism to ensure that the release of the MLModel and the data processing of the model (by calling [model predictionFromFeatures]) do not occur simultaneously. What could be the possible causes of the problem, and how can we prevent it from happening? Any advice would be appreciated.
FB:FB16079804
Hello,
I've turned fastai's Cat vs Dog model into a model that distinguishes lemons from limes, and it all works fine in a notebook.
I am now looking to convert this model to Core ML for my iOS app using TorchScript and Apple's official guidelines for coremltools.
The model converts, but I cannot see the Preview tab in Xcode. Has anyone tried this kind of conversion? I guess my input types are not matching coremltools' expectations for the preview, but I am stuck. Here is my code.
import torch
import coremltools as ct
from fastai.vision.all import *
import json
from torchvision import transforms

# Load your fastai model (replace with your actual path)
learn = load_learner('lemonmodel.pkl')

# Example input image (you can use any image from your dataset)
input_image = PILImage.create('example.jpg')

# Preprocess the image (assuming you used these transforms during training)
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
input_tensor = to_tensor(input_image)
input_tensor = normalize(input_tensor)  # Apply normalization

# Add a batch dimension
input_tensor = input_tensor.unsqueeze(0)

# Ensure float32 type
input_tensor = input_tensor.float()

# Trace the model
trace = torch.jit.trace(learn.model, input_tensor)

# Define the Core ML input type (considering your model's input shape)
_input = ct.ImageType(
    name="input_1",
    shape=input_tensor.shape,
    bias=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
    scale=1./(255*0.226)
)

# Convert the model to Core ML format
mlmodel = ct.convert(
    trace,
    inputs=[_input],
    minimum_deployment_target=ct.target.iOS14  # Optional, set deployment target
)

# Set model type as 'imageClassifier' for the Preview tab
mlmodel.type = 'imageClassifier'

# Preview parameters (assuming two classes: 'lemon' and 'lime')
labels_json = {
    "imageClassifier": {
        "labels": ["lemon", "lime"],
        "input": {
            "shape": list(input_tensor.shape),  # Provide the actual input shape
            "mean": [0.485, 0.456, 0.406],      # Match normalization mean
            "std": [0.229, 0.224, 0.225]        # Match normalization std
        },
        "output": {
            "shape": [1, 2]  # Output shape for your model (2 classes)
        }
    }
}

# Set the metadata with the 'preview' params
mlmodel.user_defined_metadata['com.apple.coreml.model.preview.params'] = json.dumps(labels_json)

# Save the model as .mlmodel
mlmodel.save("LemonClassifierGemini.mlmodel")
My model is:
Input batch shape: torch.Size([32, 3, 192, 192])
Labels batch shape: torch.Size([32])
Validation Loss: None, Validation Metric: None
Predictions shape: torch.Size([63, 2])
Targets shape: torch.Size([63])
Code for the model:
searches = 'lemon', 'lime'
path = Path('lemon_or_not')

for o in searches:
    dest = (path/o)
    dest.mkdir(exist_ok=True, parents=True)
    download_images(dest, urls=search_images(f'{o} photo'))
    time.sleep(5)
    resize_images(path/o, max_size=400, dest=path/o)

dls = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    get_y=parent_label,
    item_tfms=[Resize(192, method='squish')]
).dataloaders(path, bs=32)

dls.show_batch(max_n=6)

learn = vision_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(3)

is_lemon, _, probs = learn.predict(PILImage.create('lemon.jpg'))
print(f"This is a: {is_lemon}.")
print(f"Probability it's a lemon: {probs[0]:.4f}")
This is a: lemon.
Probability it's a lemon: 1.0000
learn.export('lemonmodel.pkl')
I am stuck on why it doesn't show the Preview tab.
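One thing I have not tried yet, so treat it as a guess: instead of setting the preview parameters by hand, convert the traced model as a proper classifier by passing a ClassifierConfig. As far as I understand, Xcode's Preview tab picks up image-input classifier models automatically:

# Convert as a classifier so the class labels are baked into the model
classifier_config = ct.ClassifierConfig(class_labels=["lemon", "lime"])

mlmodel = ct.convert(
    trace,                        # the traced model from above
    inputs=[_input],              # the ct.ImageType defined above
    classifier_config=classifier_config,
    minimum_deployment_target=ct.target.iOS14,
)
mlmodel.save("LemonClassifier.mlmodel")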
I used the multifunction models feature introduced in iOS 18 to merge three VAE Encoder models with different resolutions into a single model. However, loading this merged model on iOS causes a crash with the error EXC_BAD_ACCESS (code=1, address=0x0). In contrast, merging VAE Decoder models using the same method does not result in crashes. Additionally, merging only two VAE Decoder models with different resolutions also leads to a crash when loaded on iOS. As for the Stable Diffusion Unet model, merging two or even three models does not cause any crashes, and it successfully generates images as expected.
I use the following code to load the model:
let config = MLModelConfiguration()
config.computeUnits = .cpuAndNeuralEngine
config.functionName = "test"
try MLModel(contentsOf: url, configuration: config)
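For context, the merge itself was done on the Python side with coremltools' multifunction utilities, roughly like this (the package paths and function names below are placeholders, not my real ones):

from coremltools.utils import MultiFunctionDescriptor, save_multifunction

# Merge three single-function VAE encoder packages into one multifunction model
desc = MultiFunctionDescriptor()
desc.add_function("encoder_512.mlpackage", src_function_name="main", target_function_name="encoder_512")
desc.add_function("encoder_768.mlpackage", src_function_name="main", target_function_name="encoder_768")
desc.add_function("encoder_1024.mlpackage", src_function_name="main", target_function_name="encoder_1024")
desc.default_function_name = "encoder_512"

save_multifunction(desc, "encoders_multifunction.mlpackage")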
I am using the DepthAnything V2 model provided by Apple on the developer website. On my iPhone 15 Pro, if I choose .all or .cpuAndNeuralEngine, it gets stuck loading the model.
let config = MLModelConfiguration()
config.computeUnits = .cpuAndGPU // works normally when not using the Neural Engine
let model = try await DepthModel.load(configuration: config)
It fails with the following error:
E5RT encountered an STL exception. msg = MILCompilerForANE error: failed to compile ANE model using ANEF. Error=无法与帮助程序通信。 (translation: "Unable to communicate with the helper.")
E5RT: MILCompilerForANE error: failed to compile ANE model using ANEF. Error=无法与帮助程序通信。 (11)
Hi everyone,
I'm working on an iOS app that uses VisionKit and I'm exploring the .visualLookUp feature. Specifically, I want to extract the detailed information that Visual Look Up provides after identifying an object in an image (e.g., if the object is a flower, retrieve its name; if it’s a clothing tag, get the tag's content).
I'm working on a cross-platform AI app. It is a CMake project. The inference part should be built as a separate library on Windows and macOS; on macOS it should be built with Objective-C and Core ML.
Here are my steps, roughly:
Create an Xcode project for Core ML inference and build it as a static library. Models are compiled to ".mlmodelc", and the code is compiled into a binary ".a" library.
Create a CMake project for the app, and link the ".a" library built by Xcode.
Run the app.
I initialize the Core ML model like this (just for demonstration):
#include "det.h" // the model header generated by xcode
auto url = [[NSURL alloc] initFileURLWithPath:[NSString stringWithFormat:@"%@/%@", dir, @"det.mlmodelc"]];
auto model = [[det alloc] initWithContentsOfURL:url error:&error]; // no error
The URL is valid, and the initialization doesn't report any error. However, when I try to run inference with code like this:
auto cvPixelBuffer = createCVPixelBuffer(960, 960); // util function
auto preds = [model predictionFromImage:cvPixelBuffer error:NULL];
The output preds is null, and I get these errors:
2024-12-10 14:52:37.678201+0800 望言OCR[50204:5615023] [e5rt] E5RT encountered unknown exception.
2024-12-10 14:52:37.678237+0800 望言OCR[50204:5615023] [coreml] E5RT: E5RT encountered an unknown exception. (11)
2024-12-10 14:52:37.870739+0800 望言OCR[50204:5615023] H11ANEDevice::H11ANEDeviceOpen kH11ANEUserClientCommand_DeviceOpen call failed result=0xe00002e2
2024-12-10 14:52:37.870758+0800 望言OCR[50204:5615023] Device Open failed - status=0xe00002e2
2024-12-10 14:52:37.870760+0800 望言OCR[50204:5615023] (Single-ANE System) Critical Error: Could not open the only H11ANE device
2024-12-10 14:52:37.870769+0800 望言OCR[50204:5615023] H11ANEDeviceOpen failed: 0x17
2024-12-10 14:52:37.870845+0800 望言OCR[50204:5615023] H11ANEDevice::H11ANEDeviceOpen kH11ANEUserClientCommand_DeviceOpen call failed result=0xe00002e2
2024-12-10 14:52:37.870848+0800 望言OCR[50204:5615023] Device Open failed - status=0xe00002e2
2024-12-10 14:52:37.870849+0800 望言OCR[50204:5615023] (Single-ANE System) Critical Error: Could not open the only H11ANE device
2024-12-10 14:52:37.870853+0800 望言OCR[50204:5615023] H11ANEDeviceOpen failed: 0x17
2024-12-10 14:52:37.870857+0800 望言OCR[50204:5615023] [common] start: ANEDeviceOpen() failed : ret=23 :
It seems that Core ML failed to find the ANE device. Does anything need to be done before using a Core ML model as a library in a CMake (or other non-Xcode) project?
By the way, code like the above works in a native Xcode app with Core ML (I tested this before), so I guess I'm missing some environment initialization in my non-Xcode project?
My app was rejected because of the error below, but I cannot find any documentation on a key related to Image Playground. My app's minimum deployment target is already set to iOS 18.2.
Rejection Message:
The UIRequiredDeviceCapabilities key in the Info.plist is set in such a way that the app will not install on iPhone running iOS 18.1.1
Next Steps
To resolve this issue, check the UIRequiredDeviceCapabilities key to verify that it contains only the attributes required for the app features or the attributes that must not be present on the device. Attributes specified by a dictionary should be set to true if they are required and false if they must not be present on the device.
Resources
Learn more about the UIRequiredDeviceCapabilities key.
When building the MLModel, it is configured to use the NPU (Neural Engine). It seems that the GPU is used during inference, but it crashes during compilation.
The stack is as follows:
I'm trying to set up Facebook AI's "Segment Anything" MLModel to compare its performance and efficacy on-device against the Vision library's Foreground Instance Mask Request.
The Vision request accepts any reasonably-sized image for processing, and then has a method to produce an output at the same resolution as the input image. Conversely, the MLModel for Segment Anything accepts a 1024x1024 image for inference and outputs a 1024x1024 image for output.
What is the best way to work with non-square images, such as 4:3 camera photos? I can basically think of 3 methods for accomplishing this:
1. Scale the image to 1024x1024, ignoring aspect ratio, then inversely scale the output back to the original size. However, I have a big concern that squashing the content will result in poor inference results.
2. Scale the image, preserving its aspect ratio so its minimum dimension is 1024, then run the model multiple times on a sliding 1024x1024 window and aggregate the results. My main concern here is the complexity of de-duping the output, when each run could make different outputs based on how objects are cropped.
3. Fit the image within 1024x1024 and pad with black pixels to make a square. I'm not sure if the border will muck up the inference.
Anyway, this seems like it must be a well-solved problem in ML, but I'm having difficulty finding an authoritative best practice.
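For what it's worth, here is a rough sketch of option 3 in Python with PIL/NumPy, just to pin down the resizing math; the 1024 target size comes from the model described above and everything else is a placeholder:

import numpy as np
from PIL import Image

TARGET = 1024  # the model's fixed square input size

def letterbox(image, target=TARGET):
    # Aspect-fit the image into a target x target square, padding with black
    scale = target / max(image.width, image.height)
    new_w, new_h = round(image.width * scale), round(image.height * scale)
    resized = image.resize((new_w, new_h), Image.BILINEAR)
    canvas = Image.new("RGB", (target, target), (0, 0, 0))
    canvas.paste(resized, (0, 0))  # content in the top-left, padding to the right/bottom
    return canvas, (new_w, new_h)

def unletterbox(mask_square, content_size, original_size):
    # Crop the padding out of the square output and resize back to the original image size
    new_w, new_h = content_size
    cropped = np.asarray(mask_square)[:new_h, :new_w]  # drop the padded region
    return np.asarray(Image.fromarray(cropped).resize(original_size, Image.BILINEAR))

# Usage sketch:
# square, content_size = letterbox(photo)  # feed `square` to the model
# mask = unletterbox(model_output, content_size, (photo.width, photo.height))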
Problem
I have set SWIFT_UPCOMING_FEATURE_EXISTENTIAL_ANY to true under Build Settings > Swift Compiler - Upcoming Features to adopt the existential any proposal.
Then the following errors appear in the generated MLModel class. Since this is an auto-generated file, I don't know how to deal with them:
Use of protocol 'MLFeatureProvider' as a type must be written 'any MLFeatureProvider'
Use of protocol 'Error' as a type must be written 'any Error'
Environment
Xcode 16.0
Xcode 16.1 Beta 2
What I tried
Deleted the DerivedData cache and regenerated the MLModel class files
I also tried DepthAnythingV2SmallF16P6.mlpackage to verify whether the problem is with my own mlmodel
I tried the above after setting up Swift 6 in Xcode
I also used coremlc from the command line, with Swift 6 specified, to generate the MLModel class files
I'm finding that the model gives very jagged edges. This may be due to the output resolution: Grayscale16Half 518 × 392.
I have tried to re-convert this model on Colab but have not had much luck, as this is very much out of my comfort zone. Has anyone else dealt with this? The model would be perfect if I could just overcome this issue.
I'm attempting to set up ComfyUI-CoreMLSuite on my Mac Studio.
ComfyUI starts, but no Core ML nodes appear in the add-node list.
I cloned both ComfyUI-CoreMLSuite and ml-stable-diffusion into custom_nodes and restarted the ComfyUI server.
The startup complains that ml-stable-diffusion has no __init__.py:
FileNotFoundError: [Errno 2] No such file or directory: ... /ComfyUI/custom_nodes/ml-stable-diffusion/__init__.py'
It appears to be a show-stopper.
What should I do?
I have followed https://apple.github.io/coremltools/docs-guides/source/installing-coremltools.html but failed.
It looks like the doc is outdated.
In our app we use Core ML, but ever since macOS 15.x was released we have started to see a large number of crashes like this:
Incident Identifier: 424041c3-884b-4e50-bb5a-429a83c3e1c8
CrashReporter Key: B914246B-1291-4D44-984D-EDF84B52310E
Hardware Model: Mac14,12
Process: <REMOVED> [1509]
Path: /Applications/<REMOVED>
Identifier: com.<REMOVED>
Version: <REMOVED>
Code Type: arm64
Parent Process: launchd [1]
Date/Time: 2024-11-13T13:23:06.999Z
Launch Time: 2024-11-13T13:22:19Z
OS Version: Mac OS X 15.1.0 (24B83)
Report Version: 104
Exception Type: SIGABRT
Exception Codes: #0 at 0x189042600
Crashed Thread: 36
Thread 36 Crashed:
0 libsystem_kernel.dylib 0x0000000189042600 __pthread_kill + 8
1 libsystem_c.dylib 0x0000000188f87908 abort + 124
2 libsystem_c.dylib 0x0000000188f86c1c __assert_rtn + 280
3 Metal 0x0000000193fdd870 MTLReportFailure.cold.1 + 44
4 Metal 0x0000000193fb9198 MTLReportFailure + 444
5 MetalPerformanceShadersGraph 0x0000000222f78c80 -[MPSGraphExecutable initWithMPSGraphPackageAtURL:compilationDescriptor:] + 296
6 Espresso 0x00000001a290ae3c E5RT::SharedResourceFactory::GetMPSGraphExecutable(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, NSDictionary*) + 932
.
.
.
43 CoreML 0x0000000192d263bc -[MLModelAsset modelWithConfiguration:error:] + 120
44 CoreML 0x0000000192da96d0 +[MLModel modelWithContentsOfURL:configuration:error:] + 176
45 <REMOVED> 0x000000010497b758 -[<REMOVED> <REMOVED>] (<REMOVED>)
No similar crashes on macOS 12-14!
MetalPerformanceShadersGraph.log
Any clue what is causing this?
Thanks! :)
Hi everyone,
I'm working on integrating object recognition from live video feeds into my existing app by following Apple's sample code. My original project captures video and records it successfully. However, after integrating the Vision-based object detection components (VNCoreMLRequest), no detections occur, and the callback for the request is never triggered.
To debug this issue, I’ve added the following functionality:
Set up AVCaptureVideoDataOutput for processing video frames.
Created a VNCoreMLRequest using my Core ML model.
The video recording functionality works as expected, but no object detection happens. I’d like to know:
How can I debug this further? Which key debug points or logs could help identify where the issue lies?
Have I missed any key configurations? Below is a diff of the modifications I’ve made to my project for the new feature.
Diff of Changes:
(Attach the diff provided above)
Specific Observations:
The captureOutput method is invoked correctly, but there is no output or error from the Vision request callback.
Print statements in my setup function setForVideoClassify() show that the setup executes without errors.
Questions:
Could this be due to issues with my Core ML model compatibility or configuration?
Is the VNCoreMLRequest setup incorrect, or do I need to ensure specific image formats for processing?
Platform:
Xcode 16.1, iOS 18.1, Swift 5, SwiftUI, iPhone 11,
Darwin MacBook-Pro.local 24.1.0 Darwin Kernel Version 24.1.0: Thu Oct 10 21:02:27 PDT 2024; root:xnu-11215.41.3~2/RELEASE_X86_64 x86_64
Any guidance or advice is appreciated! Thanks in advance.
Hello,
I am exploring real-time object detection, and its replacement/overlay with another shape, on live video streams for an iOS app using Core ML and Vision frameworks. My target is to achieve high-speed, real-time detection without noticeable latency, similar to what’s possible with PageFault handling and Associative Caching in OS, but applied to video processing.
Given that this requires consistent, real-time model inference, I’m curious about how well the Neural Engine or GPU can handle such tasks on A-series chips in iPhones versus M-series chips (specifically M1 Pro and possibly M4) in MacBooks. Here are a few specific points I’d like insight on:
Hardware Suitability: How feasible is it to perform real-time object detection with Core ML on the Neural Engine (i.e., can it maintain low latency)? Would the M-series chips (e.g., M1 Pro or newer) offer a tangible benefit for this type of task compared to the A-series in mobile devices? Which A- and M-series chips would be the minimum feasible recommendation for such a task?
Performance Expectations: For continuous, live video object detection, what would be the expected frame rate or latency using an optimized Core ML model? Has anyone benchmarked such applications, and is the M-series required to achieve smooth, real-time processing?
Differences Across Apple Hardware: How does performance scale between the A-series Neural Engine and M-series GPU and Neural Engine? Is the M-series vastly superior for real-time Core ML tasks like object detection on live video feeds?
If anyone has attempted live object detection on these chips, any insights on real-time performance, limitations, or optimizations would be highly appreciated.
Please refer to: Apple APIs
Thank you in advance for your help!
I have a model that uses a CoreML delegate, and I’m getting the following warning whenever I set the model to nil. My understanding is that CoreML is creating a cache in the app’s storage but is having issues clearing it. As a result, the app’s storage usage increases every time the model is loaded.
This StackOverflow post explains the problem in detail: App Storage Size Increases with CoreML usage
This is a critical issue because the cache will eventually fill up the phone’s storage:
doUnloadModel:options:qos:error:: model=_ANEModel: { modelURL=file:///var/mobile/Containers/Data/Application/22DDB13E-DABA-4195-846F-F884135F37FE/tmp/F38A9824-3944-420C-BD32-78CE598BE22D-10125-00000586EFDFD7D6.mlmodelc/ : sourceURL= (null) : key={"isegment":0,"inputs":{"0_0":{"shape":[256,256,1,3,1]}},"outputs":{"142_0":{"shape":[16,16,1,222,1]},"138_0":{"shape":[16,16,1,111,1]}}} : identifierSource=0 : cacheURLIdentifier=E0CD0F44FB0417936057FC6375770CFDCCC8C698592ED412DDC9C81E96256872_C9D6E5E73302943871DC2C610588FEBFCB1B1D730C63CA5CED15D2CD5A0AC0DA : string_id=0x00000000 : program=_ANEProgramForEvaluation: { programHandle=6077141501305 : intermediateBufferHandle=6077142786285 : queueDepth=127 } : state=3 : programHandle=6077141501305 : intermediateBufferHandle=6077142786285 : queueDepth=127 : attr={
ANEFModelDescription = {
ANEFModelInput16KAlignmentArray = (
);
ANEFModelOutput16KAlignmentArray = (
);
ANEFModelProcedures = (
{
ANEFModelInputSymbolIndexArray = (
0
);
ANEFModelOutputSymbolIndexArray = (
0,
1
);
ANEFModelProcedureID = 0;
}
);
kANEFModelInputSymbolsArrayKey = (
"0_0"
);
kANEFModelOutputSymbolsArrayKey = (
"138_0@output",
"142_0@output"
);
kANEFModelProcedureNameToIDMapKey = {
net = 0;
};
};
NetworkStatusList = (
{
LiveInputList = (
{
BatchStride = 393216;
Batches = 1;
Channels = 3;
Depth = 1;
DepthStride = 393216;
Height = 256;
Interleave = 1;
Name = "0_0";
PlaneCount = 3;
PlaneStride = 131072;
RowStride = 512;
Symbol = "0_0";
Type = Float16;
Width = 256;
}
);
LiveOutputList = (
{
BatchStride = 113664;
Batches = 1;
Channels = 111;
Depth = 1;
DepthStride = 113664;
Height = 16;
Interleave = 1;
Name = "138_0@output";
PlaneCount = 111;
PlaneStride = 1024;
RowStride = 64;
Symbol = "138_0@output";
Type = Float16;
Width = 16;
},
{
BatchStride = 227328;
Batches = 1;
Channels = 222;
Depth = 1;
DepthStride = 227328;
Height = 16;
Interleave = 1;
Name = "142_0@output";
PlaneCount = 222;
PlaneStride = 1024;
RowStride = 64;
Symbol = "142_0@output";
Type = Float16;
Width = 16;
}
);
Name = net;
}
);
} : perfStatsMask=0} was not loaded by the client.
https://developer.apple.com/machine-learning/models/
Adding the DepthAnythingV2SmallF16.mlpackage to a new project in Xcode 16.1 and invoking the class crashes the app.
Anyone else having the same issue?
I tried Xcode 16.2 beta and it has the same response.
Code
import UIKit
import CoreML

class ViewController: UIViewController {

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        do {
            // Use a default model configuration.
            let defaultConfig = MLModelConfiguration()

            // app crashes here
            let model = try? DepthAnythingV2SmallF16(
                configuration: defaultConfig
            )
        } catch {
            //
        }
    }
}
Response
/AppleInternal/Library/BuildRoots/4b66fb3c-7dd0-11ef-b4fb-4a83e32a47e1/Library/Caches/com.apple.xbs/Sources/MetalPerformanceShadersGraph/mpsgraph/MetalPerformanceShadersGraph/Core/Files/MPSGraphExecutable.mm:129: failed assertion Error: unhandled platform for MPSGraph serialization'