Hello,
I am working on a fairly complex iPhone app that controls the built-in front wide-angle camera. I need to take and display a sequence of photos covering the whole range of available focus values.
Here is how I do it:
- call setFocusModeLocked to set the first lens position
- wait for its completionHandler to be called back
- capture a photo
- do it again for the next lens position
- etc.
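Condensed into code, the loop looks roughly like this (a sketch, not my actual app code: device, photoOutput and positions stand in for the real objects, and in the real app I only advance to the next lens position from the photo delegate callback):

// Sketch of the bracketing loop. `device` is the AVCaptureDevice,
// `photoOutput` an AVCapturePhotoOutput, `positions` the lens positions to sweep.
func capture(at index: Int) {
    guard index < positions.count else { return }
    try! device.lockForConfiguration()
    device.setFocusModeLocked(lensPosition: positions[index]) { _ in
        // The lens has settled at the new position: take the photo, then move on.
        self.photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self)
        self.capture(at: index + 1)
    }
    device.unlockForConfiguration()
}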
This works fine, but it takes longer than I expected for the completionHandler to be called back.
From what I've seen, the delay scales with the exposure duration. When I set the exposure duration to the max value:
- on the iPhone 14 Pro, it takes about 3 seconds (3 times the max exposure);
- on the iPhone 8, about 1.3 seconds (4 times the max exposure).
I was expecting a delay of roughly two exposure durations: take a photo, throw one frame away while the lens position changes, take the next photo, and so on. But it consistently takes longer than that.
I also tried the same thing, changing the ISO instead of the lens position, and I see the same kind of delay. I don't think the problem is linked to the way I process the images either, because the delay is the same even if I do nothing with the output.
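For reference, the ISO variant just swaps the focus call for setExposureModeCustom (a sketch; device stands in for the capture device, the exposure duration is left at its current value and only the ISO is stepped):

// Sketch of the ISO variant: same loop shape, stepping ISO instead of lens position.
func setNextISO(_ iso: Float) {
    try! device.lockForConfiguration()
    device.setExposureModeCustom(duration: device.exposureDuration, iso: iso) { _ in
        // Called back with the same kind of delay as setFocusModeLocked.
        print("did set iso", iso)
    }
    device.unlockForConfiguration()
}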
Is there something I could do to make things go faster for this use case?
Any input would be appreciated,
Thanks
I created a minimal test app to reproduce the issue:
import Foundation
import AVFoundation

class Main: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    let dispatchQueue = DispatchQueue(label: "VideoQueue", qos: .userInitiated)
    let session: AVCaptureSession
    let videoDevice: AVCaptureDevice
    var focus: Float = 0

    override init() {
        session = AVCaptureSession()
        session.beginConfiguration()
        session.sessionPreset = .photo
        videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)!
        super.init()
        let videoDeviceInput = try! AVCaptureDeviceInput(device: videoDevice)
        session.addInput(videoDeviceInput)
        let videoDataOutput = AVCaptureVideoDataOutput()
        if session.canAddOutput(videoDataOutput) {
            session.addOutput(videoDataOutput)
            videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
            videoDataOutput.setSampleBufferDelegate(self, queue: dispatchQueue)
        }
        session.commitConfiguration()
        dispatchQueue.async {
            self.startSession()
        }
    }

    func startSession() {
        session.startRunning()
        // Lock a long custom exposure (half of the format's maximum duration).
        try! videoDevice.lockForConfiguration()
        let exposure = videoDevice.activeFormat.maxExposureDuration.seconds * 0.5
        print("set max exposure", exposure)
        videoDevice.setExposureModeCustom(duration: CMTime(seconds: exposure, preferredTimescale: 1000), iso: videoDevice.activeFormat.minISO) { time in
            print("did set max exposure")
            self.changeFocus()
        }
        videoDevice.unlockForConfiguration()
    }

    func changeFocus() {
        let date = Date.now
        print("set focus", focus)
        try! videoDevice.lockForConfiguration()
        videoDevice.setFocusModeLocked(lensPosition: focus) { time in
            // How long the completion handler took, in seconds and in frame durations.
            let dt = abs(date.timeIntervalSinceNow)
            print("did set focus - took:", dt, "frames:", dt / self.videoDevice.exposureDuration.seconds)
            self.next()
        }
        videoDevice.unlockForConfiguration()
    }

    func next() {
        // Sweep the whole lens position range [0, 1] in steps of 0.02.
        focus += 0.02
        if focus > 1 {
            print("done")
            return
        }
        changeFocus()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("did receive video frame")
    }
}
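For reference, here is a minimal way to drive this class (assumed glue code, not part of the listing above; NSCameraUsageDescription must be set in Info.plist):

import UIKit

@main
class AppDelegate: UIResponder, UIApplicationDelegate {
    // Keep a strong reference so the capture session stays alive.
    var main: Main?

    func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool {
        main = Main()
        return true
    }
}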