Environment:
- macOS 26.2 (Tahoe)
- Xcode 16.3
- Apple Silicon (M4)
- Sandboxed Mac App Store app

Description:
Repeated use of VNRecognizeTextRequest causes permanent memory growth in the host process. The physical footprint increases by approximately 3-15 MB per OCR call and never returns to baseline, even after all references to the request, handler, observations, and image are released.
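One way to quantify the growth is to sample the process's physical footprint (the same number the Xcode memory gauge tracks) via `task_info`. A minimal sketch, assuming nothing beyond Darwin; the helper name `physicalFootprint` is arbitrary:

```swift
import Darwin

/// Returns the current process's physical footprint in bytes, or nil on failure.
func physicalFootprint() -> UInt64? {
    var info = task_vm_info_data_t()
    var count = mach_msg_type_number_t(
        MemoryLayout<task_vm_info_data_t>.size / MemoryLayout<integer_t>.size)
    let kr = withUnsafeMutablePointer(to: &info) {
        $0.withMemoryRebound(to: integer_t.self, capacity: Int(count)) {
            task_info(mach_task_self_, task_flavor_t(TASK_VM_INFO), $0, &count)
        }
    }
    return kr == KERN_SUCCESS ? info.phys_footprint : nil
}
```

Repro code: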
```swift
import SwiftUI                  // assumed context: a SwiftUI view whose @State
import AppKit                   // properties are selectedImageURL, isProcessing,
import UniformTypeIdentifiers   // and recognizedText
import Vision
import CoreVideo

private func selectAndProcessImage() {
    let panel = NSOpenPanel()
    panel.allowedContentTypes = [.image]
    panel.allowsMultipleSelection = false
    panel.canChooseDirectories = false
    panel.message = "Select an image for OCR processing"

    guard panel.runModal() == .OK, let url = panel.url else { return }

    selectedImageURL = url
    isProcessing = true
    recognizedText = "Processing..."

    // Run OCR on a background thread to keep the UI responsive
    let workItem = DispatchWorkItem {
        let result = performOCR(on: url)
        DispatchQueue.main.async {
            recognizedText = result
            isProcessing = false
        }
    }
    DispatchQueue.global(qos: .userInitiated).async(execute: workItem)
}

private func performOCR(on url: URL) -> String {
    // Wrap EVERYTHING in an autoreleasepool so all ObjC objects are drained immediately
    let resultText: String = autoreleasepool {
        // Load the image and convert it to a CVPixelBuffer for explicit memory control
        guard let imageData = try? Data(contentsOf: url) else {
            return "Error: Could not read image file."
        }
        guard let nsImage = NSImage(data: imageData) else {
            return "Error: Could not create image from file data."
        }
        guard let cgImage = nsImage.cgImage(forProposedRect: nil, context: nil, hints: nil) else {
            return "Error: Could not create CGImage."
        }

        let width = cgImage.width
        let height = cgImage.height

        // Create a CVPixelBuffer from the CGImage
        var pixelBuffer: CVPixelBuffer?
        let attrs: [String: Any] = [
            kCVPixelBufferCGImageCompatibilityKey as String: true,
            kCVPixelBufferCGBitmapContextCompatibilityKey as String: true
        ]
        let status = CVPixelBufferCreate(
            kCFAllocatorDefault,
            width,
            height,
            kCVPixelFormatType_32ARGB,
            attrs as CFDictionary,
            &pixelBuffer
        )
        guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
            return "Error: Could not create CVPixelBuffer (status: \(status))."
        }
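        // 32ARGB stores the (ignored) alpha byte first, which matches the
        // CGImageAlphaInfo.noneSkipFirst layout of the bitmap context below.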
        // Draw the CGImage into the pixel buffer
        CVPixelBufferLockBaseAddress(buffer, [])
        guard let context = CGContext(
            data: CVPixelBufferGetBaseAddress(buffer),
            width: width,
            height: height,
            bitsPerComponent: 8,
            bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
            space: CGColorSpaceCreateDeviceRGB(),
            bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue
        ) else {
            CVPixelBufferUnlockBaseAddress(buffer, [])
            return "Error: Could not create CGContext for pixel buffer."
        }
        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
        CVPixelBufferUnlockBaseAddress(buffer, [])

        // Run OCR
        let requestHandler = VNImageRequestHandler(cvPixelBuffer: buffer, options: [:])
        let request = VNRecognizeTextRequest()
        request.recognitionLevel = .accurate
        request.usesLanguageCorrection = true

        do {
            try requestHandler.perform([request])
        } catch {
            return "Error during OCR: \(error.localizedDescription)"
        }

        guard let observations = request.results, !observations.isEmpty else {
            return "No text found in image."
        }

        let lines = observations.compactMap { observation in
            observation.topCandidates(1).first?.string
        }

        // Explicitly nil out the optional before the pool drains
        // (note: `buffer` still holds a reference until this closure returns)
        pixelBuffer = nil

        return lines.joined(separator: "\n")
    }
    // Everything (Data, NSImage, CGImage, CVPixelBuffer, VN objects) should be released here
    return resultText
}
```
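
Calling `performOCR(on:)` in a loop makes the growth easy to see. A hypothetical driver using the `physicalFootprint()` helper above (`/path/to/sample.png` stands in for any text-bearing image):

```swift
// Hypothetical repro loop: OCR the same file repeatedly and log the footprint.
let imageURL = URL(fileURLWithPath: "/path/to/sample.png")
for i in 1...20 {
    _ = performOCR(on: imageURL)
    if let bytes = physicalFootprint() {
        print("iteration \(i): phys_footprint = \(bytes / 1_048_576) MB")
    }
}
```

The logged footprint climbs by roughly 3-15 MB per iteration and never falls back to its starting value.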