import Foundation
import Combine
import AVFoundation
import Photos
import UIKit
import FirebaseStorage

public struct AlertError {
    public var title: String = ""
    public var message: String = ""
    public var primaryButtonTitle = "Accept"
    public var secondaryButtonTitle: String?
    public var primaryAction: (() -> ())?
    public var secondaryAction: (() -> ())?

    public init(title: String = "", message: String = "", primaryButtonTitle: String = "Accept", secondaryButtonTitle: String? = nil, primaryAction: (() -> ())? = nil, secondaryAction: (() -> ())? = nil) {
        self.title = title
        self.message = message
        self.primaryButtonTitle = primaryButtonTitle
        self.secondaryButtonTitle = secondaryButtonTitle
        self.primaryAction = primaryAction
        self.secondaryAction = secondaryAction
    }
}

///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
//
// This is the CameraService class, which configures and runs a capture session
// that acquires synchronized image and depth data
// using an AVCaptureDataOutputSynchronizer.
//
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////

public class CameraService: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureDepthDataOutputDelegate, AVCaptureDataOutputSynchronizerDelegate, MyFirebaseProtocol, ObservableObject {

    @Published public var shouldShowAlertView = false
    @Published public var shouldShowSpinner = false

    public var labelStatus: String = "Ready"

    var images: [UIImage?] = []

    public var alertError: AlertError = AlertError()

    public let session = AVCaptureSession()

    var isSessionRunning = false
    var isConfigured = false
    var setupResult: SessionSetupResult = .success

    // Communicate with the session and other session objects on this queue.
    private let sessionQueue = DispatchQueue(label: "session queue")

    @objc dynamic var videoDeviceInput: AVCaptureDeviceInput!

    private let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInTrueDepthCamera], mediaType: .video, position: .front)

    var videoCaptureDevice: AVCaptureDevice? = nil

    let videoDataOutput: AVCaptureVideoDataOutput = AVCaptureVideoDataOutput() // Frame output.
    let depthDataOutput = AVCaptureDepthDataOutput()
    var outputSynchronizer: AVCaptureDataOutputSynchronizer? = nil
    let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)

    var scanStateCounter: Int = 0

    var m_DepthDatasetsToUpload = [AVCaptureSynchronizedDepthData]()
    var m_FrameBufferToUpload = [AVCaptureSynchronizedSampleBufferData]()

    var firebaseDepthDatasetsArray: [String] = []

    @Published var firebaseImageUploadCount = 0
    @Published var firebaseTextFileUploadCount = 0

    public func configure() {
        /*
         Set up the capture session.
         In general, it's not safe to mutate an AVCaptureSession or any of its
         inputs, outputs, or connections from multiple threads at the same time.

         Don't perform these tasks on the main queue because
         AVCaptureSession.startRunning() is a blocking call, which can
         take a long time. Dispatch session setup to the sessionQueue, so
         that the main queue isn't blocked, which keeps the UI responsive.
         */
        sessionQueue.async {
            self.configureSession()
        }
    }
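    // `SessionSetupResult` is referenced above but not defined in this file.
    // A minimal definition, mirroring the one in Apple's AVCam sample code, is
    // assumed here so the class compiles on its own:
    enum SessionSetupResult {
        case success
        case notAuthorized
        case configurationFailed
    }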
    // MARK: Checks for user's permissions

    public func checkForPermissions() {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized:
            // The user has previously granted access to the camera.
            break
        case .notDetermined:
            /*
             The user has not yet been presented with the option to grant
             video access. Suspend the session queue to delay session
             setup until the access request has completed.
             */
            sessionQueue.suspend()
            AVCaptureDevice.requestAccess(for: .video, completionHandler: { granted in
                if !granted {
                    self.setupResult = .notAuthorized
                }
                self.sessionQueue.resume()
            })
        default:
            // The user has previously denied access.
            setupResult = .notAuthorized

            DispatchQueue.main.async {
                self.alertError = AlertError(title: "Camera Access",
                                             message: "SwiftCamera doesn't have access to your camera. Please update your privacy settings.",
                                             primaryButtonTitle: "Settings",
                                             secondaryButtonTitle: nil,
                                             primaryAction: {
                                                 UIApplication.shared.open(URL(string: UIApplication.openSettingsURLString)!,
                                                                           options: [:],
                                                                           completionHandler: nil)
                                             },
                                             secondaryAction: nil)
                self.shouldShowAlertView = true
            }
        }
    }
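    // The video data output configured below delivers 32BGRA pixel buffers,
    // which convert cleanly to CGImage/UIImage. A sketch of a helper that
    // could turn a synchronized frame into a UIImage for the `images` array;
    // the helper name is hypothetical, and CIImage/CIContext may require an
    // explicit `import CoreImage` depending on the SDK.
    private func uiImage(from sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
        let context = CIContext()
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
        return UIImage(cgImage: cgImage)
    }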
    // MARK: Session Management

    // Call this on the session queue.
    /// - Tag: ConfigureSession
    private func configureSession() {
        if setupResult != .success {
            return
        }

        session.beginConfiguration()
        session.sessionPreset = AVCaptureSession.Preset.vga640x480

        // Add video input.
        do {
            var defaultVideoDevice: AVCaptureDevice?
            // Use the front TrueDepth camera, which provides depth data.
            let frontCameraDevice = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .front)
            defaultVideoDevice = frontCameraDevice
            videoCaptureDevice = defaultVideoDevice

            guard let videoDevice = defaultVideoDevice else {
                print("Default video device is unavailable.")
                setupResult = .configurationFailed
                session.commitConfiguration()
                return
            }

            let videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)

            if session.canAddInput(videoDeviceInput) {
                session.addInput(videoDeviceInput)
                self.videoDeviceInput = videoDeviceInput
            } else if !session.inputs.isEmpty {
                // The session already has an input from a previous configuration pass.
                self.videoDeviceInput = videoDeviceInput
            } else {
                print("Couldn't add video device input to the session.")
                setupResult = .configurationFailed
                session.commitConfiguration()
                return
            }
        } catch {
            print("Couldn't create video device input: \(error)")
            setupResult = .configurationFailed
            session.commitConfiguration()
            return
        }

        //////////////////////////////////////////////////////////////////////////////////////////////////////////////
        // MARK: add video output to session
        //////////////////////////////////////////////////////////////////////////////////////////////////////////////

        videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String: Any]
        videoDataOutput.alwaysDiscardsLateVideoFrames = true
        videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))

        if session.canAddOutput(self.videoDataOutput) {
            session.addOutput(self.videoDataOutput)
        } else if session.outputs.contains(videoDataOutput) {
            // The output is already attached from a previous configuration pass.
        } else {
            print("Couldn't add video device output to the session.")
            setupResult = .configurationFailed
            session.commitConfiguration()
            return
        }

        guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
              connection.isVideoOrientationSupported else { return }
        connection.videoOrientation = .portrait

        //////////////////////////////////////////////////////////////////////////////////////////////////////////////
        // MARK: add depth output to session
        //////////////////////////////////////////////////////////////////////////////////////////////////////////////

        // Add a depth data output.
        if session.canAddOutput(depthDataOutput) {
            session.addOutput(depthDataOutput)
            depthDataOutput.isFilteringEnabled = false
            depthDataOutput.setDelegate(self, callbackQueue: DispatchQueue(label: "depth_frame_processing_queue"))
            if let connection = depthDataOutput.connection(with: .depthData) {
                connection.isEnabled = true
            } else {
                print("No AVCaptureConnection")
            }
        } else if session.outputs.contains(depthDataOutput) {
            // The output is already attached from a previous configuration pass.
        } else {
            print("Could not add depth data output to the session.")
            setupResult = .configurationFailed
            session.commitConfiguration()
            return
        }

        // Search for the highest-resolution depth format with half-precision (Float16) depth values.
        let depthFormats = videoCaptureDevice!.activeFormat.supportedDepthDataFormats
        let filtered = depthFormats.filter {
            CMFormatDescriptionGetMediaSubType($0.formatDescription) == kCVPixelFormatType_DepthFloat16
        }
        let selectedFormat = filtered.max(by: { first, second in
            CMVideoFormatDescriptionGetDimensions(first.formatDescription).width <
                CMVideoFormatDescriptionGetDimensions(second.formatDescription).width
        })

        do {
            try videoCaptureDevice!.lockForConfiguration()
            videoCaptureDevice!.activeDepthDataFormat = selectedFormat
            videoCaptureDevice!.unlockForConfiguration()
        } catch {
            print("Could not lock device for configuration: \(error)")
            setupResult = .configurationFailed
            session.commitConfiguration()
            return
        }

        //////////////////////////////////////////////////////////////////////////////////////////////////////////////
        // Use an AVCaptureDataOutputSynchronizer to synchronize the video data and depth data outputs.
        // The first output in the dataOutputs array, in this case the AVCaptureVideoDataOutput, is the "master" output.
        //////////////////////////////////////////////////////////////////////////////////////////////////////////////

        outputSynchronizer = AVCaptureDataOutputSynchronizer(dataOutputs: [videoDataOutput, depthDataOutput])
        outputSynchronizer!.setDelegate(self, queue: dataOutputQueue)

        session.commitConfiguration()

        self.isConfigured = true
    }
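    // The capture handler below extracts per-pixel depth values but leaves the
    // actual persistence as a placeholder. A minimal sketch of what saving a
    // depth map could look like, assuming a flat binary dump of Float16 rows
    // into the app's Documents directory; the function name and file layout
    // are hypothetical, not part of the original source.
    private func writeDepthMap(_ pixelBuffer: CVPixelBuffer, frameIndex: Int) {
        CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
        defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }

        guard let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer) else { return }
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)

        // Copy row by row, skipping any padding bytes past width * 2.
        var data = Data(capacity: width * height * MemoryLayout<Float16>.size)
        for row in 0..<height {
            data.append(Data(bytes: baseAddress + row * bytesPerRow,
                             count: width * MemoryLayout<Float16>.size))
        }

        let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
            .appendingPathComponent("depth_\(frameIndex).bin")
        do {
            try data.write(to: url)
        } catch {
            print("Could not write depth map: \(error)")
        }
    }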
    // MARK: Device Configuration

    /// - Tag: Stop capture session
    public func stop(completion: (() -> ())? = nil) {
        sessionQueue.async {
            // stopRunning() is a blocking call, so keep it on the session queue.
            if self.isSessionRunning && self.setupResult == .success {
                self.session.stopRunning()
                self.isSessionRunning = self.session.isRunning
                if !self.session.isRunning {
                    DispatchQueue.main.async {
                        completion?()
                    }
                }
            }
        }
    }

    /// - Tag: Start capture session
    public func start() {
        // Dispatch to the session queue so startRunning(), a blocking call,
        // stays off the main thread and the UI remains responsive.
        sessionQueue.async {
            if !self.isSessionRunning && self.isConfigured {
                switch self.setupResult {
                case .success:
                    self.session.startRunning()
                    self.isSessionRunning = self.session.isRunning
                case .configurationFailed, .notAuthorized:
                    print("Application not authorized to use camera")
                    DispatchQueue.main.async {
                        self.alertError = AlertError(title: "Camera Error",
                                                     message: "Camera configuration failed. Either your device's camera is not available or it is missing permissions.",
                                                     primaryButtonTitle: "Accept",
                                                     secondaryButtonTitle: nil,
                                                     primaryAction: nil,
                                                     secondaryAction: nil)
                        self.shouldShowAlertView = true
                    }
                }
            }
        }
    }

    // ------------------------------------------------------------------------
    // MARK: CAPTURE HANDLERS
    // ------------------------------------------------------------------------

    public func dataOutputSynchronizer(_ synchronizer: AVCaptureDataOutputSynchronizer,
                                       didOutput synchronizedDataCollection: AVCaptureSynchronizedDataCollection) {

        guard let syncedDepthData = synchronizedDataCollection.synchronizedData(for: depthDataOutput) as? AVCaptureSynchronizedDepthData else {
            return
        }
        guard let syncedVideoData = synchronizedDataCollection.synchronizedData(for: videoDataOutput) as? AVCaptureSynchronizedSampleBufferData else {
            return
        }
        // syncedVideoData.sampleBuffer holds the matching BGRA frame; see the
        // uiImage(from:) sketch above for one way to convert it.

        ///////////////////////////////////////////////////////////////////////////////////
        ///////////////////////////////////////////////////////////////////////////////////
        //
        // Below is the code that extracts the information from the depth data.
        // The depth data is 640x480, which matches the size of the synchronized image.
        // I save this info to a file, upload it to the cloud, and merge it with the
        // image on a PC to create a point cloud.
        //
        ///////////////////////////////////////////////////////////////////////////////////
        ///////////////////////////////////////////////////////////////////////////////////

        let depthData: AVDepthData = syncedDepthData.depthData
        let depthPixelBuffer: CVPixelBuffer = depthData.depthDataMap
        let height: Int = CVPixelBufferGetHeight(depthPixelBuffer)
        let width: Int = CVPixelBufferGetWidth(depthPixelBuffer)
        let quality: AVDepthData.Quality = depthData.depthDataQuality
        let accuracy: AVDepthData.Accuracy = depthData.depthDataAccuracy

        guard let calibrationData = depthData.cameraCalibrationData else {
            print("No camera calibration data")
            return
        }
        let pixelSize: Float = calibrationData.pixelSize
        let intrinsicMatrix: matrix_float3x3 = calibrationData.intrinsicMatrix
        let lensDistortionX: CGFloat = calibrationData.lensDistortionCenter.x
        let lensDistortionY: CGFloat = calibrationData.lensDistortionCenter.y
        let matrixWidth: CGFloat = calibrationData.intrinsicMatrixReferenceDimensions.width
        let matrixHeight: CGFloat = calibrationData.intrinsicMatrixReferenceDimensions.height
        // Focal lengths and principal point, in pixels.
        let intrinsicsFx: Float = intrinsicMatrix.columns.0.x
        let intrinsicsFy: Float = intrinsicMatrix.columns.1.y
        let intrinsicsOx: Float = intrinsicMatrix.columns.2.x
        let intrinsicsOy: Float = intrinsicMatrix.columns.2.y

        let pixelFormatType: OSType = CVPixelBufferGetPixelFormatType(depthPixelBuffer)

        CVPixelBufferLockBaseAddress(depthPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

        // The active depth format was selected as kCVPixelFormatType_DepthFloat16,
        // so each pixel is a half-precision float.
        let float16Buffer = unsafeBitCast(CVPixelBufferGetBaseAddress(depthPixelBuffer), to: UnsafeMutablePointer<Float16>.self)
        let float16PerRow = CVPixelBufferGetBytesPerRow(depthPixelBuffer) / 2

        for row in 0..<height {
            for col in 0..<width {
                let depth = float16Buffer[row * float16PerRow + col]

                /////////////////////////
                // SAVE DEPTH VALUE `depth` TO FILE FOR PROCESSING
                // (see the writeDepthMap(_:frameIndex:) sketch above for one approach)
            }
        }
        CVPixelBufferUnlockBaseAddress(depthPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
    }
}
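// A minimal sketch of how calling code might drive CameraService, assuming a
// SwiftUI-style view model. The type name is hypothetical and not part of the
// original source.
final class CameraViewModel: ObservableObject {
    let service = CameraService()

    init() {
        // checkForPermissions() may suspend the session queue until the user
        // responds; configure() and start() are queued behind it on the same
        // serial queue, so they execute in order once permission is resolved.
        service.checkForPermissions()
        service.configure()
        service.start()
    }
}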