How to set up simple rendercallback in version 3 of Audio Unit API

Hello there 🙂


First of all I am a beginner to Audio Units so please forgive me that I don't have a clear understanding of all the concepts.

I have managed to set up a single AudioUnit using the v2 API in Swift 2.0. The AudioUnit simply records audio from built-in microphone and then outputs it to speaker/headset with low latency. The reason for this is simply to render some cool graphics while the sound itself is untouched...


First, here is a piece of my working class using version 2 of the AudioUnit API:

/// Records audio from the built-in microphone with a Remote I/O audio unit
/// and plays it straight back out, invoking `renderCallback` for every buffer
/// so the samples can be inspected (e.g. for metering / graphics).
final class AudioMetering: NSObject {

    private let
    preferredIOBufferDuration = 0.005, // a value of 5 ms seems to introduce ~1% of CPU usage on iPhone 5
    inputBus  = AudioUnitElement(1),   // Remote I/O element 1 = input (microphone) side
    outputBus = AudioUnitElement(0)    // Remote I/O element 0 = output (speaker) side
  
    private var
    active = false,
    audioUnit = AudioUnit()

    /*
        This method is called when the Remote I/O audio unit has new samples available. The method
        renders new samples; process them if needed and returns them for output. The callback is
        called from AURemoteIO::IOThread and has to return ASAP (hard deadline).
    */
    private let renderCallback: AURenderCallback = { (inRefCon, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData) -> OSStatus in
      
        let audioMetering = unsafeBitCast(inRefCon, AudioMetering.self) // recover the owning object passed via inputProcRefCon
      
        if audioMetering.active == true {
            // Pull the freshly captured input samples into ioData; the same
            // buffers are then used for output (straight pass-through).
            switch AudioUnitRender(audioMetering.audioUnit, ioActionFlags, inTimeStamp, audioMetering.inputBus, inNumberFrames, ioData) {
            case noErr:
                break
              
            case kAudioUnitErr_CannotDoInCurrentContext: // it seems safe to continue with this error; output is just muted...
                print("kAudioUnitErr_CannotDoInCurrentContext")
              
            default: assert(false)
            }
          
            // 'look-at-samples-to-render-cool-graphics-code' (deterministic and non-blocking code)
        }
      
        return noErr
    }

    /// Creates, configures and starts the Remote I/O unit.
    ///
    /// NOTE(fix): the side-effecting Audio Unit calls used to sit *inside*
    /// `assert(...)`. Swift does not evaluate the assert condition in release
    /// (-O) builds, so in a release build none of those calls would run at
    /// all. Each `OSStatus` is therefore captured first and asserted after.
    func start() {
        assert(active == false)
        active = true

  // set-up AudioSession...

        do {
            try SharedInstanceOf.audioSession.setPreferredIOBufferDuration(preferredIOBufferDuration)
        } catch let error as NSError {
            dumpError(error, functionName: "setPreferredIOBufferDuration")
        }
      
        var audioComponentDescription =
        AudioComponentDescription(
            componentType: OSType(kAudioUnitType_Output),
            componentSubType: OSType(kAudioUnitSubType_RemoteIO),
            componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
            componentFlags: 0,
            componentFlagsMask: 0)
      
        let audioComponent = AudioComponentFindNext(nil, &audioComponentDescription)
        var status = AudioComponentInstanceNew(audioComponent, &audioUnit)
        assert(status == noErr)
      
        var flag = UInt32(1) // 1 = enable I/O on the given element
        status = AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioOutputUnitProperty_EnableIO), AudioUnitScope(kAudioUnitScope_Input), inputBus, &flag, UInt32(sizeof(UInt32)))
        assert(status == noErr)
        status = AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioOutputUnitProperty_EnableIO), AudioUnitScope(kAudioUnitScope_Output), outputBus, &flag, UInt32(sizeof(UInt32)))
        assert(status == noErr)

        // add a callback and render explicitly; callback to be used for fancy graphics and/or possible DSP :]
        var renderCallbackStruct = AURenderCallbackStruct(inputProc: renderCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self)))
        status = AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioUnitProperty_SetRenderCallback), AudioUnitScope(kAudioUnitScope_Global), 0, &renderCallbackStruct, UInt32(sizeof(AURenderCallbackStruct)))
        assert(status == noErr)
      
        status = AudioUnitInitialize(audioUnit)
        assert(status == noErr)
        status = AudioOutputUnitStart(audioUnit)
        assert(status == noErr)
    }
  
    /// Stops and tears down the Remote I/O unit (mirror image of `start()`).
    func stop() {
        assert(active == true)
        active = false
      
        // Same release-build pitfall as in start(): keep the side-effecting
        // calls outside of assert() so they survive -O builds.
        var status = AudioOutputUnitStop(audioUnit)
        assert(status == noErr)
        status = AudioUnitUninitialize(audioUnit)
        assert(status == noErr)
        status = AudioComponentInstanceDispose(audioUnit)
        assert(status == noErr)

  // inactivate AudioSession
    }
}


Below is my attempt to adapt it to version 3 of the AudioUnit API. I have posted a number of questions inline (Q1-Q5):

final class AudioMetering: NSObject {

    private let
    preferredIOBufferDuration = 0.005, // a value of 5 ms seems to introduce ~1% of CPU usage on iPhone 5
    // below description is used to pick the specific audio unit we need to use
    audioComponentDescription = AudioComponentDescription(componentType: kAudioUnitType_Output, componentSubType: kAudioUnitSubType_RemoteIO, componentManufacturer: kAudioUnitManufacturer_Apple, componentFlags: 0, componentFlagsMask: 0)
   
    private var
    active = false,
    auAudioUnit: AUAudioUnit!

    func start() {
        assert(active == false)
        active = true

  // set-up AudioSession...

       do {
            try SharedInstanceOf.audioSession.setPreferredIOBufferDuration(preferredIOBufferDuration)
        } catch let error as NSError {
            dumpError(error, functionName: "setPreferredIOBufferDuration")
        }

       do {
            try auAudioUnit = AUAudioUnit(componentDescription: audioComponentDescription)

           // Q1: is:
            auAudioUnit.outputEnabled = true
            auAudioUnit.inputEnabled = true
            // replacing below calls?
            // var flag = UInt32(1)
            // assert(AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioOutputUnitProperty_EnableIO), AudioUnitScope(kAudioUnitScope_Input), inputBus, &flag, UInt32(sizeof(UInt32))) == noErr)
            // assert(AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioOutputUnitProperty_EnableIO), AudioUnitScope(kAudioUnitScope_Output), outputBus, &flag, UInt32(sizeof(UInt32))) == noErr)
           
          // Q2: is below:
            auAudioUnit.outputProvider = {
                (actionFlags, timestamp, frameCount, inputBusNumber, inputData) -> AUAudioUnitStatus in

               // Q3: what should replace call to AudioUnitRender?
               
                return noErr
            }
            // replacing this code:
            // var renderCallbackStruct = AURenderCallbackStruct(inputProc: renderCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self)))
            // assert(AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioUnitProperty_SetRenderCallback), AudioUnitScope(kAudioUnitScope_Global), 0, &renderCallbackStruct, UInt32(sizeof(AURenderCallbackStruct))) == noErr)
           
            try auAudioUnit.allocateRenderResources() // Q4: does this replace call to AudioUnitInitialize?
            try auAudioUnit.startHardware()           // Q5: does this replace call to AudioOutputUnitStart?

       } catch let error as NSError {
            dumpError(error, functionName: "AUAudioUnit failed")
            assert(false)
        }


The resources/examples on version 3 are very limited, so I hope this forum can guide me 🙂


Thanks!

Accepted Answer

Hi again,


I've cracked it myself 😀


The `let renderBlock = auAudioUnit.renderBlock` line captures the render block (the v3 counterpart of `AudioUnitRender`), and then inside the `outputProvider` closure we simply call it!


        do {
            try auAudioUnit = AUAudioUnit(componentDescription: self.audioComponentDescription)
         
            let renderBlock = auAudioUnit.renderBlock
         
            auAudioUnit.outputProvider = { // AURenderPullInputBlock
                (actionFlags, timestamp, frameCount, inputBusNumber, inputData) -> AUAudioUnitStatus in
                switch renderBlock(actionFlags, timestamp, frameCount, 1, inputData, .None) {
                case noErr: break
                case kAudioUnitErr_CannotDoInCurrentContext: print("kAudioUnitErr_CannotDoInCurrentContext")
                default: assert(false)
                }
How to set up simple rendercallback in version 3 of Audio Unit API
 
 
Q