Hi, I'm trying to decode an HLS livestream with VideoToolbox. The CMSampleBuffer is created successfully (OSStatus == noErr). But when I enqueue the CMSampleBuffer to an AVSampleBufferDisplayLayer, the view doesn't display anything, even though the layer's status is 1 (.rendering).
When I use a VTDecompressionSession to convert the CMSampleBuffer to a CVPixelBuffer, the VTDecompressionOutputCallback returns -8969 (kVTVideoDecoderBadDataErr).
What do I need to fix in my code? Am I parsing the segment data for the CMSampleBuffer incorrectly?
let segmentData = try await downloadSegment(from: segment.url)
let (sps, pps, idr) = try parseH264FromTSSegment(tsData: segmentData)
if self.formatDescription == nil {
       self.formatDescription = try CMFormatDescription(h264ParameterSets: [sps, pps])
}
    
if let sampleBuffer = try createSampleBuffer(from: idr, segment: segment) {
     try self.decodeSampleBuffer(sampleBuffer)
} 
func parseH264FromTSSegment(tsData: Data) throws -> (sps: Data, pps: Data, idr: Data) {
    let tsSize = 188
    var pesData = Data()
    for i in stride(from: 0, to: tsData.count, by: tsSize) {
        let tsPacket = tsData.subdata(in: i..<min(i + tsSize, tsData.count))
       guard let payload = extractPayloadFromTSPacket(tsPacket) else { continue }
        pesData.append(payload)
    }
    let nalUnits = parseNalUnits(from: pesData)
    var sps: Data?
    var pps: Data?
    var idr: Data?
    for nalUnit in nalUnits {
        guard let firstByte = nalUnit.first else { continue }
        let nalType = firstByte & 0x1F
        switch nalType {
        case 7: // SPS
           sps = nalUnit
        case 8: // PPS
            pps = nalUnit
        case 5: // IDR
            idr = nalUnit
        default:
            break
        }
        if sps != nil, pps != nil, idr != nil {
            break
        }
    }
    guard let validSPS = sps, let validPPS = pps, let validIDR = idr else {
        throw NSError()
     }
    return (validSPS, validPPS, validIDR)
}
func extractPayloadFromTSPacket(_ tsPacket: Data) -> Data? {
  let syncByte: UInt8 = 0x47
  guard tsPacket.count == 188, tsPacket[0] == syncByte else {
      return nil
  }
    let payloadStart = (tsPacket[1] & 0x40) != 0
    let adaptationFieldControl = (tsPacket[3] & 0x30) >> 4
    var payloadOffset = 4
    if adaptationFieldControl == 2 || adaptationFieldControl == 3 {
        let adaptationFieldLength = Int(tsPacket[4])
        payloadOffset += 1 + adaptationFieldLength
    }
    guard adaptationFieldControl == 1 || adaptationFieldControl == 3 else {
        return nil
    }
    let payload = tsPacket.subdata(in: payloadOffset..<tsPacket.count)
    return payloadStart ? payload : nil
}
func parseNalUnits(from h264Data: Data) -> [Data] {
     let startCode = Data([0x00, 0x00, 0x00, 0x01])
     var nalUnits: [Data] = []
     var searchRange = h264Data.startIndex..<h264Data.endIndex
    while let range = h264Data.range(of: startCode, options: [], in: searchRange) {
        let nextStart = h264Data.range(of: startCode, options: [], in: range.upperBound..<h264Data.endIndex)?.lowerBound ?? h264Data.endIndex
        let nalUnit = h264Data.subdata(in: range.upperBound..<nextStart)
        nalUnits.append(nalUnit)
        searchRange = nextStart..<h264Data.endIndex
     }
     return nalUnits
}
private func createSampleBuffer(from data: Data, segment: HLSSegment) throws -> CMSampleBuffer? {
    var blockBuffer: CMBlockBuffer?
    let alignedData = UnsafeMutableRawPointer.allocate(byteCount: data.count, alignment: MemoryLayout<UInt8>.alignment)
    data.copyBytes(to: alignedData.assumingMemoryBound(to: UInt8.self), count: data.count)
    
    let blockStatus = CMBlockBufferCreateWithMemoryBlock(
        allocator: kCFAllocatorDefault,
        memoryBlock: alignedData,
        blockLength: data.count,
        blockAllocator: nil,
        customBlockSource: nil,
       offsetToData: 0,
       dataLength: data.count,
       flags: 0,
       blockBufferOut: &blockBuffer
    )
    guard blockStatus == kCMBlockBufferNoErr, let validBlockBuffer = blockBuffer else {
        alignedData.deallocate()
        throw NSError()
    }
    var sampleBuffer: CMSampleBuffer?
    var timing = [calculateTiming(for: segment)]
    var sampleSizes = [data.count]
    let sampleStatus = CMSampleBufferCreate(
        allocator: kCFAllocatorDefault,
        dataBuffer: validBlockBuffer,
        dataReady: true,
        makeDataReadyCallback: nil,
        refcon: nil,
       formatDescription: formatDescription,
       sampleCount: 1,
       sampleTimingEntryCount: 1,
       sampleTimingArray: &timing,
       sampleSizeEntryCount: sampleSizes.count,
       sampleSizeArray: &sampleSizes,
       sampleBufferOut: &sampleBuffer
    )
    guard sampleStatus == noErr else {
        alignedData.deallocate()
        throw NSError()
    }
    return sampleBuffer
}
private func decodeSampleBuffer(_ sampleBuffer: CMSampleBuffer) throws {
    guard let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer) else {
        throw NSError()
    }
 
    if decompressionSession == nil {
        try setupDecompressionSession(formatDescription: formatDescription)
    }
    guard let session = decompressionSession else {
        throw NSError()
    }
 
    let flags: VTDecodeFrameFlags = [._EnableAsynchronousDecompression, ._EnableTemporalProcessing]
    var flagOut = VTDecodeInfoFlags()
     
   let status = VTDecompressionSessionDecodeFrame(
       session,
       sampleBuffer: sampleBuffer,
       flags: flags,
       frameRefcon: nil,
       infoFlagsOut: nil)
    if status != noErr {
        throw NSError()
    }
}
private func setupDecompressionSession(formatDescription: CMFormatDescription) throws {
   self.formatDescription = formatDescription
   if let session = decompressionSession {
       VTDecompressionSessionInvalidate(session)
       self.decompressionSession = nil
   }
   var decompressionSession: VTDecompressionSession?
   var callback = VTDecompressionOutputCallbackRecord(
      decompressionOutputCallback: decompressionOutputCallback,
      decompressionOutputRefCon: Unmanaged.passUnretained(self).toOpaque())
   let status = VTDecompressionSessionCreate(
        allocator: kCFAllocatorDefault,
        formatDescription: formatDescription,
        decoderSpecification: nil,
        imageBufferAttributes: nil,
        outputCallback: &callback,
        decompressionSessionOut: &decompressionSession
    )
    if status != noErr {
        throw NSError()
    }
    self.decompressionSession = decompressionSession
}
let decompressionOutputCallback: VTDecompressionOutputCallback = { (
    decompressionOutputRefCon,
    sourceFrameRefCon,
    status,
    infoFlags,
    imageBuffer,
    presentationTimeStamp,
    presentationDuration
) in
    guard status == noErr else {
        print("Callback: \(status)")
        return
     }
   
    if let imageBuffer = imageBuffer {
        // Decoded CVPixelBuffer arrives here; rendering is not implemented yet.
    }
}
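One thing worth double-checking (a suggestion, not a confirmed diagnosis of this exact failure): a CMVideoFormatDescription created from SPS/PPS parameter sets describes AVCC-style samples, so the CMBlockBuffer is expected to contain length-prefixed NAL units rather than Annex B data; feeding raw NAL units without the 4-byte length prefix is a common cause of -8969. A minimal sketch of the conversion, reusing the idr data from parseH264FromTSSegment above:

import Foundation

// Sketch: prefix a raw NAL unit with its 4-byte big-endian length (AVCC layout)
// before handing it to CMBlockBuffer/CMSampleBuffer.
func avccNALUnit(from nalUnit: Data) -> Data {
    var length = UInt32(nalUnit.count).bigEndian
    var avcc = Data(bytes: &length, count: MemoryLayout<UInt32>.size)
    avcc.append(nalUnit)
    return avcc
}

// Hypothetical usage with the code above:
// let sampleBuffer = try createSampleBuffer(from: avccNALUnit(from: idr), segment: segment)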
                    
                  
                    
I am developing a macOS 15 MediaExtension plugin to enable additional codecs and container formats in AVFoundation.
My plugin is sort of working, but I'd like to debug the XPC process that AVFoundation 'hoists' for me from the calling app (i.e., the process hosting my plugin instance that is managing the MESampleBuffer protocol calls, for example).
Is there a way to configure Xcode to attach interactively to this background process for debugging?
Right now I have to use Console + print, which is not fun or productive.
Does Apple have a working example of a MediaExtension anywhere?
This is an exciting API that is very under-documented.
I'm willing to spend a Code Review 'credit' on this, but my issues are not quite focused.
Any assistance is highly appreciated!
                    
                  
                
                    
                      Hi!
I am creating an aumi AUv3 extension and I am trying to achieve simultaneous connections to multiple other AVAudioNodes. I would like to know if it is possible to route the MIDI to different outputs inside the render process in the AUv3.
I am using connectMIDI(_:to:format:eventListBlock:) to connect the output of the AUv3 to multiple AVAudioNodes. However, when I send MIDI out of the AUv3, it gets sent to all the AudioNodes connected to it. I can't seem to find any documentation on how to route the MIDI to only one of the connected nodes. Is this possible?
                    
                  
                
                    
I'm trying to validate a low-latency HLS fragmented MP4 setup with mediastreamvalidator. I get the following error:
Error: Invalid URL
Detail:  '(null)' is not a valid URL
Source:  mediaplaylistURL.m3u8 - segmentURL.mp4
The mediastreamvalidator version is 1.23.14.
What does that error mean?
                    
                  
                
              
                
              
              
                
Topic: Media Technologies
SubTopic: Streaming
              
              
              
  
  
    
    
  
  
              
                
                
              
            
          
                    
                      I am building a video conferencing app using LiveKit in Flutter and want to implement Picture-in-Picture (PiP) mode on iOS. My goal is to display a view showing the speaker's initials or avatar during PiP mode. I successfully implemented this functionality on Android but am struggling to achieve it on iOS.
I am using a MethodChannel to communicate with the native iOS code. Here's the Flutter-side code:
import 'package:flutter/foundation.dart';
import 'package:flutter/services.dart';
class PipController {
  static const _channel = MethodChannel('pip_channel');
  static Future<void> startPiP() async {
    try {
      await _channel.invokeMethod('enterPiP');
    } catch (e) {
      if (kDebugMode) {
        print("Error starting PiP: $e");
      }
    }
  }
  static Future<void> stopPiP() async {
    try {
      await _channel.invokeMethod('exitPiP');
    } catch (e) {
      if (kDebugMode) {
        print("Error stopping PiP: $e");
      }
    }
  }
}
On the iOS side, I am using AVPictureInPictureController. Since it requires an AVPlayerLayer, I had to include a dummy video URL to initialize the AVPlayer. However, this results in the dummy video’s audio playing in the background, but no view is displayed in PiP mode.
Here’s my iOS code:
import Flutter
import UIKit
import AVKit
@main
@objc class AppDelegate: FlutterAppDelegate {
    
    var pipController: AVPictureInPictureController?
    var playerLayer: AVPlayerLayer?
    
    override func application(
        _ application: UIApplication,
        didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?
    ) -> Bool {
        
        let controller: FlutterViewController = window?.rootViewController as! FlutterViewController
        let pipChannel = FlutterMethodChannel(name: "pip_channel", binaryMessenger: controller.binaryMessenger)
        pipChannel.setMethodCallHandler { [weak self] (call: FlutterMethodCall, result: @escaping FlutterResult) in
            if call.method == "enterPiP" {
                self?.startPictureInPicture(result: result)
            } else if call.method == "exitPiP" {
                self?.stopPictureInPicture(result: result)
            } else {
                result(FlutterMethodNotImplemented)
            }
        }
        GeneratedPluginRegistrant.register(with: self)
        return super.application(application, didFinishLaunchingWithOptions: launchOptions)
    }
    
    private func startPictureInPicture(result: @escaping FlutterResult) {
        guard AVPictureInPictureController.isPictureInPictureSupported() else {
            result(FlutterError(code: "UNSUPPORTED", message: "PiP is not supported on this device.", details: nil))
            return
        }
        // Set up the AVPlayer
        let player = AVPlayer(url: URL(string: "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4")!)
        let playerLayer = AVPlayerLayer(player: player)
        self.playerLayer = playerLayer
        // Create a dummy view
        let dummyView = UIView(frame: CGRect(x: 0, y: 0, width: 1, height: 1))
        dummyView.isHidden = true
        window?.rootViewController?.view.addSubview(dummyView)
        dummyView.layer.addSublayer(playerLayer)
        playerLayer.frame = dummyView.bounds
        // Initialize PiP Controller
        pipController = AVPictureInPictureController(playerLayer: playerLayer)
        pipController?.delegate = self
        // Start playback and PiP
        player.play()
        pipController?.startPictureInPicture()
        print("Picture-in-Picture started")
        result(nil)
    }
    
    private func stopPictureInPicture(result: @escaping FlutterResult) {
        guard let pipController = pipController, pipController.isPictureInPictureActive else {
            result(FlutterError(code: "NOT_ACTIVE", message: "PiP is not currently active.", details: nil))
            return
        }
        pipController.stopPictureInPicture()
        playerLayer = nil
        self.pipController = nil
        result(nil)
    }
}
extension AppDelegate: AVPictureInPictureControllerDelegate {
    func pictureInPictureControllerDidStartPictureInPicture(_ pictureInPictureController: AVPictureInPictureController) {
        print("PiP started")
    }
    func pictureInPictureControllerDidStopPictureInPicture(_ pictureInPictureController: AVPictureInPictureController) {
        print("PiP stopped")
    }
}
Questions:
How can I implement PiP mode on iOS without using a video URL (or AVPlayerLayer)?
Is there a way to display a custom UIView (like a speaker’s initials or an avatar) in PiP mode instead of requiring a video?
Why does PiP not display any view, even though the dummy video URL is playing in the background?
I am new to iOS development and would greatly appreciate any guidance or alternative approaches to achieve this functionality. Thank you!
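For anyone comparing approaches: since iOS 15 there is a sample-buffer-based PiP content source that does not need an AVPlayerLayer. The sketch below is only an outline under that assumption — you would still have to render the avatar/initials into CMSampleBuffers and enqueue them on the display layer yourself, and the delegate stubs here are minimal:

import AVFoundation
import AVKit
import CoreMedia
import UIKit

final class AvatarPiPController: NSObject, AVPictureInPictureSampleBufferPlaybackDelegate {

    let displayLayer = AVSampleBufferDisplayLayer()   // enqueue rendered avatar frames here
    private var pipController: AVPictureInPictureController?

    func start() {
        guard AVPictureInPictureController.isPictureInPictureSupported() else { return }
        let source = AVPictureInPictureController.ContentSource(
            sampleBufferDisplayLayer: displayLayer,
            playbackDelegate: self)
        pipController = AVPictureInPictureController(contentSource: source)
        // In practice, wait until isPictureInPicturePossible before starting.
        pipController?.startPictureInPicture()
    }

    // MARK: - AVPictureInPictureSampleBufferPlaybackDelegate (minimal stubs)

    func pictureInPictureController(_ controller: AVPictureInPictureController, setPlaying playing: Bool) {}

    func pictureInPictureControllerTimeRangeForPlayback(_ controller: AVPictureInPictureController) -> CMTimeRange {
        // Report an indefinite "live" range, as for a call.
        CMTimeRange(start: .negativeInfinity, duration: .positiveInfinity)
    }

    func pictureInPictureControllerIsPlaybackPaused(_ controller: AVPictureInPictureController) -> Bool { false }

    func pictureInPictureController(_ controller: AVPictureInPictureController,
                                    didTransitionToRenderSize newRenderSize: CMVideoDimensions) {}

    func pictureInPictureController(_ controller: AVPictureInPictureController,
                                    skipByInterval skipInterval: CMTime,
                                    completion completionHandler: @escaping () -> Void) {
        completionHandler()
    }
}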
                    
                  
                
                    
                      Hello,
I am trying to follow the getting started guide. I have produced a developer token via the music kit embedding approach and can confirm I'm successfully authorized.
When I try to play music, I'm unable to hear anything. I thought it could be an auto-play problem with the browser, but it doesn't appear to be related, as I can trigger play from a button with no further success.
  const music = MusicKit.getInstance()
  try {
    await music.authorize() // successful
     const result = await music.api.music(`/v1/catalog/gb/search`, {
       term: 'Sound Travels',
       types: 'albums',
     })
    await music.play()
  } catch (error) {
    console.error('play error', error) // ! No error triggered
  }
I have searched the forum, have found similar queries but apparently none using V3 of the API.
Other potentially helpful information:
OS: macos 15.1 (24B83)
API version: V3
On localhost
Browser: Arc (chromium based), also tried on Safari,
The only difference between the two browsers is that safari appears to exit the breakpoint, whereas Arc will continue (without throwing any errors)
authorizationStatus: 3
Side note, any reason this is still in beta so many years later?
                    
                  
                
                    
The following is my playground code. Any of the Apple audio units show the plugin view; however, anything else (e.g. Kontakt, Spitfire, etc.) does not. There is no error, just a blank area where the view is expected.
import AppKit
import PlaygroundSupport
import AudioToolbox
import AVFoundation
import CoreAudioKit
let manager = AVAudioUnitComponentManager.shared()
let description = AudioComponentDescription(componentType: kAudioUnitType_MusicDevice,
                                            componentSubType: 0,
                                            componentManufacturer: 0,
                                            componentFlags: 0,
                                            componentFlagsMask: 0)
var deviceComponents = manager.components(matching: description)
var names = deviceComponents.map{$0.name}
let pluginName: String = "AUSampler" // This works
//let pluginName: String = "Kontakt" // This does not
var plugin = deviceComponents.filter{$0.name.contains(pluginName)}.first!
print("Plugin name: \(plugin.name)")
var customViewController:NSViewController?
AVAudioUnit.instantiate(with: plugin.audioComponentDescription, options: []){avAudioUnit, error in
    
    var ilip = avAudioUnit!.auAudioUnit.isLoadedInProcess
    print("Loaded in process: \(ilip)")
    
    guard error == nil else {
        print("Error: \(error!.localizedDescription)")
        return
    }
    
    print("AudioUnit successfully created.")
    
    let audioUnit = avAudioUnit!.auAudioUnit
    audioUnit.requestViewController{ vc in
        
        if let viewCtrl = vc {
            customViewController = vc
            var b = vc?.view.bounds
            PlaygroundPage.current.liveView = vc
            print("Successfully added view controller.")
        }else{
            print("Failed to load controller.")
        }
    }
    
}
                    
                  
                
                    
                      Hello!
I have a problem getting extended album info from the user's library. Note that the app is authorized to use Apple Music according to the documentation.
I get albums from the user's library with this code:
 func getLibraryAlbums() async throws -> MusicItemCollection<Album> {
        let request = MusicLibraryRequest<Album>()
        let response = try await request.response()
        return response.items
    }
This is an example of the albums request response:
{
  "data" : [
    {
      "meta" : {
        "musicKit_identifierSet" : {
          "isLibrary" : true,
          "id" : "1945382328890400383",
          "dataSources" : [
            "localLibrary",
            "legacyModel"
          ],
          "type" : "Album",
          "deviceLocalID" : {
            "databaseID" : "37336CB19CF51727",
            "value" : "1945382328890400383"
          },
          "catalogID" : {
            "kind" : "adamID",
            "value" : "1173535954"
          }
        }
      },
      "id" : "1945382328890400383",
      "type" : "library-albums",
      "attributes" : {
        "artwork" : {
          "url" : "musicKit:\/\/artwork\/transient\/{w}x{h}?id=4A2F444C%2D336D%2D49EA%2D90C8%2D13C547A5B95B",
          "width" : 0,
          "height" : 0
        },
        "genreNames" : [
          "Pop"
        ],
        "trackCount" : 1,
        "artistName" : "Сара Окс",
        "isAppleDigitalMaster" : false,
        "audioVariants" : [
          "lossless"
        ],
        "playParams" : {
          "catalogId" : "1173535954",
          "id" : "1945382328890400383",
          "musicKit_persistentID" : "1945382328890400383",
          "kind" : "album",
          "musicKit_databaseID" : "37336CB19CF51727",
          "isLibrary" : true
        },
        "name" : "Нимфомания - Single",
        "isCompilation" : false
      }
    },
    {
      "meta" : {
        "musicKit_identifierSet" : {
          "isLibrary" : true,
          "id" : "-8570883332059662437",
          "dataSources" : [
            "localLibrary",
            "legacyModel"
          ],
          "type" : "Album",
          "deviceLocalID" : {
            "value" : "-8570883332059662437",
            "databaseID" : "37336CB19CF51727"
          },
          "catalogID" : {
            "kind" : "adamID",
            "value" : "1618488499"
          }
        }
      },
      "id" : "-8570883332059662437",
      "type" : "library-albums",
      "attributes" : {
        "isCompilation" : false,
        "genreNames" : [
          "Pop"
        ],
        "trackCount" : 1,
        "artistName" : "TIMOFEEW & KURYANOVA",
        "isAppleDigitalMaster" : false,
        "audioVariants" : [
          "lossless"
        ],
        "playParams" : {
          "catalogId" : "1618488499",
          "musicKit_persistentID" : "-8570883332059662437",
          "kind" : "album",
          "id" : "-8570883332059662437",
          "musicKit_databaseID" : "37336CB19CF51727",
          "isLibrary" : true
        },
        "artwork" : {
          "url" : "musicKit:\/\/artwork\/transient\/{w}x{h}?id=BEA6DBD3%2D8E14%2D4A10%2D97BE%2D8908C7C5FC2C",
          "width" : 0,
          "height" : 0
        },
        "name" : "Не звони - Single"
      }
    },
...
  ]
}
In AlbumView, using the task: view modifier, I request extended information about the album with this code:
    func loadExtendedInfo(_ album: Album) async throws -> Album {
        let response = try await album.with([.tracks, .audioVariants, .recordLabels], preferredSource: .library)
        return response
    }
but in the response some of the fields are always nil, for example recordLabels, releaseDate, url, editorialNotes, and copyright.
Please tell me what I'm doing wrong.
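For comparison, here is a hedged sketch (an assumption on my part, not a confirmed fix) that requests the same properties with the catalog as the preferred source, since attributes like recordLabels, releaseDate, url, editorialNotes, and copyright are typically only populated on catalog-backed albums:

import MusicKit

// Sketch: ask MusicKit to prefer the catalog representation of the album,
// where the editorial/catalog-only attributes are usually filled in.
func loadCatalogBackedInfo(_ album: Album) async throws -> Album {
    try await album.with([.tracks, .audioVariants, .recordLabels], preferredSource: .catalog)
}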
                    
                  
                
                    
I'm getting a MATCH_ATTEMPT_FAILED match error every time I call matchStream in an Android Studio Java+Kotlin project. My project reads samples from the mic input using the AudioRecord class and sends them to ShazamKit via matchStream. I created a Kotlin class to handle ShazamKit. The AudioRecord is configured as mono, 16-bit.
My Kotlin class:
class ShazamKitHelper {
    val shazamScope = CoroutineScope(Dispatchers.IO + SupervisorJob())
    lateinit var streaming_session: StreamingSession
    lateinit var signature: Signature
    lateinit var catalog: ShazamCatalog
    fun createStreamingSessionAsync(developerTokenProvider: DeveloperTokenProvider, readBufferSize: Int, sampleRate: AudioSampleRateInHz
    ): CompletableFuture<Unit>{
        return CompletableFuture.supplyAsync {
            runBlocking {
                runCatching {
                    shazamScope.launch {
                        createStreamingSession(developerTokenProvider,readBufferSize,sampleRate)
                    }.join()
                }.onFailure { throwable ->
                }.getOrThrow()
            }
        }
    }
    private suspend fun createStreamingSession(developerTokenProvider:DeveloperTokenProvider,readBufferSize: Int,sampleRateInHz: AudioSampleRateInHz) {
        catalog = ShazamKit.createShazamCatalog(developerTokenProvider)
        streaming_session = (ShazamKit.createStreamingSession(
            catalog,
            sampleRateInHz,
            readBufferSize
        ) as ShazamKitResult.Success).data
    }
    fun startMatching() {
        val audioData = sharedAudioData ?: return // Return if sharedAudioData is null
        CoroutineScope(Dispatchers.IO).launch {
            runCatching {
                streaming_session.matchStream(audioData.data, audioData.meaningfulLengthInBytes, audioData.timestampInMs)
            }.onFailure { throwable ->
                Log.e("ShazamKitHelper", "Error during matchStream", throwable)
            }
        }
    }
    @JvmField
    var sharedAudioData: AudioData? = null;
    data class AudioData(val data: ByteArray, val meaningfulLengthInBytes: Int, val timestampInMs: Long)
    fun startListeningForMatches() {
        CoroutineScope(Dispatchers.IO).launch {
            streaming_session.recognitionResults().collect { matchResult ->
                when (matchResult) {
                    is MatchResult.Match -> {
                        val match = matchResult.matchedMediaItems
                        println("Match found: ${match.get(0).title} by ${match.get(0).artist}")
                    }
                    is MatchResult.NoMatch -> {
                        println("No match found")
                    }
                    is MatchResult.Error -> {
                        val error = matchResult.exception
                        println("Match error: ${error.message}")
                    }
                }
            }
        }
    }
}
My Java code reads the samples on a thread:
shazam_create_session();
                while (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING){
                    if (shazam_session_created){
                        byte[] buffer = new byte[288000];//max_shazam_seconds * sampleRate * 2];
                        audioRecord.read(buffer,0,buffer.length,AudioRecord.READ_BLOCKING);
                        helper.sharedAudioData = new ShazamKitHelper.AudioData(buffer,buffer.length,System.currentTimeMillis());
                        helper.startMatching();
                        if (!listener_called){
                            listener_called = true;
                            helper.startListeningForMatches();
                        }
                    } else{
                        SystemClock.sleep(100);
                    }
                }
private void shazam_create_session() {
            MyDeveloperTokenProvider provider = new MyDeveloperTokenProvider();
            AudioSampleRateInHz sample_rate = AudioSampleRateInHz.SAMPLE_RATE_48000;
            if (sampleRate == 44100)
                sample_rate = AudioSampleRateInHz.SAMPLE_RATE_44100;
            CompletableFuture<Unit> future = helper.createStreamingSessionAsync(provider, 288000, sample_rate);
            future.thenAccept(result -> {
                shazam_session_created = true;
            });
            future.exceptionally(throwable -> {
                Toast.makeText(mine, "Failure", Toast.LENGTH_SHORT).show();
                return null;
            });
        }
I implemented the developer token provider in Java as follows:
public static class MyDeveloperTokenProvider implements DeveloperTokenProvider {
        DeveloperToken the_token = null;
        @NonNull
        @Override
        public DeveloperToken provideDeveloperToken() {
            if (the_token == null){
                try {
                    the_token = generateDeveloperToken();
                    return the_token;
                } catch (NoSuchAlgorithmException | InvalidKeySpecException e) {
                    throw new RuntimeException(e);
                }
            } else{
                return the_token;
            }
        }
        @NonNull
        private DeveloperToken generateDeveloperToken() throws NoSuchAlgorithmException, InvalidKeySpecException {
            PKCS8EncodedKeySpec priPKCS8 = new PKCS8EncodedKeySpec(Decoders.BASE64.decode(p8));
            PrivateKey appleKey = KeyFactory.getInstance("EC").generatePrivate(priPKCS8);
            Instant now = Instant.now();
            Instant expiration = now.plus(Duration.ofDays(90));
            String jwt = Jwts.builder()
                    .header().add("alg", "ES256").add("kid", keyId).and()
                    .issuer(teamId)
                    .issuedAt(Date.from(now))
                    .expiration(Date.from(expiration))
                    .signWith(appleKey) // Specify algorithm explicitly
                    .compact();
            return new DeveloperToken(jwt);
        }
    }
                    
                  
                
                    
Does PHASE support creating new sound events at runtime? Is that implemented in the plugin for Unity as well? Does PHASE support Unity's Addressables system; are they compatible?
                    
                  
                
                    
I am unable to access the Int32 error code from the errors that CoreAudio throws as the Swift type AudioHardwareError. This is critical: there is no way to read the error code, or even to create an AudioHardwareError to test against.
do {
    _ = try AudioHardwareDevice(id: 0).streams // will throw
} catch {
    if let error = error as? AudioHardwareError { // cast to AudioHardwareError
        
        print(error) // prints error code but not the errorDescription
    }
}
How can I reliably get the error's Int32 code, or create an AudioHardwareError from an error constant? There is no way for me to handle these errors in code or run tests without knowing what the error is.
On top of that, by default the error's localizedDescription does not contain the errorDescription unless I extend AudioHardwareError with CustomStringConvertible.
extension AudioHardwareError: @retroactive CustomStringConvertible {
    public var description: String {
        return self.localizedDescription
    }
}
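For what it's worth, a sketch of one thing to try (an assumption, not verified against this API): Swift errors bridge to NSError, and the bridged code may carry the underlying OSStatus.

import CoreAudio
import Foundation

do {
    _ = try AudioHardwareDevice(id: 0).streams // will throw
} catch let error as AudioHardwareError {
    // Assumption: the bridged NSError code surfaces the OSStatus; verify against
    // a known failure (e.g. kAudioHardwareBadObjectError) before relying on it.
    let status = (error as NSError).code
    print("OSStatus candidate:", status)
}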
                    
                  
                
                    
                      I'm developing an iOS radio app that plays various HLS streams. The challenge is that some stations broadcast HLS streams containing both audio and video (example: https://svs.itworkscdn.net/smcwatarlive/smcwatar/chunks.m3u8), but I want to:
Extract and play only the audio track
Support AirPlay for audio-only streaming
Minimize data usage by not downloading video content
Technical Details:
iOS 17+
Swift 5.9
Using AVFoundation for playback
Current implementation uses AVPlayer with AVPlayerItem
Current Code Structure:
class StreamPlayer: ObservableObject {
    @Published var isPlaying = false
    private var player: AVPlayer?
    private var playerItem: AVPlayerItem?

    func playStream(url: URL) {
        let asset = AVURLAsset(url: url)
        playerItem = AVPlayerItem(asset: asset)
        player = AVPlayer(playerItem: playerItem)
        player?.play()
    }
}
Stream Analysis:
When analyzing the video stream using FFmpeg:
Input #0, hls, from 'https://svs.itworkscdn.net/smcwatarlive/smcwatar/chunks.m3u8':
  Stream #0:0: Video: h264, yuv420p(tv, bt709), 1920x1080 [SAR 1:1 DAR 16:9], 25 fps
  Stream #0:1: Audio: aac, 44100 Hz, stereo, fltp
Attempted Solutions:
Using MobileFFmpeg:
let command = [
    "-i", streamUrl,
    "-vn",
    "-acodec", "aac",
    "-ac", "2",
    "-ar", "44100",
    "-b:a", "128k",
    "-f", "mpegts",
    "udp://127.0.0.1:12345"
].joined(separator: " ")
ffmpegProcess = MobileFFmpeg.execute(command)
Issue: While FFmpeg successfully extracts audio, playback through AVPlayer doesn't work reliably.
Tried using HLS output:
let command = [
    "-i", streamUrl,
    "-vn",
    "-acodec", "aac",
    "-ac", "2",
    "-ar", "44100",
    "-b:a", "128k",
    "-f", "hls",
    "-hls_time", "2",
    "-hls_list_size", "3",
    outputUrl.path
]
Issue: Creates temporary files but faces synchronization issues with live streams.
Requirements:
Real-time audio extraction from HLS stream
Maintain live streaming capabilities
Full AirPlay support
Minimal data usage (avoid downloading video content)
Handle network interruptions gracefully
Questions:
What's the most efficient way to extract only audio from an HLS stream in real-time?
Is there a way to tell AVPlayer to ignore video tracks completely?
Are there better alternatives to FFmpeg for this specific use case?
What's the recommended approach for handling AirPlay with modified streams?
Any guidance or alternative approaches would be greatly appreciated. Thank you!
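Not an answer to the track-removal question, but for the data-usage point specifically, here is a small sketch of AVPlayerItem hints that bias HLS variant selection toward low-bandwidth renditions (assuming the stream offers such variants; this does not strip the video track, it only limits what gets downloaded):

import AVFoundation
import CoreGraphics

// Sketch: hint AVPlayer toward the lowest-bandwidth/lowest-resolution variant.
func makeAudioBiasedItem(url: URL) -> AVPlayerItem {
    let item = AVPlayerItem(asset: AVURLAsset(url: url))
    item.preferredPeakBitRate = 192_000                               // bits per second
    item.preferredMaximumResolution = CGSize(width: 426, height: 240) // cap variant resolution
    return item
}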
                    
                  
                
              
                
              
              
                
Topic: Media Technologies
SubTopic: Streaming
              
              
              
  
  
    
    
  
  
              
                
                
              
            
          
                    
After the iOS 18.2 update, videos are not playing in Netflix, Amazon Prime, and YouTube.
                    
                  
                
              
                
              
              
                
Topic: Media Technologies
SubTopic: Video
              
              
              
  
  
    
    
  
  
              
                
                
              
            
          
                    
                      I'm a new app developer and am trying to add a button that adds pictures from the photo library AND camera. I added the first function (adding pictures from the photo library) using the new-ish photoPicker, but I can't find a way to do the same thing for the camera. Should I just tough it out and use the UI View Controller struct that I've seen in all of the YouTube tutorials I've come across?
I also want the user to be able to crop the picture in the app after they take a picture.
Thanks in advance
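If you do go the UIKit route, here is a minimal sketch of the usual UIViewControllerRepresentable wrapper around UIImagePickerController; CameraPicker and onCapture are illustrative names, and allowsEditing only gives the basic system square-crop UI, so a dedicated cropper would still be a separate step:

import SwiftUI
import UIKit

struct CameraPicker: UIViewControllerRepresentable {
    var onCapture: (UIImage) -> Void
    @Environment(\.dismiss) private var dismiss

    func makeUIViewController(context: Context) -> UIImagePickerController {
        let picker = UIImagePickerController()
        picker.sourceType = .camera          // requires NSCameraUsageDescription in Info.plist
        picker.allowsEditing = true          // basic system square-crop UI after capture
        picker.delegate = context.coordinator
        return picker
    }

    func updateUIViewController(_ uiViewController: UIImagePickerController, context: Context) {}

    func makeCoordinator() -> Coordinator { Coordinator(self) }

    final class Coordinator: NSObject, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
        let parent: CameraPicker
        init(_ parent: CameraPicker) { self.parent = parent }

        func imagePickerController(_ picker: UIImagePickerController,
                                   didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
            if let image = (info[.editedImage] ?? info[.originalImage]) as? UIImage {
                parent.onCapture(image)
            }
            parent.dismiss()
        }

        func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
            parent.dismiss()
        }
    }
}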
                    
                  
                
                    
                      Hi all,
we are in the business of scanning documents and barcodes with the camera system of mobile devices. Since there is a wide variety of use cases, from scanning the tiniest barcodes and small business cards to scanning barcodes or large documents from far away, we preferably rely on the triple-camera devices, if available, with automatic constituent device switching.
This approach used to be working perfectly fine. Depending on the zoom level (we prefer to use an initial zoom value of 2.0) and the focusing distance the iPhone Pro models switched through the different camera systems at light speed: from ultra-wide to wide, tele and back. No issues at all.
Unfortunately, the new iPhone 16 Pro models behave very differently when it comes to constituent device switching based on focus distance. The switching is slow, and sometimes it does not happen at all when the focusing distance changes, especially when aiming at a distant object for a longer time and then at a very close object that is maybe 2" away. The iPhone 15 Pro here always switches immediately to the ultra-wide camera, while the iPhone 16 Pro takes at least 2-3 seconds, in rare cases up to 10 seconds, and sometimes forever to switch to the ultra-wide camera.
Of course we assumed that our code was responsible for these issues, so we experimented with restricting the devices and so on. Then we stripped more and more configuration code, but nothing we tried improved the situation.
So we ended up writing a minimal example app that demonstrates the problem. You can find the code below. Run it on various iPhones, aim at a far distance (>10 feet), and then quickly at a very close distance (<5 inches).
Here is a list of devices and our test results:
iPhone 15 Pro, iOS 17.6: very fast and reliable switching
iPhone 15 Pro, iOS 18.1: very fast and reliable switching
iPhone 13 Pro Max, iOS 15.3: very fast and reliable switching
iPhone 16 (dual-wide camera), iOS 18.1: very fast and reliable switching
iPhone 16 Pro, iOS 18.1: slow switching, unreliable
iPhone 16 Pro Max, iOS 18.1: slow switching, unreliable
Questions:
Has anyone else seen this issue, and possibly found a workaround?
Is this behaviour intended on the iPhone 16 Pro models? Can we somehow improve the switching speed?
Further, the iPhone 16 Pro models also show a jumping preview in the preview layer when they switch the active constituent device. Not dramatic, but compared to the other phones it looks like a glitch.
Thank you very much!
Kind regards,
Sebastian
import UIKit
import AVFoundation
class ViewController: UIViewController {
    
    var captureSession : AVCaptureSession!
    var captureDevice : AVCaptureDevice!
    var captureInput : AVCaptureInput!
    var previewLayer : AVCaptureVideoPreviewLayer!
    var activePrimaryConstituentToken: NSKeyValueObservation?
    var zoomToken: NSKeyValueObservation?
    
    override func viewDidLoad() {
        super.viewDidLoad()
        
    }
    
    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        checkPermissions()
        setupAndStartCaptureSession()
    }
    
    func checkPermissions() {
        let cameraAuthStatus =  AVCaptureDevice.authorizationStatus(for: AVMediaType.video)
        switch cameraAuthStatus {
        case .authorized:
            return
        case .denied:
            abort()
        case .notDetermined:
            AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler:
                                            { (authorized) in
                if(!authorized){
                    abort()
                }
            })
        case .restricted:
            abort()
        @unknown default:
            fatalError()
        }
    }
    
    func setupAndStartCaptureSession() {
        DispatchQueue.global(qos: .userInitiated).async{
            
            self.captureSession = AVCaptureSession()
            self.captureSession.beginConfiguration()
            
            if self.captureSession.canSetSessionPreset(.photo) {
                self.captureSession.sessionPreset = .photo
            }
            self.captureSession.automaticallyConfiguresCaptureDeviceForWideColor = true
            
            self.setupInputs()
            
            DispatchQueue.main.async {
                self.setupPreviewLayer()
            }
            self.captureSession.commitConfiguration()
            
            self.captureSession.startRunning()
            
            self.activePrimaryConstituentToken = self.captureDevice.observe(\.activePrimaryConstituent, options: [.new], changeHandler: { (device, change) in
                let type = device.activePrimaryConstituent!.deviceType.rawValue
                print("Device type: \(type)")
            })
            self.zoomToken = self.captureDevice.observe(\.videoZoomFactor, options: [.new], changeHandler: { (device, change) in
                let zoom = device.videoZoomFactor
                print("Zoom: \(zoom)")
            })
            
            let switchZoomFactor = 2.0
            
            DispatchQueue.main.async {
                self.setZoom(CGFloat(switchZoomFactor), animated: false)
            }
        }
    }
    
    func setupInputs() {
        
        if let device = AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: .back) {
            captureDevice = device
        } else {
            fatalError("no back camera")
        }
        
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {
            fatalError("could not create input device from back camera")
        }
        
        if !captureSession.canAddInput(input) {
            fatalError("could not add back camera input to capture session")
        }
        captureInput = input
        captureSession.addInput(input)
    }
    
    func setupPreviewLayer() {
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = self.view.layer.frame
    }
    
    func setZoom(_ value: CGFloat, animated: Bool) {
        guard let device = captureDevice else { return }
        let maxZoom: CGFloat = captureDevice.maxAvailableVideoZoomFactor
        let minZoom: CGFloat = captureDevice.minAvailableVideoZoomFactor
        
        let zoomValue = max(min(value, maxZoom), minZoom)
        let deltaZoom = Float(abs(zoomValue - device.videoZoomFactor))
        do {
            try device.lockForConfiguration()
            if animated {
                device.ramp(toVideoZoomFactor: zoomValue, withRate: max(deltaZoom * 50.0, 50.0))
            } else {
                device.videoZoomFactor = zoomValue
            }
            device.unlockForConfiguration()
        } catch {
            return
        }
    }
}
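One knob that may be worth experimenting with (an assumption, not a confirmed workaround for the iPhone 16 Pro behaviour) is the primary-constituent switching behaviour API on the virtual device, e.g. explicitly requesting unrestricted automatic switching:

import AVFoundation

// Sketch: explicitly request unrestricted automatic constituent switching.
// Whether this changes anything on the iPhone 16 Pro is untested here.
func configureSwitchingBehavior(for device: AVCaptureDevice) {
    guard device.isVirtualDevice else { return }
    do {
        try device.lockForConfiguration()
        device.setPrimaryConstituentDeviceSwitchingBehavior(.auto,
                                                            restrictedSwitchingBehaviorConditions: [])
        device.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
    }
}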
                    
                  
                
                    
I'm developing a tutorial-style tvOS app with multiple videos. The examples I've seen so far deal with only one video.
The player and source (URL) are defined before the body view:
let avPlayer = AVPlayer(url: URL(string: "https://domain.com/.../.../video.mp4")!))
and then in the body view the video is displayed
VideoPlayer(player: avPlayer)
This allows options such as stop/start etc.
When I try something similar with a video title passed into this view I can't define the player with this title variable.
var vTitle: String
var avPlayer = AVPlayer(url: URL(string: "https://domain.com/.../.../" + vTitle + ".mp4"")!))
    
    var body: some View {
        
I get an error that vTitle can't be used in the URL above the body view.
Any thoughts or suggestions? Thanks
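A hedged sketch of one common way around this (the URL below is a placeholder): build the player from the passed-in title inside the view's initializer, since one stored property can't reference another in its default value, but it can inside init:

import SwiftUI
import AVKit

struct TutorialVideoView: View {
    let vTitle: String
    @State private var avPlayer: AVPlayer

    init(vTitle: String) {
        self.vTitle = vTitle
        // Placeholder URL pattern; force-unwrap only because the title is known-good here.
        let url = URL(string: "https://domain.com/videos/\(vTitle).mp4")!
        _avPlayer = State(initialValue: AVPlayer(url: url))
    }

    var body: some View {
        VideoPlayer(player: avPlayer)
    }
}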
                    
                  
                
                    
The problem I have at the moment is that if a phone call comes in during my recording, my recording is interrupted even if I don't answer.
The interruption shows up as a frozen picture, and recording resumes automatically after the call is over, but the result is that the recorded video's audio and picture are out of sync.
By listening for AVCaptureSessionWasInterrupted I can get the interruption and its reason.
As far as I can tell, a ringing or vibrating phone can block the audio channel. I have seen other apps handle the same scenario by suppressing the ringtone or vibration, but I don't know how to do it; I have tried a lot of approaches and none of them work.
In Blackmagic Cam or the ProMovie app, when a call comes in during recording there is only a notification banner, with no ringtone or vibration, which avoids the recording interruption.
I don't know whether this requires some configuration or something you have to apply for; please let me know if it does.
                    
                  
                
                    
In an m3u8 manifest, audio EXT-X-MEDIA tags usually contain a CHANNELS attribute with the audio channel count, like so:
#EXT-X-MEDIA:TYPE=AUDIO,URI="audio_clear_eng_stereo.m3u8",GROUP-ID="default-audio-group",LANGUAGE="en",NAME="stream_5",AUTOSELECT=YES,CHANNELS="2"
Is it possible to get this info from AVPlayer, AVMediaSelectionOption or some related API?
                    
                  
                
                    
Hi, I'm working on a project that requires video frame PTS to be consistent between the original video and a transcoded one. It works fairly well with regular MP4; however, if I set preferredOutputSegmentInterval to generate fMP4 output, then even though I specified initialSegmentStartTime as 0, it always adds a one-frame PTS offset to all frames.
For example, if I use the code sample provided by Apple (https://developer.apple.com/videos/play/wwdc2020/10011/?time=406) and use ffprobe -select_streams v:0 -show_entries packet=pts_time -of csv ~/Downloads/fmp4/prog_index.m3u8 to display the PTS of the output, it doesn't start from 0 but has a one-frame PTS offset. I also tried opening it with MP4Box, and it likewise shows that the first frame's DTS and CTS do not start from 0.
However, if I use AVAssetReader to read the same output video and get the PTS of the first frame, it returns 0, so I can't use it to calculate the PTS difference between the two videos either.
Can I get some help understanding why there is a difference between AVAssetWriter/Reader's fMP4 PTS and what tools like ffprobe report?
                    
                  
                
                    
                      Hello all! I've been having this issue for a while, on my iPhone 12 Pro.
The volume when listening to music or watching YouTube, TikTok, etc. will randomly lower, but the actual volume slider won't move; it stays at max volume while the audio gets very quiet. I've followed other instructions, such as turning off audio awareness and other settings, but nothing seems to be working. It happens on phone calls too. Has anyone else had this issue and managed to fix it?
                    
                  
                
              
                
              
              
                
Topic: Media Technologies
SubTopic: Audio