Classify hand poses and actions with Create ML
With Create ML, giving your app the ability to understand the expressiveness of the human hand has never been easier. Discover how you can build on the support for Hand Pose Detection in Vision and train custom Hand Pose and Hand Action classifiers using the Create ML app and framework. Learn how simple it is to collect data, train a model, and integrate it with Vision, Camera, and ARKit to create a fun, entertaining app experience.
To learn more about Create ML and related concepts around model training, check out “Build an Action Classifier with Create ML” from WWDC20. And don't miss “Build dynamic iOS apps with the Create ML framework” to learn how your models can be trained on the fly and on device from within your app.
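The sessions above cover training in detail; as a quick orientation, here is a minimal sketch of training a hand pose classifier with the CreateML framework on macOS. The directory path, the label-folder layout, and the use of default model parameters are assumptions for illustration, not code from the session.

    import CreateML
    import Foundation

    // Assumption: a training folder whose subdirectories are named after each
    // pose class (for example "One", "Two", "Background") and contain still
    // images of hands making that pose.
    let trainingDirectory = URL(fileURLWithPath: "/path/to/HandPoseTrainingData")

    // Train with default parameters; real projects typically tune augmentations
    // and provide validation data.
    let classifier = try MLHandPoseClassifier(
        trainingData: .labeledDirectories(at: trainingDirectory),
        parameters: MLHandPoseClassifier.ModelParameters())

    // Export the trained model so it can be bundled with the app and driven by
    // the hand keypoints that Vision detects, as in the snippets below.
    try classifier.write(to: URL(fileURLWithPath: "/path/to/HandPoseClassifier.mlmodel"))

The same pattern applies to a hand action classifier, whose training data consists of labeled video clips rather than still images.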
9:31 - Detecting hands in a frame
func session(_ session: ARSession, didUpdate frame: ARFrame) {
    let pixelBuffer = frame.capturedImage

    // Configure a Vision request that detects at most one hand per frame
    let handPoseRequest = VNDetectHumanHandPoseRequest()
    handPoseRequest.maximumHandCount = 1
    handPoseRequest.revision = VNDetectHumanHandPoseRequestRevision1

    let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
    do {
        try handler.perform([handPoseRequest])
    } catch {
        assertionFailure("Hand Pose Request failed: \(error)")
    }

    guard let handPoses = handPoseRequest.results, !handPoses.isEmpty else {
        // No effects to draw, so clear out current graphics
        return
    }
    let handObservation = handPoses.first
11:03 - Predicting hand pose
if frameCounter % handPosePredictionInterval == 0 {
    guard let keypointsMultiArray = try? handObservation.keypointsMultiArray() else { fatalError() }
    let handPosePrediction = try model.prediction(poses: keypointsMultiArray)
    let confidence = handPosePrediction.labelProbabilities[handPosePrediction.label]!
    if confidence > 0.9 {
        renderHandPoseEffect(name: handPosePrediction.label)
    }
}

func renderHandPoseEffect(name: String) {
    switch name {
    case "One":
        if effectNode == nil {
            effectNode = addParticleNode(for: .one)
        }
    default:
        removeAllParticleNode()
    }
}
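The snippet above assumes that `model` and `handPosePredictionInterval` already exist. One common setup, sketched below, is to load the model once and classify every few frames; the class name HandPoseClassifier is hypothetical and simply mirrors an assumed .mlmodel file name, since Xcode generates a Swift class matching the model file.

    import CoreML

    // Hypothetical names: adjust HandPoseClassifier to match your .mlmodel file.
    let handPosePredictionInterval = 2   // classify every other frame
    let model = try HandPoseClassifier(configuration: MLModelConfiguration())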
12:25 - Getting tip of index finger to use as anchor
let landmarkConfidenceThreshold: Float = 0.2
let indexFingerName = VNHumanHandPoseObservation.JointName.indexTip
let width = viewportSize.width
let height = viewportSize.height

if let indexFingerPoint = try? observation.recognizedPoint(indexFingerName),
   indexFingerPoint.confidence > landmarkConfidenceThreshold {
    // Convert the normalized Vision coordinates into view coordinates
    let normalizedLocation = indexFingerPoint.location
    indexFingerTipLocation = CGPoint(x: normalizedLocation.x * width,
                                     y: normalizedLocation.y * height)
} else {
    indexFingerTipLocation = nil
}
15:47 - Getting hand chirality
// Working with chirality
let handPoseRequest = VNDetectHumanHandPoseRequest()
try handler.perform([handPoseRequest])
let detectedHandPoses = handPoseRequest.results!

for hand in detectedHandPoses where hand.chirality == .right {
    // Take action on every right hand, or prune the results
}
22:16 - Hand action classification by accumulating queue of hand poses
var queue = [MLMultiArray]()

// . . .

frameCounter += 1
if frameCounter % 2 == 0 {
    let hands: [(MLMultiArray, VNHumanHandPoseObservation.Chirality)] = getHands()
    for (pose, chirality) in hands where chirality == .right {
        queue.append(pose)
        // Keep only the most recent queueSize poses
        queue = Array(queue.suffix(queueSize))
        queueSamplingCounter += 1
        if queue.count == queueSize && queueSamplingCounter % queueSamplingCount == 0 {
            // Concatenate the accumulated poses into a single input for the action classifier
            let poses = MLMultiArray(concatenating: queue, axis: 0, dataType: .float32)
            let prediction = try? handActionModel?.prediction(poses: poses)
            guard let label = prediction?.label,
                  let confidence = prediction?.labelProbabilities[label] else {
                continue
            }
            if confidence > handActionConfidenceThreshold {
                DispatchQueue.main.async {
                    self.renderer?.renderHandActionEffect(name: label)
                }
            }
        }
    }
}