Trying to display a bounding box over QR code
Asked Answered
O

1

7

I'm trying to display a bounding box around a detected QR code, but the box isn't showing up. Think it may be something with a mismatch in coordinate systems but not totally sure.

import SwiftUI
import AVFoundation
import Combine

struct ContentView: View {
    @StateObject private var viewModel = QRCodeScannerViewModel()

    var body: some View {
        VStack {
            if let url = viewModel.detectedURL {
                Text(url)
                    .foregroundColor(.blue)
                    .padding()
            } else {
                Text("Scanning for QR Codes...")
            }
            CameraPreview(session: viewModel.session, detectedBoundingBox: $viewModel.detectedBoundingBox)
                .onAppear {
                    viewModel.startScanning()
                }
                .onDisappear {
                    viewModel.stopScanning()
                }
        }
    }
}

struct CameraPreview: UIViewRepresentable {
    let session: AVCaptureSession
    @Binding var detectedBoundingBox: CGRect?

    func makeUIView(context: Context) -> UIView {
        let view = UIView(frame: UIScreen.main.bounds)
        let previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.frame = view.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
        guard let boundingBox = detectedBoundingBox else { return }
        
        // Transform the bounding box to the UIView's coordinate system
        let viewSize = uiView.bounds.size
        let scaleX = viewSize.width
        let scaleY = viewSize.height
        let transformedBox = CGRect(
            x: boundingBox.origin.y * scaleX,
            y: boundingBox.origin.x * scaleY,
            width: boundingBox.size.height * scaleX,
            height: boundingBox.size.width * scaleY
        )

        // Update or create the bounding box layer
        if let boundingBoxLayer = uiView.layer.sublayers?.first(where: { $0 is CAShapeLayer }) as? CAShapeLayer {
            boundingBoxLayer.frame = transformedBox
        } else {
            let boundingBoxLayer = CAShapeLayer()
            boundingBoxLayer.frame = transformedBox
            boundingBoxLayer.borderColor = UIColor.red.cgColor
            boundingBoxLayer.borderWidth = 2
            uiView.layer.addSublayer(boundingBoxLayer)
        }
    }
}


class QRCodeScannerViewModel: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var detectedURL: String?
    @Published var detectedBoundingBox: CGRect?

    let session = AVCaptureSession()
    private let videoDataOutput = AVCaptureVideoDataOutput()

    override init() {
        super.init()
        setupCaptureSession()
    }

    private func setupCaptureSession() {
        guard let device = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: device),
              session.canAddInput(input),
              session.canAddOutput(videoDataOutput) else {
            return
        }

        session.addInput(input)
        videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sampleBufferQueue"))
        session.addOutput(videoDataOutput)
    }

    func startScanning() {
        DispatchQueue.global(qos: .userInitiated).async {
            self.session.startRunning()
        }
    }

    func stopScanning() {
        session.stopRunning()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        
        let ciImage = CIImage(cvImageBuffer: pixelBuffer)
        let context = CIContext()
        let detector = CIDetector(ofType: CIDetectorTypeQRCode, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
        let features = detector?.features(in: ciImage) as? [CIQRCodeFeature]

        DispatchQueue.main.async {
            if let feature = features?.first {
                self.detectedURL = feature.messageString
                self.detectedBoundingBox = feature.bounds
            }
        }
    }
}

UPDATE: Have it...kind of working?

import SwiftUI
import AVFoundation
import Combine

struct ContentView: View {
    @StateObject private var viewModel = QRCodeScannerViewModel()

    var body: some View {
        VStack {
            if let url = viewModel.detectedURL {
                Text(url)
                    .foregroundColor(.blue)
                    .padding()
            } else {
                Text("Scanning for QR Codes...")
            }
            CameraPreview(session: viewModel.session, boundingBox: $viewModel.boundingBox, updateBoundingBox: viewModel.updateBoundingBox)
                .onAppear {
                    viewModel.startScanning()
                }
                .onDisappear {
                    viewModel.stopScanning()
                }
        }
    }
}

struct CameraPreview: UIViewRepresentable {
    let session: AVCaptureSession
    @Binding var boundingBox: CGRect
    let updateBoundingBox: (CGRect) -> Void

    func makeUIView(context: Context) -> UIView {
        let view = UIView(frame: UIScreen.main.bounds)
        let previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.frame = view.frame
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        context.coordinator.previewLayer = previewLayer
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {
        // Instead of updating the bounding box directly in the view, use a callback
        if let layer = uiView.layer.sublayers?.first(where: { $0 is CAShapeLayer }) as? CAShapeLayer {
            layer.path = UIBezierPath(rect: boundingBox).cgPath
        } else {
            let shapeLayer = CAShapeLayer()
            shapeLayer.path = UIBezierPath(rect: boundingBox).cgPath
            shapeLayer.strokeColor = UIColor.red.cgColor
            shapeLayer.fillColor = UIColor.clear.cgColor
            shapeLayer.lineWidth = 2
            uiView.layer.addSublayer(shapeLayer)
        }
    }

    func makeCoordinator() -> Coordinator {
        Coordinator(boundingBox: $boundingBox, updateBoundingBox: updateBoundingBox)
    }

    class Coordinator: NSObject {
        var previewLayer: AVCaptureVideoPreviewLayer?
        @Binding var boundingBox: CGRect
        let updateBoundingBox: (CGRect) -> Void

        init(boundingBox: Binding<CGRect>, updateBoundingBox: @escaping (CGRect) -> Void) {
            _boundingBox = boundingBox
            self.updateBoundingBox = updateBoundingBox
        }

        var boundingBoxLayer: CAShapeLayer?

     
    }
}

class QRCodeScannerViewModel: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var detectedURL: String?
    @Published var boundingBox: CGRect = .zero

    let session = AVCaptureSession()
    private let videoDataOutput = AVCaptureVideoDataOutput()

    override init() {
        super.init()
        setupCaptureSession()
    }

    private func setupCaptureSession() {
        guard let device = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: device),
              session.canAddInput(input),
              session.canAddOutput(videoDataOutput) else {
            return
        }

        session.addInput(input)
        videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sampleBufferQueue"))
        session.addOutput(videoDataOutput)
    }

    func startScanning() {
        DispatchQueue.global(qos: .userInitiated).async {
            self.session.startRunning()
        }
    }

    func stopScanning() {
        session.stopRunning()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        
        let ciImage = CIImage(cvImageBuffer: pixelBuffer)
        let context = CIContext()
        let detector = CIDetector(ofType: CIDetectorTypeQRCode, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
        let features = detector?.features(in: ciImage) as? [CIQRCodeFeature]

        DispatchQueue.main.async {
            if let feature = features?.first {
                self.detectedURL = feature.messageString
                // Convert feature bounds to view coordinates
                let boundingBox = feature.bounds
                let imageSize = ciImage.extent.size
                let viewSize = UIScreen.main.bounds.size
                let scaleX = viewSize.width / imageSize.width
                let scaleY = viewSize.height / imageSize.height
                let transformedBoundingBox = CGRect(
                    x: boundingBox.origin.x * scaleX,
                    y: viewSize.height - (boundingBox.origin.y + boundingBox.height) * scaleY,
                    width: boundingBox.width * scaleX,
                    height: boundingBox.height * scaleY
                )
                self.boundingBox = transformedBoundingBox
                // Call the callback to update the bounding box in CameraPreview
                self.updateBoundingBox(self.boundingBox)
            } else {
                self.detectedURL = nil
                self.boundingBox = .zero
                // Call the callback to update the bounding box in CameraPreview
                self.updateBoundingBox(self.boundingBox)
            }
        }
    }

    func updateBoundingBox(_ boundingBox: CGRect) {
        DispatchQueue.main.async {
            self.boundingBox = boundingBox
        }
    }
}

UPDATE 2 Getting same results when using just a static image

struct ContentView: View {
    @StateObject private var viewModel = QRCodeScannerViewModel()
    @State private var useCamera: Bool = true
    @State private var testImage: UIImage? = UIImage(named: "testImage.jpg")
    @State private var processedImage: UIImage?

    var body: some View {
        VStack {
            Toggle("Use Camera", isOn: $useCamera)
                .padding()

            if useCamera {
                if let url = viewModel.detectedURL {
                    Text(url)
                        .foregroundColor(.blue)
                        .padding()
                } else {
                    Text("Scanning for QR Codes...")
                }
                CameraPreview(session: viewModel.session)
                    .onAppear {
                        viewModel.startScanning()
                    }
                    .onDisappear {
                        viewModel.stopScanning()
                    }
            } else {
                if let image = processedImage {
                    Image(uiImage: image)
                        .resizable()
                        .scaledToFit()
                        .padding()
                }
            }
        }
        .onChange(of: useCamera) { usingCamera in
            if usingCamera {
                viewModel.startScanning()
                processedImage = nil
            } else {
                viewModel.stopScanning()
                if let image = testImage {
                    processedImage = viewModel.processImageForQRCode(image)
                }
            }
        }
    }
}



struct CameraPreview: UIViewRepresentable {
    let session: AVCaptureSession

    func makeUIView(context: Context) -> UIView {
        let view = UIView(frame: UIScreen.main.bounds)
        let previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.frame = view.frame
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        return view
    }

    func updateUIView(_ uiView: UIView, context: Context) {}
}

class QRCodeScannerViewModel: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var detectedURL: String?
    let session = AVCaptureSession()
    private let videoDataOutput = AVCaptureVideoDataOutput()

    override init() {
        super.init()
        setupCaptureSession()
    }

    private func setupCaptureSession() {
        guard let device = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: device),
              session.canAddInput(input),
              session.canAddOutput(videoDataOutput) else {
            return
        }

        session.addInput(input)
        videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sampleBufferQueue"))
        session.addOutput(videoDataOutput)
    }

    func startScanning() {
        if session.isRunning {
            return
        }
        DispatchQueue.global(qos: .userInitiated).async {
            self.session.startRunning()
        }
    }

    func stopScanning() {
        if session.isRunning {
            session.stopRunning()
        }
    }

    func processImageForQRCode(_ image: UIImage) -> UIImage {
        let ciImage = CIImage(image: image)!
        let context = CIContext()
        let detector = CIDetector(ofType: CIDetectorTypeQRCode, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
        let features = detector?.features(in: ciImage) as? [CIQRCodeFeature]

        UIGraphicsBeginImageContext(image.size)
        image.draw(at: CGPoint.zero)

        if let features = features {
            for feature in features {
                drawBoundingBox(around: feature)
            }
        }

        let processedImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()

        return processedImage ?? image
    }

    private func drawBoundingBox(around feature: CIQRCodeFeature) {
        guard let context = UIGraphicsGetCurrentContext() else { return }

        context.setStrokeColor(UIColor.red.cgColor)
        context.setLineWidth(10)
        context.addRect(feature.bounds)
        context.strokePath()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        
        let ciImage = CIImage(cvImageBuffer: pixelBuffer)
        let context = CIContext()
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
        let uiImage = UIImage(cgImage: cgImage)

        let newCIImage = CIImage(image: uiImage)
        
        let detector = CIDetector(ofType: CIDetectorTypeQRCode, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
        let features = detector?.features(in: newCIImage ?? ciImage) as? [CIQRCodeFeature]

        DispatchQueue.main.async {
            self.detectedURL = features?.first?.messageString
        }
    }
}

Ossy answered 8/4 at 15:57 Comment(3)
CIDetector is obsolete pretty much, apple recommends using Vision framework and VNRequest.Rhaetian
But yes, you are likely to have the similar coordinates issue as VNRequest does. For VNRequest we have to transpose Y coordinate (e.g. topLeft.y = height - topLeft.y (and so on for each point). I don't know if it's exactly the same issue here, but what you can do is take a very simple picture, where QR code is at top left corner. And see how wrong the shape is (print it out). Similar images with QR code at the right top corner, etc. and compare. This will give you an idea how to transform coords.Rhaetian
@timbretimbre yeah; for a couple reasons I need to use CIDetector (this is just a test project for a larger app). Want to write an answer and I'll try and accept it?Ossy
O
1

Applying this transform ended up being what I needed to do:

let transform = CGAffineTransform(scaleX: 1, y: -1).translatedBy(x: 0, y: -image.size.height)
                drawBoundingBox(around: feature.bounds.applying(transform))
Ossy answered 17/4 at 22:45 Comment(0)

© 2022 - 2024 — McMap. All rights reserved.