How to Add Live Camera Preview to UIView

UPDATED TO SWIFT 5

You can try something like this:

import UIKit
import AVFoundation

class ViewController: UIViewController {
    var previewView: UIView!
    var boxView: UIView!
    let myButton: UIButton = UIButton()

    // Camera capture required properties
    var videoDataOutput: AVCaptureVideoDataOutput!
    var videoDataOutputQueue: DispatchQueue!
    var previewLayer: AVCaptureVideoPreviewLayer!
    var captureDevice: AVCaptureDevice!
    let session = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        previewView = UIView(frame: CGRect(x: 0,
                                           y: 0,
                                           width: UIScreen.main.bounds.size.width,
                                           height: UIScreen.main.bounds.size.height))
        previewView.contentMode = .scaleAspectFit
        view.addSubview(previewView)

        // Add a view on top of the camera's view
        boxView = UIView(frame: self.view.frame)

        myButton.frame = CGRect(x: 0, y: 0, width: 200, height: 40)
        myButton.backgroundColor = UIColor.red
        myButton.layer.masksToBounds = true
        myButton.setTitle("press me", for: .normal)
        myButton.setTitleColor(UIColor.white, for: .normal)
        myButton.layer.cornerRadius = 20.0
        myButton.layer.position = CGPoint(x: self.view.frame.width / 2, y: 200)
        myButton.addTarget(self, action: #selector(self.onClickMyButton(sender:)), for: .touchUpInside)

        view.addSubview(boxView)
        view.addSubview(myButton)

        self.setupAVCapture()
    }

    override var shouldAutorotate: Bool {
        if (UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft ||
            UIDevice.current.orientation == UIDeviceOrientation.landscapeRight ||
            UIDevice.current.orientation == UIDeviceOrientation.unknown) {
            return false
        } else {
            return true
        }
    }

    @objc func onClickMyButton(sender: UIButton) {
        print("button pressed")
    }
}

// AVCaptureVideoDataOutputSampleBufferDelegate protocol and related methods
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func setupAVCapture() {
        session.sessionPreset = AVCaptureSession.Preset.vga640x480
        guard let device = AVCaptureDevice
            .default(.builtInWideAngleCamera,
                     for: .video,
                     position: .back) else {
            return
        }
        captureDevice = device
        beginSession()
    }

    func beginSession() {
        var deviceInput: AVCaptureDeviceInput!

        do {
            deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            guard deviceInput != nil else {
                print("error: can't get deviceInput")
                return
            }

            if self.session.canAddInput(deviceInput) {
                self.session.addInput(deviceInput)
            }

            videoDataOutput = AVCaptureVideoDataOutput()
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
            videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)

            if session.canAddOutput(self.videoDataOutput) {
                session.addOutput(self.videoDataOutput)
            }

            videoDataOutput.connection(with: .video)?.isEnabled = true

            previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
            previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect

            let rootLayer: CALayer = self.previewView.layer
            rootLayer.masksToBounds = true
            previewLayer.frame = rootLayer.bounds
            rootLayer.addSublayer(self.previewLayer)
            session.startRunning()
        } catch let error as NSError {
            deviceInput = nil
            print("error: \(error.localizedDescription)")
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // do stuff with each frame here (called on videoDataOutputQueue)
    }

    // clean up AVCapture
    func stopCamera() {
        session.stopRunning()
    }
}

Here I use a UIView called previewView to start the camera, then add a new UIView called boxView layered above previewView, and add a UIButton to boxView.

IMPORTANT

Remember that in iOS 10 and later you need to first ask the user for permission in order to have access to the camera. You do this by adding the NSCameraUsageDescription key to your app's Info.plist together with a purpose string; if you fail to declare the usage, your app will crash when it first tries to access the camera.
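For example, you can check the authorization status and request access before starting the capture session. Here is a minimal sketch (run from viewDidLoad, for instance), reusing the setupAVCapture() method from the code above:

// Requires an NSCameraUsageDescription entry in Info.plist.
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
    self.setupAVCapture()
case .notDetermined:
    // Shows the system permission prompt the first time.
    AVCaptureDevice.requestAccess(for: .video) { granted in
        DispatchQueue.main.async {
            if granted { self.setupAVCapture() }
        }
    }
default:
    print("camera access denied or restricted")
}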

[Screenshot: the camera access request prompt]

How to add a camera preview view to three custom UIViews in iOS Swift

Finally I found the answer with the help of the JohnnySlagle/Multiple-Camera-Feeds code.

I created three views like this:

@property (weak, nonatomic) IBOutlet UIView *video1;
@property (weak, nonatomic) IBOutlet UIView *video2;
@property (weak, nonatomic) IBOutlet UIView *video3;

Then I slightly changed setupFeedViews:

- (void)setupFeedViews {
    NSUInteger numberOfFeedViews = 3;

    for (NSUInteger i = 0; i < numberOfFeedViews; i++) {
        VideoFeedView *feedView = [self setupFeedViewWithFrame:CGRectMake(0, 0, self.video1.frame.size.width, self.video1.frame.size.height)];
        feedView.tag = i + 1;
        switch (i) {
            case 0:
                [self.video1 addSubview:feedView];
                break;
            case 1:
                [self.video2 addSubview:feedView];
                break;
            case 2:
                [self.video3 addSubview:feedView];
                break;
            default:
                break;
        }
        [self.feedViews addObject:feedView];
    }
}

Then I applied the filters in the AVCaptureVideoDataOutputSampleBufferDelegate callback:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    CMFormatDescriptionRef formatDesc = CMSampleBufferGetFormatDescription(sampleBuffer);

    // update the video dimensions information
    _currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDesc);

    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

    CIImage *sourceImage = [CIImage imageWithCVPixelBuffer:(CVPixelBufferRef)imageBuffer options:nil];

    CGRect sourceExtent = sourceImage.extent;

    CGFloat sourceAspect = sourceExtent.size.width / sourceExtent.size.height;

    for (VideoFeedView *feedView in self.feedViews) {
        CGFloat previewAspect = feedView.viewBounds.size.width / feedView.viewBounds.size.height;

        // we want to maintain the aspect ratio of the screen size, so we clip the video image
        CGRect drawRect = sourceExtent;
        if (sourceAspect > previewAspect) {
            // use full height of the video image, and center crop the width
            drawRect.origin.x += (drawRect.size.width - drawRect.size.height * previewAspect) / 2.0;
            drawRect.size.width = drawRect.size.height * previewAspect;
        } else {
            // use full width of the video image, and center crop the height
            drawRect.origin.y += (drawRect.size.height - drawRect.size.width / previewAspect) / 2.0;
            drawRect.size.height = drawRect.size.width / previewAspect;
        }

        [feedView bindDrawable];

        if (_eaglContext != [EAGLContext currentContext]) {
            [EAGLContext setCurrentContext:_eaglContext];
        }

        // clear eagl view to grey
        glClearColor(0.5, 0.5, 0.5, 1.0);
        glClear(GL_COLOR_BUFFER_BIT);

        // set the blend mode to "source over" so that CI will use that
        glEnable(GL_BLEND);
        glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);

        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        // This is necessary for non-power-of-two textures
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

        if (feedView.tag == 1) {
            // feed 1: unmodified camera image
            if (sourceImage) {
                [_ciContext drawImage:sourceImage inRect:feedView.viewBounds fromRect:drawRect];
            }
        } else if (feedView.tag == 2) {
            // feed 2: vertically flipped image
            sourceImage = [sourceImage imageByApplyingTransform:CGAffineTransformMakeScale(1, -1)];
            sourceImage = [sourceImage imageByApplyingTransform:CGAffineTransformMakeTranslation(0, sourceExtent.size.height)];
            if (sourceImage) {
                [_ciContext drawImage:sourceImage inRect:feedView.viewBounds fromRect:drawRect];
            }
        } else {
            // feed 3: color-inverted image
            CIFilter *effectFilter = [CIFilter filterWithName:@"CIColorInvert"];
            [effectFilter setValue:sourceImage forKey:kCIInputImageKey];
            CIImage *invertImage = [effectFilter outputImage];
            if (invertImage) {
                [_ciContext drawImage:invertImage inRect:feedView.viewBounds fromRect:drawRect];
            }
        }

        [feedView display];
    }
}

That's it. It successfully meets my requirement.

Fill view with iOS camera preview layer

I solved it with Swift 4 like this:

Don't forget to import AVKit

// Assumes previewLayer is a property on the view controller:
// var previewLayer: AVCaptureVideoPreviewLayer!

override func viewDidLoad() {
    super.viewDidLoad()

    let captureSession = AVCaptureSession()

    guard let captureDevice = AVCaptureDevice.default(for: .video),
          let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }

    captureSession.addInput(input)
    captureSession.startRunning()

    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer.frame = view.frame
    previewLayer.videoGravity = .resizeAspectFill
    view.layer.addSublayer(previewLayer)
}

iOS/Objective-C: Display live camera preview on load without image picker controller

I just translated the example code above to Objective-C. If you want to understand more, you can look at my project FaceDetectionDemo. Hope that helps.

- (void)setupAVCapture {
    NSError *error = nil;

    // Select device
    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    if ([[UIDevice currentDevice] userInterfaceIdiom] == UIUserInterfaceIdiomPhone) {
        [session setSessionPreset:AVCaptureSessionPreset640x480];
    } else {
        [session setSessionPreset:AVCaptureSessionPresetPhoto];
    }

    AVCaptureDevice *device = [self findFrontCamera];
    if (nil == device) {
        self.isUsingFrontFacingCamera = NO;
        device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    }

    // Get the input device
    AVCaptureDeviceInput *deviceInput = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
    if (error) {
        session = nil;
        [self teardownAVCapture];
        if ([_delegate respondsToSelector:@selector(FaceDetectionComponentError:error:)]) {
            __weak typeof(self) weakSelf = self;
            dispatch_async(dispatch_get_main_queue(), ^{
                [weakSelf.delegate FaceDetectionComponentError:weakSelf error:error];
            });
        }
        return;
    }

    // Add the input to the session
    if ([session canAddInput:deviceInput]) {
        [session addInput:deviceInput];
    }

    // Make a video data output
    self.videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];

    // We want BGRA; both Core Graphics and OpenGL work well with 'BGRA'
    NSDictionary *rgbOutputSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCMPixelFormat_32BGRA]
                                                                  forKey:(id)kCVPixelBufferPixelFormatTypeKey];
    [self.videoDataOutput setVideoSettings:rgbOutputSettings];
    [self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES]; // discard if the data output queue is blocked

    self.videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
    [self.videoDataOutput setSampleBufferDelegate:self queue:self.videoDataOutputQueue];

    if ([session canAddOutput:self.videoDataOutput]) {
        [session addOutput:self.videoDataOutput];
    }

    [[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];

    self.previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
    self.previewLayer.backgroundColor = [[UIColor blackColor] CGColor];
    self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspect;

    CALayer *rootLayer = [self.previewView layer];
    [rootLayer setMasksToBounds:YES];
    [self.previewLayer setFrame:[rootLayer bounds]];
    [rootLayer addSublayer:self.previewLayer];
    [session startRunning];
}

- (AVCaptureDevice *)findFrontCamera {
    AVCaptureDevicePosition desiredPosition = AVCaptureDevicePositionFront;
    for (AVCaptureDevice *d in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
        if ([d position] == desiredPosition) {
            self.isUsingFrontFacingCamera = YES;
            return d;
        }
    }
    return nil;
}

// AVCaptureVideoDataOutputSampleBufferDelegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    // Process each frame here
}

How to render same UIView content (a camera preview and layers) into two UIViews side by side for VR Headset in Swift

I found the answer! The answer is CAReplicatorLayer, as used in Multiple-Camera-Feeds (it is not Swift, but can be refactored into Swift very easily).

Use a CAReplicatorLayer to duplicate the layer automatically. As the docs say, it will automatically create "...a specified number of copies of its sublayers (the source layer), each copy potentially having geometric, temporal and color transformations applied to it."

This is super useful if there isn't a lot of interaction with the live previews besides simple geometric or color transformations (think Photo Booth). I have most often seen CAReplicatorLayer used as a way to create the 'reflection' effect.

Here is some sample code to replicate an AVCaptureVideoPreviewLayer:

Init AVCaptureVideoPreviewLayer

AVCaptureVideoPreviewLayer *previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
[previewLayer setVideoGravity:AVLayerVideoGravityResizeAspectFill];
[previewLayer setFrame:CGRectMake(0.0, 0.0, self.view.bounds.size.width, self.view.bounds.size.height / 4)];

Init CAReplicatorLayer and set properties

Note: This will replicate the live preview layer four times.

NSUInteger replicatorInstances = 4;

CAReplicatorLayer *replicatorLayer = [CAReplicatorLayer layer];
replicatorLayer.frame = CGRectMake(0, 0, self.view.bounds.size.width, self.view.bounds.size.height / replicatorInstances);
replicatorLayer.instanceCount = replicatorInstances;
replicatorLayer.instanceTransform = CATransform3DMakeTranslation(0.0, self.view.bounds.size.height / replicatorInstances, 0.0);

Add Layers

Note: From my experience you need to add the layer you want to replicate to the CAReplicatorLayer as a sublayer.

[replicatorLayer addSublayer:previewLayer];
[self.view.layer addSublayer:replicatorLayer];
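
If you prefer Swift, the same setup translates almost line for line. A quick sketch, assuming an existing AVCaptureSession named session inside a view controller:

let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = CGRect(x: 0, y: 0,
                            width: view.bounds.width,
                            height: view.bounds.height / 4)

// Replicate the live preview layer four times, stacked vertically.
let replicatorInstances = 4
let replicatorLayer = CAReplicatorLayer()
replicatorLayer.frame = CGRect(x: 0, y: 0,
                               width: view.bounds.width,
                               height: view.bounds.height / CGFloat(replicatorInstances))
replicatorLayer.instanceCount = replicatorInstances
replicatorLayer.instanceTransform = CATransform3DMakeTranslation(
    0.0, view.bounds.height / CGFloat(replicatorInstances), 0.0)

// The layer to replicate must be added as a sublayer of the replicator.
replicatorLayer.addSublayer(previewLayer)
view.layer.addSublayer(replicatorLayer)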

Downsides

A downside to using CAReplicatorLayer is that it handles all placement of the layer replications. It applies any set transformations to each instance, and everything is contained within the replicator itself. E.g., there is no way to have a replication of an AVCaptureVideoPreviewLayer in two separate cells.

How to handle a device rotation for AVCaptureVideoPreviewLayer?

If you want to update the layer's frame on rotation, you need to create a custom UIView and override layoutSubviews(). Inside layoutSubviews(), update the frame of each sublayer.

The code looks like this:

struct CameraPreview: UIViewRepresentable {
    @ObservedObject var camera: CameraModel

    class LayerView: UIView {
        override func layoutSubviews() {
            super.layoutSubviews()
            // Disable the default implicit layer animation. Comment out the
            // `CATransaction` lines if you want to keep the animation.
            CATransaction.begin()
            CATransaction.setDisableActions(true)
            layer.sublayers?.forEach { layer in
                // use bounds, not frame: sublayer frames are expressed
                // in this view's own coordinate space
                layer.frame = bounds
            }
            CATransaction.commit()
        }
    }

    func makeUIView(context: Context) -> some UIView {
        let view = LayerView()
        camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
        camera.preview.frame = view.bounds
        camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
        view.layer.addSublayer(camera.preview)
        camera.session.startRunning()
        return view
    }

    func updateUIView(_ uiView: UIViewType, context: Context) {
    }
}
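
The snippet above references a CameraModel that the original answer does not show. For completeness, here is a hypothetical minimal sketch matching the properties CameraPreview uses; the name and shape are assumptions, not code from the answer:

import AVFoundation
import Combine

// Hypothetical model: only what CameraPreview above actually touches.
final class CameraModel: ObservableObject {
    let session = AVCaptureSession()
    var preview: AVCaptureVideoPreviewLayer!

    // Call once (e.g. from onAppear) before the preview is shown.
    func configure() {
        session.beginConfiguration()
        guard let device = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: device),
              session.canAddInput(input) else {
            session.commitConfiguration()
            return
        }
        session.addInput(input)
        session.commitConfiguration()
    }
}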

