Add a Watermark on Video After Merging Video and Audio Asset into One in Swift3 iOS

I used this code in one of my projects; maybe it will help you add a watermark.

import UIKit
import AssetsLibrary
import AVFoundation
import Photos
import SpriteKit

enum PDWatermarkPosition {
case TopLeft
case TopRight
case BottomLeft
case BottomRight
case Default
}

class PDVideoWaterMarker: NSObject {

func watermark(video videoAsset:AVAsset, watermarkText text : String, saveToLibrary flag : Bool, watermarkPosition position : PDWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
self.watermark(video: videoAsset, watermarkText: text, imageName: nil, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status, session, outputURL)
}
}

func watermark(video videoAsset:AVAsset, imageName name : String, watermarkText text : String , saveToLibrary flag : Bool, watermarkPosition position : PDWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
self.watermark(video: videoAsset, watermarkText: text, imageName: name, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status, session, outputURL)
}
}

private func watermark(video videoAsset:AVAsset, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : PDWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {

let mixComposition = AVMutableComposition()

let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

if videoAsset.tracks(withMediaType: AVMediaTypeVideo).count == 0 {
completion!(nil, nil, nil)
return
}

let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]

self.addAudioTrack(composition: mixComposition, videoAsset: videoAsset)

do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}

let videoSize = clipVideoTrack.naturalSize //CGSize(width: 375, height: 300)

print("videoSize--\(videoSize)")
let parentLayer = CALayer()

let videoLayer = CALayer()

parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
//videoLayer.backgroundColor = UIColor.red.cgColor
parentLayer.addSublayer(videoLayer)

var xPosition : CGFloat = 0.0
var yPosition : CGFloat = 0.0
let imageSize : CGFloat = 57.0

switch (position) {
case .TopLeft:
xPosition = 0
yPosition = 0
case .TopRight:
xPosition = videoSize.width - imageSize - 30
yPosition = 30
case .BottomLeft:
xPosition = 0
yPosition = videoSize.height - imageSize
case .BottomRight, .Default:
xPosition = videoSize.width - imageSize
yPosition = videoSize.height - imageSize
}

if name != nil {
let watermarkImage = UIImage(named: name)
let imageLayer = CALayer()
//imageLayer.backgroundColor = UIColor.purple.cgColor
imageLayer.contents = watermarkImage?.cgImage
imageLayer.frame = CGRect(x: xPosition, y: yPosition, width: imageSize, height: imageSize)
imageLayer.opacity = 0.65
parentLayer.addSublayer(imageLayer)
}

// The text layer is added independently of the image layer, so the text-only entry point also works
if text != nil {
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 20
titleLayer.alignmentMode = kCAAlignmentRight
titleLayer.frame = CGRect(x: 0, y: yPosition - imageSize, width: videoSize.width - imageSize/2 - 4, height: 57)
titleLayer.foregroundColor = UIColor.red.cgColor
parentLayer.addSublayer(titleLayer)
}

let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)

let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
instruction.backgroundColor = UIColor.gray.cgColor
_ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack

let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)

instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]

let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())

let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mov")

let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
exporter?.shouldOptimizeForNetworkUse = false
exporter?.videoComposition = videoComp

exporter?.exportAsynchronously() {
DispatchQueue.main.async {

if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag && UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
// Call the completion whether or not the save to the photo library succeeded,
// so the caller is always called back
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}

} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}

private func addAudioTrack(composition: AVMutableComposition, videoAsset: AVAsset) {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
let audioTracks = videoAsset.tracks(withMediaType: AVMediaTypeAudio)
for audioTrack in audioTracks {
try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
}
}

private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}

return (assetOrientation, isPortrait)
}

private func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]

let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)

var scaleToFitRatio = UIScreen.main.bounds.width / 375
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor),
at: kCMTimeZero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(CGAffineTransform(translationX: 0, y: 0))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = 375 + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: CGFloat(yFix))
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: kCMTimeZero)

}

return instruction
}
}
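
For reference, here is a minimal sketch of how the class above might be called; the video URL and the "watermark" image name are placeholders, not part of the original code:

// Placeholder input; point this at your own asset
let videoURL = URL(fileURLWithPath: NSTemporaryDirectory() + "input.mov")
let asset = AVURLAsset(url: videoURL)

let marker = PDVideoWaterMarker()
marker.watermark(video: asset, imageName: "watermark", watermarkText: "Sample", saveToLibrary: true, watermarkPosition: .BottomRight) { status, session, outputURL in
    if status == AVAssetExportSessionStatus.completed, let url = outputURL {
        print("Watermarked video written to \(url)")
    } else {
        print("Export failed: \(String(describing: session?.error))")
    }
}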

iPhone Watermark on recorded Video

Use AVFoundation. I would suggest grabbing frames with AVCaptureVideoDataOutput, then overlaying each captured frame with the watermark image, and finally writing the captured and processed frames to a file using AVAssetWriter.

Search around Stack Overflow; there are plenty of examples detailing how to do each of these steps. I haven't seen any that give code for exactly the effect you want, but you should be able to mix and match pretty easily.
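
As a rough Swift sketch of that capture pipeline (the class and property names below are placeholders, not from the question):

import AVFoundation

final class FrameGrabber: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {

    let session = AVCaptureSession()
    private let frameQueue = DispatchQueue(label: "video.frame.queue")

    func start() throws {
        guard let camera = AVCaptureDevice.default(for: .video) else { return }
        session.addInput(try AVCaptureDeviceInput(device: camera))

        let output = AVCaptureVideoDataOutput()
        output.setSampleBufferDelegate(self, queue: frameQueue)
        session.addOutput(output)
        session.startRunning()
    }

    // Every captured frame lands here; overlay the watermark on it and
    // append the result to an AVAssetWriterInput to build the output file.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // ... composite the watermark, then hand the frame to the asset writer ...
    }
}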

EDIT:

Take a look at these links:

iPhone: AVCaptureSession capture output crashing (AVCaptureVideoDataOutput) - this post might be helpful just by nature of containing relevant code.

AVCaptureVideoDataOutput will return frames as CMSampleBufferRefs.
Convert them to CGImageRefs using this code:

    - (CGImageRef) imageFromSampleBuffer:(CMSampleBufferRef) sampleBuffer // Create a CGImageRef from sample buffer data
{

CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0); // Lock the image buffer

uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0); // Get information of the image
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);

CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
/* CVBufferRelease(imageBuffer); */ // do not call this!

return newImage; // caller must CGImageRelease() the returned image to avoid leaking every frame
}

From there you would convert to a UIImage,

  UIImage *img = [UIImage imageWithCGImage:yourCGImage];  

Then use

[img drawInRect:CGRectMake(x, y, width, height)]; 

to draw the frame to a context, draw a PNG of the watermark over it, and then add the processed images to your output video using AVAssetWriter. I would suggest adding them in real time so you're not filling up memory with tons of UIImages.
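
To illustrate the overlay step, here is a small Swift sketch (the frame and watermark images, as well as the size and margins, are just assumptions):

import UIKit

// Draws `watermark` into the bottom-right corner of `frame` and returns the combined image.
func composite(frame: UIImage, watermark: UIImage) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: frame.size)
    return renderer.image { _ in
        frame.draw(in: CGRect(origin: .zero, size: frame.size))
        let markSize = CGSize(width: 57, height: 57) // illustrative size
        watermark.draw(in: CGRect(x: frame.size.width - markSize.width - 8,
                                  y: frame.size.height - markSize.height - 8,
                                  width: markSize.width,
                                  height: markSize.height),
                       blendMode: .normal,
                       alpha: 0.65)
    }
}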

How do I export UIImage array as a movie? - this post shows how to add the UIImages you have processed to a video for a given duration.

This should get you well on your way to watermarking your videos. Remember to practice good memory management, because leaking images that are coming in at 20-30fps is a great way to crash the app.

Record square video using AVFoundation and add watermark

A few things:

As far as audio goes, you're adding a video (camera) input but no audio input, so add one to get sound.

    let audioInputDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeAudio)

do {
let input = try AVCaptureDeviceInput(device: audioInputDevice)

if sourceAVFoundation.captureSession.canAddInput(input) {
sourceAVFoundation.captureSession.addInput(input)
} else {
NSLog("ERROR: Can't add audio input")
}
} catch let error {
NSLog("ERROR: Getting input device: \(error)")
}

To make the video square, you're going to have to use AVAssetWriter instead of AVCaptureFileOutput. This is more complex, but you get more "power". You've already created an AVCaptureSession, which is great; to hook up the AVAssetWriter, you'll need to do something like this:

    let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
print("Video Controller: getAssetWriter: documentDir Error")
return nil
}

let local_video_name = NSUUID().UUIDString + ".mp4"
self.videoOutputURL = documentDirectory.URLByAppendingPathComponent(local_video_name)

guard let url = self.videoOutputURL else {
return nil
}

self.assetWriter = try? AVAssetWriter(URL: url, fileType: AVFileTypeMPEG4)

guard let writer = self.assetWriter else {
return nil
}

//TODO: Set your desired video size here!
let videoSettings: [String : AnyObject] = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : captureSize.width,
AVVideoHeightKey : captureSize.height,
AVVideoCompressionPropertiesKey : [
AVVideoAverageBitRateKey : 200000,
AVVideoProfileLevelKey : AVVideoProfileLevelH264Baseline41,
AVVideoMaxKeyFrameIntervalKey : 90,
],
]

assetWriterInputCamera = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
assetWriterInputCamera?.expectsMediaDataInRealTime = true
writer.addInput(assetWriterInputCamera!)

let audioSettings : [String : AnyObject] = [
AVFormatIDKey : NSInteger(kAudioFormatMPEG4AAC),
AVNumberOfChannelsKey : 2,
AVSampleRateKey : NSNumber(double: 44100.0)
]

assetWriterInputAudio = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
assetWriterInputAudio?.expectsMediaDataInRealTime = true
writer.addInput(assetWriterInputAudio!)

Once you have the AssetWriter set up, hook up some outputs for the video and audio:

    let bufferAudioQueue = dispatch_queue_create("audio buffer delegate", DISPATCH_QUEUE_SERIAL)
let audioOutput = AVCaptureAudioDataOutput()
audioOutput.setSampleBufferDelegate(self, queue: bufferAudioQueue)
captureSession.addOutput(audioOutput)

// Always add video last...
let bufferVideoQueue = dispatch_queue_create("video buffer delegate", DISPATCH_QUEUE_SERIAL)
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: bufferVideoQueue)
captureSession.addOutput(videoOutput)
if let connection = videoOutput.connectionWithMediaType(AVMediaTypeVideo) {
if connection.supportsVideoOrientation {
// Force recording to portrait
connection.videoOrientation = AVCaptureVideoOrientation.Portrait
}

self.outputConnection = connection
}

captureSession.startRunning()

Finally, you need to capture the buffers and process them. Make sure your class is a delegate of both AVCaptureVideoDataOutputSampleBufferDelegate and AVCaptureAudioDataOutputSampleBufferDelegate.

//MARK: Implementation for AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {

if !self.isRecordingStarted {
return
}

if let audio = self.assetWriterInputAudio where connection.audioChannels.count > 0 && audio.readyForMoreMediaData {

dispatch_async(audioQueue!) {
audio.appendSampleBuffer(sampleBuffer)
}
return
}

if let camera = self.assetWriterInputCamera where camera.readyForMoreMediaData {
dispatch_async(videoQueue!) {
camera.appendSampleBuffer(sampleBuffer)
}
}
}

There are a few missing bits and pieces, but hopefully this is enough for you to figure it out along with the documentation.

Finally, if you want to add the watermark, there are many ways this can be done in real time, but one possible way is to modify the sampleBuffer and write the watermark into the image right there. You'll find other questions on Stack Overflow dealing with that.
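
One hedged sketch of that idea in Swift: lock the frame's pixel buffer, wrap it in a CGContext, and draw the watermark before handing the buffer to the writer. It assumes the capture output delivers kCVPixelFormatType_32BGRA frames; the watermark size and margins are illustrative:

import AVFoundation
import CoreGraphics

// Draws `watermark` into the frame carried by `sampleBuffer` (assumes 32BGRA pixel format).
func drawWatermark(_ watermark: CGImage, into sampleBuffer: CMSampleBuffer) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    CVPixelBufferLockBaseAddress(pixelBuffer, [])
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, []) }

    guard let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer),
                                  width: CVPixelBufferGetWidth(pixelBuffer),
                                  height: CVPixelBufferGetHeight(pixelBuffer),
                                  bitsPerComponent: 8,
                                  bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
    else { return }

    // CoreGraphics has its origin at the bottom-left, so y = 8 is near the bottom edge of the frame
    let size = CGSize(width: 57, height: 57) // illustrative watermark size
    context.draw(watermark, in: CGRect(x: CGFloat(CVPixelBufferGetWidth(pixelBuffer)) - size.width - 8,
                                       y: 8,
                                       width: size.width,
                                       height: size.height))
}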

Merging clips with AVFoundation creates single video in black

Ok, so thanks to Shawn's help I have accomplished what I was trying to do.
There were two main mistakes in my code that generated this problem. The first one was how the Start property of the CMTimeRange given to the video track was set: Start = new CMTime(0, 0) instead of Start = CMTime.Zero (presumably because a CMTime constructed with a timescale of 0 is not a usable time value, while CMTime.Zero has a valid timescale of 1). It prevented the code from displaying the video and the audio of each asset, leaving a video with the length of all the clips combined that showed only the background color of the AVMutableVideoCompositionInstruction.
The second mistake was how I set the instructions; the configuration that worked for me can be found in the following code.

Here is the final function working as correctly:

public void MergeClips()
{
//microphone
AVCaptureDevice microphone = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Audio);

AVMutableComposition mixComposition = AVMutableComposition.Create();
AVVideoCompositionLayerInstruction[] Instruction_Array = new AVVideoCompositionLayerInstruction[Clips.Count];

foreach (string clip in Clips)
{
var asset = AVUrlAsset.FromUrl(new NSUrl(clip, false)) as AVUrlAsset;
#region HoldVideoTrack

//This range applies to the video, not to the mixcomposition
CMTimeRange range = new CMTimeRange()
{
Start = CMTime.Zero,
Duration = asset.Duration
};

var duration = mixComposition.Duration;
NSError error;

AVMutableCompositionTrack videoTrack = mixComposition.AddMutableTrack(AVMediaType.Video, 0);
AVAssetTrack assetVideoTrack = asset.TracksWithMediaType(AVMediaType.Video)[0];
videoTrack.InsertTimeRange(range, assetVideoTrack, duration, out error);
videoTrack.PreferredTransform = assetVideoTrack.PreferredTransform;

if (microphone != null)
{
AVMutableCompositionTrack audioTrack = mixComposition.AddMutableTrack(AVMediaType.Audio, 0);
AVAssetTrack assetAudioTrack = asset.TracksWithMediaType(AVMediaType.Audio)[0];
audioTrack.InsertTimeRange(range, assetAudioTrack, duration, out error);
}
#endregion

#region Instructions
int counter = Clips.IndexOf(clip);
Instruction_Array[counter] = SetInstruction(asset, mixComposition.Duration, videoTrack);
#endregion
}

// 6
AVMutableVideoCompositionInstruction mainInstruction = AVMutableVideoCompositionInstruction.Create() as AVMutableVideoCompositionInstruction;

CMTimeRange rangeIns = new CMTimeRange()
{
Start = new CMTime(0, 0),
Duration = mixComposition.Duration
};
mainInstruction.TimeRange = rangeIns;
mainInstruction.LayerInstructions = Instruction_Array;

var mainComposition = AVMutableVideoComposition.Create();
mainComposition.Instructions = new AVVideoCompositionInstruction[1] { mainInstruction };
mainComposition.FrameDuration = new CMTime(1, 30);
mainComposition.RenderSize = new CGSize(mixComposition.NaturalSize.Height, mixComposition.NaturalSize.Width);

finalVideo_path = NSUrl.FromFilename(Path.Combine(Path.GetTempPath(), "Whole2.mov"));
if (File.Exists(Path.GetTempPath() + "Whole2.mov"))
{
File.Delete(Path.GetTempPath() + "Whole2.mov");
}

//... export video ...
AVAssetExportSession exportSession = new AVAssetExportSession(mixComposition, AVAssetExportSessionPreset.HighestQuality)
{
OutputUrl = NSUrl.FromFilename(Path.Combine(Path.GetTempPath(), "Whole2.mov")),
OutputFileType = AVFileType.QuickTimeMovie,
ShouldOptimizeForNetworkUse = true,
VideoComposition = mainComposition
};
exportSession.ExportAsynchronously(_OnExportDone);
}

private AVMutableVideoCompositionLayerInstruction SetInstruction(AVAsset asset, CMTime currentTime, AVAssetTrack mixComposition_video_Track)
{
var instruction = AVMutableVideoCompositionLayerInstruction.FromAssetTrack(mixComposition_video_Track);

var startTime = CMTime.Subtract(currentTime, asset.Duration);

//NaturalSize.Height is passed as a width parameter because iOS stores the video recording horizontally
CGAffineTransform translateToCenter = CGAffineTransform.MakeTranslation(mixComposition_video_Track.NaturalSize.Height, 0);
//Angle in radians, not in degrees
CGAffineTransform rotate = CGAffineTransform.Rotate(translateToCenter, (nfloat)(Math.PI / 2));

instruction.SetTransform(rotate, (CMTime.Subtract(currentTime, asset.Duration)));

instruction.SetOpacity(1, startTime);
instruction.SetOpacity(0, currentTime);

return instruction;
}

As I said I solved my problem thanks to Shawn's help, and most of this code was translated to C# from his answers, so please, if you were planning on voting up this answer, vote up Shawn's one instead, or both.

Swift: Adding Audio to AVMutableComposition

Adding the following code achieved my goal:

let compositionAudioVideo: AVMutableCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!

let audioMix: AVMutableAudioMix = AVMutableAudioMix()
var audioMixParam: [AVMutableAudioMixInputParameters] = []

let assetVideoTrack: AVAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.audio)[0]
let videoParam: AVMutableAudioMixInputParameters = AVMutableAudioMixInputParameters(track: assetVideoTrack)
videoParam.trackID = compositionAudioVideo.trackID

audioMixParam.append(videoParam)

do {
try compositionAudioVideo.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: assetVideoTrack, at: kCMTimeZero)
} catch _ {
assertionFailure()
}

audioMix.inputParameters = audioMixParam
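
Note that the AVMutableAudioMix only takes effect once it is handed to whatever consumes the composition, for example an AVAssetExportSession like the one used earlier (the exporter variable here is assumed to exist):

// Apply the mix so the input parameters above are actually used during export
exporter?.audioMix = audioMix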

Swift - Converted Audio URL to Video URL Doesn't Play in Photos Library

As Lance rightfully pointed out, the issue is that while a file was exported in the .mov or .mp4 format, it contained no video; it was just audio playing.

On reading a bit more, .mp4 (like .mov) is just a digital multimedia container format, and a container can hold audio alone, so it is entirely possible to save an audio-only file as .mp4 / .mov.

What was needed to make this work was to add an empty video track to the AVMutableComposition. Lance already posted a great solution that works perfectly well and is more self-contained than the alternative I propose here, which relies on having a blank 1 second video.

Overview of how it works

  1. You get a blank video file that is 1 second long in the resolution you want, for example 1920 x 1080
  2. You retrieve the video track from this video asset
  3. Retrieve the audio track from your audio file
  4. Create an AVMutableComposition which will be used to merge the audio and video tracks
  5. Configure an AVMutableCompositionTrack with the audio track and add that to the main AVMutableComposition
  6. Configure an AVMutableVideoComposition with the video track
  7. Use an AVAssetExportSession to export the final video with the AVMutableComposition and the AVMutableVideoComposition

The code

In most of the code below you will see multiple guard statements. You could combine them into a single guard, but with this kind of task it is useful to know exactly where the failure occurred, since there are several reasons why an export can fail.

Configuring the audio track

private func configureAudioTrack(_ audioURL: URL,
inComposition composition: AVMutableComposition) -> AVMutableCompositionTrack?
{
// Initialize an AVURLAsset with your audio file
let audioAsset: AVURLAsset = AVURLAsset(url: audioURL)

let trackTimeRange = CMTimeRange(start: .zero,
duration: audioAsset.duration)

// Get the audio track from the audio asset
guard let sourceAudioTrack = audioAsset.tracks(withMediaType: .audio).first
else
{
manageError(nil, withMessage: "Error retrieving audio track from source file")
return nil
}

// Add a new audio track to the AVMutableComposition
guard let audioTrack = composition.addMutableTrack(withMediaType: .audio,
preferredTrackID: CMPersistentTrackID())
else
{
// manage your error
return nil
}

do {
// Insert the contents of the audio source into the new audio track
try audioTrack.insertTimeRange(trackTimeRange,
of: sourceAudioTrack,
at: .zero)
}
catch {
// manage your error
}

return audioTrack
}

Configuring the video track

private func configureVideoTrack(inComposition composition: AVMutableComposition) -> AVMutableCompositionTrack?
{
// Locate the blank video file bundled with the app
guard let blankMoviePathURL = Bundle.main.url(forResource: "blank",
withExtension: "mp4")
else
{
// manage errors
return nil
}

// Initialize a video asset with the empty video file
let videoAsset = AVAsset(url: blankMoviePathURL)

// Get the video track from the empty video
guard let sourceVideoTrack = videoAsset.tracks(withMediaType: .video).first
else
{
// manage errors
return nil
}

// Insert a new video track to the AVMutableComposition
guard let videoTrack = composition.addMutableTrack(withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid)
else
{
// manage errors
return nil
}

let trackTimeRange = CMTimeRange(start: .zero,
duration: composition.duration)

do {

// Insert the contents of the video source into the new video track
try videoTrack.insertTimeRange(trackTimeRange,
of: sourceVideoTrack,
at: .zero)

}
catch {
// manage errors
}

return videoTrack
}

Configure the video composition
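
A minimal sketch of this step, using the composition and the video track returned by configureVideoTrack above; the 30 fps frame duration and the 1920 x 1080 render size (matching the blank video from step 1 of the overview) are assumptions:

private func configureVideoComposition(for composition: AVMutableComposition,
                                       videoTrack: AVMutableCompositionTrack) -> AVMutableVideoComposition
{
    // A single instruction spanning the whole composition, targeting the one video track
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)

    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    instruction.layerInstructions = [layerInstruction]

    let videoComposition = AVMutableVideoComposition()
    videoComposition.instructions = [instruction]
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30) // assumed 30 fps
    videoComposition.renderSize = CGSize(width: 1920, height: 1080)  // assumed to match the blank video

    return videoComposition
}

The resulting AVMutableVideoComposition is then set on the AVAssetExportSession's videoComposition property, alongside the AVMutableComposition itself, when exporting as described in step 7 of the overview.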


