Making video from UIImage array with different transition animations

I was stuck on the same problem for quite a while. It is a long answer, but I hope it helps. Here is my answer in Swift 3.

Theory:

Firstly, you have to make a video from your image array. Plenty of solutions are available for this; search a bit and you will likely find one. If not, see my code below.
Secondly, you might know that a CALayer can be added over a video. If not, search for how to add a CALayer over a video.
Thirdly, you have to know how to animate a CALayer. Core Animation does that job for you, and a huge collection of sample code is available.
Now you have a CALayer and you know how to animate it. Lastly, you have to add this animated CALayer to your video.
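
In outline, the whole pipeline boils down to two calls (a sketch of the flow; the full implementations follow below):

    // 1. Render the UIImage array into a plain video file (AVAssetWriter).
    buildVideoFromImageArray()
    // 2-4. Its completion handler creates one animated CALayer per image,
    //      composites them over the video with AVVideoCompositionCoreAnimationTool,
    //      and exports the result; that all happens in exportVideoWithAnimation(),
    //      which is called from inside buildVideoFromImageArray().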

Code:

1. Create a video from your array of images

let outputSize = CGSize(width: 1920, height: 1280)
let imagesPerSecond: TimeInterval = 3 // each image will stay for 3 secs
var selectedPhotosArray = [UIImage]()
var imageArrayToVideoURL = NSURL()
let audioIsEnabled: Bool = false //if your video has no sound
var asset: AVAsset!

func buildVideoFromImageArray() {
    for image in 0..<5 {
        selectedPhotosArray.append(UIImage(named: "\(image + 1).JPG")!) // names of the images: 1.JPG, 2.JPG, 3.JPG, 4.JPG, 5.JPG
    }

    imageArrayToVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video1.MP4")
    removeFileAtURLIfExists(url: imageArrayToVideoURL)
    guard let videoWriter = try? AVAssetWriter(outputURL: imageArrayToVideoURL as URL, fileType: AVFileTypeMPEG4) else {
        fatalError("AVAssetWriter error")
    }
    let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
    guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaTypeVideo) else {
        fatalError("Negative : Can't apply the Output settings...")
    }
    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
    let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
    if videoWriter.canAdd(videoWriterInput) {
        videoWriter.add(videoWriterInput)
    }
    if videoWriter.startWriting() {
        let zeroTime = CMTimeMake(Int64(imagesPerSecond), Int32(1))
        videoWriter.startSession(atSourceTime: zeroTime)

        assert(pixelBufferAdaptor.pixelBufferPool != nil)
        let media_queue = DispatchQueue(label: "mediaInputQueue")
        videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
            let fps: Int32 = 1
            let framePerSecond: Int64 = Int64(self.imagesPerSecond)
            let frameDuration = CMTimeMake(Int64(self.imagesPerSecond), fps)
            var frameCount: Int64 = 0
            var appendSucceeded = true
            while !self.selectedPhotosArray.isEmpty {
                if videoWriterInput.isReadyForMoreMediaData {
                    let nextPhoto = self.selectedPhotosArray.remove(at: 0)
                    let lastFrameTime = CMTimeMake(frameCount * framePerSecond, fps)
                    let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                    var pixelBuffer: CVPixelBuffer? = nil
                    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
                    if let pixelBuffer = pixelBuffer, status == 0 {
                        let managedPixelBuffer = pixelBuffer
                        CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                        let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                        let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
                        context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
                        let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
                        let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
                        //let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
                        let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
                        let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
                        let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
                        let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
                        context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
                        CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                        appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                    } else {
                        print("Failed to allocate pixel buffer")
                        appendSucceeded = false
                    }
                }
                if !appendSucceeded {
                    break
                }
                frameCount += 1
            }
            videoWriterInput.markAsFinished()
            videoWriter.finishWriting { () -> Void in
                print("-----video1 url = \(self.imageArrayToVideoURL)")

                self.asset = AVAsset(url: self.imageArrayToVideoURL as URL)
                self.exportVideoWithAnimation()
            }
        })
    }
}

func removeFileAtURLIfExists(url: NSURL) {
    if let filePath = url.path {
        let fileManager = FileManager.default
        if fileManager.fileExists(atPath: filePath) {
            do {
                try fileManager.removeItem(atPath: filePath)
            } catch let error as NSError {
                print("Couldn't remove existing destination file: \(error)")
            }
        }
    }
}
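
A note on removeFileAtURLIfExists: AVAssetWriter refuses to write to a URL where a file already exists, so the old output has to be deleted first. With the properties above in place, kicking everything off is a single call (hypothetical call site):

    // e.g. in viewDidLoad; video1.MP4 lands in the app's Documents directory
    buildVideoFromImageArray()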

2. Add animations to the created video (please read all the commented sections carefully; they should clear up a few issues)

func exportVideoWithAnimation() {
    let composition = AVMutableComposition()

    let track = asset?.tracks(withMediaType: AVMediaTypeVideo)
    let videoTrack: AVAssetTrack = track![0] as AVAssetTrack
    let timerange = CMTimeRangeMake(kCMTimeZero, (asset?.duration)!)

    let compositionVideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())

    do {
        try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
        compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
    } catch {
        print(error)
    }

    // if your video has sound, you don't need this check
    if audioIsEnabled {
        let compositionAudioTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())

        for audioTrack in (asset?.tracks(withMediaType: AVMediaTypeAudio))! {
            do {
                try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
            } catch {
                print(error)
            }
        }
    }

    let size = videoTrack.naturalSize

    let videolayer = CALayer()
    videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)

    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    parentlayer.addSublayer(videolayer)

    ////////////////////////////////////////////////////////////////////////////
    // this is the animation part
    let time = [0.00001, 3, 6, 9, 12] // start times of the per-image animations; each image stays for 3 secs, that's why their difference is 3
    var imgarray = [UIImage]()

    for image in 0..<5 {
        imgarray.append(UIImage(named: "\(image + 1).JPG")!)

        let nextPhoto = imgarray[image]

        let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
        let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
        let aspectRatio = min(horizontalRatio, verticalRatio)
        let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
        let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
        let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0

        /// I showed 10 animations here. You can uncomment any one of these and export a video to see the result.

        ///#1. left->right///
        let blackLayer = CALayer()
        blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        blackLayer.backgroundColor = UIColor.black.cgColor

        let imageLayer = CALayer()
        imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        imageLayer.contents = imgarray[image].cgImage
        blackLayer.addSublayer(imageLayer)

        let animation = CABasicAnimation()
        animation.keyPath = "position.x"
        animation.fromValue = -videoTrack.naturalSize.width
        animation.toValue = 2 * (videoTrack.naturalSize.width)
        animation.duration = 3
        animation.beginTime = CFTimeInterval(time[image])
        animation.fillMode = kCAFillModeForwards
        animation.isRemovedOnCompletion = false
        blackLayer.add(animation, forKey: "basic")

        ///#2. right->left///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 2 * videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let animation = CABasicAnimation()
        // animation.keyPath = "position.x"
        // animation.fromValue = 2 * (videoTrack.naturalSize.width)
        // animation.toValue = -videoTrack.naturalSize.width
        // animation.duration = 3
        // animation.beginTime = CFTimeInterval(time[image])
        // animation.fillMode = kCAFillModeForwards
        // animation.isRemovedOnCompletion = false
        // blackLayer.add(animation, forKey: "basic")

        ///#3. top->bottom///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 0, y: 2 * videoTrack.naturalSize.height, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let animation = CABasicAnimation()
        // animation.keyPath = "position.y"
        // animation.fromValue = 2 * videoTrack.naturalSize.height
        // animation.toValue = -videoTrack.naturalSize.height
        // animation.duration = 3
        // animation.beginTime = CFTimeInterval(time[image])
        // animation.fillMode = kCAFillModeForwards
        // animation.isRemovedOnCompletion = false
        // blackLayer.add(animation, forKey: "basic")

        ///#4. bottom->top///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 0, y: -videoTrack.naturalSize.height, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let animation = CABasicAnimation()
        // animation.keyPath = "position.y"
        // animation.fromValue = -videoTrack.naturalSize.height
        // animation.toValue = 2 * videoTrack.naturalSize.height
        // animation.duration = 3
        // animation.beginTime = CFTimeInterval(time[image])
        // animation.fillMode = kCAFillModeForwards
        // animation.isRemovedOnCompletion = false
        // blackLayer.add(animation, forKey: "basic")

        ///#5. opacity(1->0)(left->right)///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let animation = CABasicAnimation()
        // animation.keyPath = "position.x"
        // animation.fromValue = -videoTrack.naturalSize.width
        // animation.toValue = 2 * (videoTrack.naturalSize.width)
        // animation.duration = 3
        // animation.beginTime = CFTimeInterval(time[image])
        // animation.fillMode = kCAFillModeForwards
        // animation.isRemovedOnCompletion = false
        // blackLayer.add(animation, forKey: "basic")
        //
        // let fadeOutAnimation = CABasicAnimation(keyPath: "opacity")
        // fadeOutAnimation.fromValue = 1
        // fadeOutAnimation.toValue = 0
        // fadeOutAnimation.duration = 3
        // fadeOutAnimation.beginTime = CFTimeInterval(time[image])
        // fadeOutAnimation.isRemovedOnCompletion = false
        // blackLayer.add(fadeOutAnimation, forKey: "opacity")

        ///#6. opacity(1->0)(right->left)///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 2 * videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let animation = CABasicAnimation()
        // animation.keyPath = "position.x"
        // animation.fromValue = 2 * videoTrack.naturalSize.width
        // animation.toValue = -videoTrack.naturalSize.width
        // animation.duration = 3
        // animation.beginTime = CFTimeInterval(time[image])
        // animation.fillMode = kCAFillModeForwards
        // animation.isRemovedOnCompletion = false
        // blackLayer.add(animation, forKey: "basic")
        //
        // let fadeOutAnimation = CABasicAnimation(keyPath: "opacity")
        // fadeOutAnimation.fromValue = 1
        // fadeOutAnimation.toValue = 0
        // fadeOutAnimation.duration = 3
        // fadeOutAnimation.beginTime = CFTimeInterval(time[image])
        // fadeOutAnimation.isRemovedOnCompletion = false
        // blackLayer.add(fadeOutAnimation, forKey: "opacity")

        ///#7. opacity(1->0)(top->bottom)///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 0, y: 2 * videoTrack.naturalSize.height, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let animation = CABasicAnimation()
        // animation.keyPath = "position.y"
        // animation.fromValue = 2 * videoTrack.naturalSize.height
        // animation.toValue = -videoTrack.naturalSize.height
        // animation.duration = 3
        // animation.beginTime = CFTimeInterval(time[image])
        // animation.fillMode = kCAFillModeForwards
        // animation.isRemovedOnCompletion = false
        // blackLayer.add(animation, forKey: "basic")
        //
        // let fadeOutAnimation = CABasicAnimation(keyPath: "opacity")
        // fadeOutAnimation.fromValue = 1
        // fadeOutAnimation.toValue = 0
        // fadeOutAnimation.duration = 3
        // fadeOutAnimation.beginTime = CFTimeInterval(time[image])
        // fadeOutAnimation.isRemovedOnCompletion = false
        // blackLayer.add(fadeOutAnimation, forKey: "opacity")

        ///#8. opacity(1->0)(bottom->top)///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 0, y: -videoTrack.naturalSize.height, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let animation = CABasicAnimation()
        // animation.keyPath = "position.y"
        // animation.fromValue = -videoTrack.naturalSize.height
        // animation.toValue = 2 * videoTrack.naturalSize.height
        // animation.duration = 3
        // animation.beginTime = CFTimeInterval(time[image])
        // animation.fillMode = kCAFillModeForwards
        // animation.isRemovedOnCompletion = false
        // blackLayer.add(animation, forKey: "basic")
        //
        // let fadeOutAnimation = CABasicAnimation(keyPath: "opacity")
        // fadeOutAnimation.fromValue = 1
        // fadeOutAnimation.toValue = 0
        // fadeOutAnimation.duration = 3
        // fadeOutAnimation.beginTime = CFTimeInterval(time[image])
        // fadeOutAnimation.isRemovedOnCompletion = false
        // blackLayer.add(fadeOutAnimation, forKey: "opacity")

        ///#9. scale(small->big->small)///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 0, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        // blackLayer.opacity = 0
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let scaleAnimation = CAKeyframeAnimation(keyPath: "transform.scale")
        // scaleAnimation.values = [0, 1.0, 0]
        // scaleAnimation.beginTime = CFTimeInterval(time[image])
        // scaleAnimation.duration = 3
        // scaleAnimation.isRemovedOnCompletion = false
        // blackLayer.add(scaleAnimation, forKey: "transform.scale")
        //
        // let fadeInOutAnimation = CABasicAnimation(keyPath: "opacity")
        // fadeInOutAnimation.fromValue = 1
        // fadeInOutAnimation.toValue = 1
        // fadeInOutAnimation.duration = 3
        // fadeInOutAnimation.beginTime = CFTimeInterval(time[image])
        // fadeInOutAnimation.isRemovedOnCompletion = false
        // blackLayer.add(fadeInOutAnimation, forKey: "opacity")

        ///#10. scale(big->small->big)///
        // let blackLayer = CALayer()
        // blackLayer.frame = CGRect(x: 0, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
        // blackLayer.backgroundColor = UIColor.black.cgColor
        // blackLayer.opacity = 0
        //
        // let imageLayer = CALayer()
        // imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
        // imageLayer.contents = imgarray[image].cgImage
        // blackLayer.addSublayer(imageLayer)
        //
        // let scaleAnimation = CAKeyframeAnimation(keyPath: "transform.scale")
        // scaleAnimation.values = [1, 0, 1]
        // scaleAnimation.beginTime = CFTimeInterval(time[image])
        // scaleAnimation.duration = 3
        // scaleAnimation.isRemovedOnCompletion = false
        // blackLayer.add(scaleAnimation, forKey: "transform.scale")
        //
        // let fadeOutAnimation = CABasicAnimation(keyPath: "opacity")
        // fadeOutAnimation.fromValue = 1
        // fadeOutAnimation.toValue = 1
        // fadeOutAnimation.duration = 3
        // fadeOutAnimation.beginTime = CFTimeInterval(time[image])
        // fadeOutAnimation.isRemovedOnCompletion = false
        // blackLayer.add(fadeOutAnimation, forKey: "opacity")

        parentlayer.addSublayer(blackLayer)
    }
    ////////////////////////////////////////////////////////////////////////////

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(1, 30)
    layercomposition.renderSize = size
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
    let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
    instruction.layerInstructions = [layerinstruction]
    layercomposition.instructions = [instruction]

    let animatedVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video2.mp4")
    removeFileAtURLIfExists(url: animatedVideoURL)

    guard let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else { return }
    assetExport.videoComposition = layercomposition
    assetExport.outputFileType = AVFileTypeMPEG4
    assetExport.outputURL = animatedVideoURL as URL
    assetExport.exportAsynchronously(completionHandler: {
        switch assetExport.status {
        case AVAssetExportSessionStatus.failed:
            print("failed \(String(describing: assetExport.error))")
        case AVAssetExportSessionStatus.cancelled:
            print("cancelled \(String(describing: assetExport.error))")
        default:
            print("Exported")
        }
    })
}
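
A side note on the 0.00001 at the start of the time array: Core Animation replaces a beginTime of exactly 0 with CACurrentMediaTime(), which breaks animations in an exported video composition. AVFoundation provides AVCoreAnimationBeginTimeAtZero for exactly this case, so the epsilon could be replaced with:

    // for an animation that should start at the very beginning of the video
    animation.beginTime = AVCoreAnimationBeginTimeAtZero // instead of 0.00001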

Don't forget to import AVFoundation.
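
As a minimal reminder, the file containing this code needs at least:

    import UIKit          // UIImage, UIColor, CALayer
    import AVFoundation   // AVAssetWriter, AVMutableComposition, AVAssetExportSession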

UIImageView transition between different images with animation

How can you do it?

You will need:

  1. A timer
  2. A counter
  3. An array for holding the image names

    var images:[String] = []

    var timer = Timer()

    var photoCount:Int = 0

In viewDidLoad, I did this for initialization:

images = ["1","2","3"]
imageView.image = UIImage.init(named: "1")
timer = Timer.scheduledTimer(timeInterval: 2.0, target: self, selector: #selector(onTransition), userInfo: nil, repeats: true)

And the magic method that does the work is below:

@objc func onTransition() { // @objc is required for #selector in Swift 4+
    if photoCount < images.count - 1 {
        photoCount += 1
    } else {
        photoCount = 0
    }

    UIView.transition(with: self.imageView, duration: 2.0, options: .transitionCrossDissolve, animations: {
        self.imageView.image = UIImage(named: self.images[self.photoCount])
    }, completion: nil)
}

Don't forget to connect your imageView reference outlet. :)

   @IBOutlet weak var imageView: UIImageView!
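
One caveat worth adding: a scheduled, repeating Timer keeps a strong reference to its target, so invalidate it when the view goes away, otherwise the view controller leaks (a minimal sketch):

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        timer.invalidate() // a scheduled Timer retains its target until invalidated
    }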

Problem converting array of images to video

So I was able to put together a working solution by combining some of the methods found in the links provided by Robin Stewart. It's worth pointing out that using them as they were didn't work for me; it was only after I made some alterations that it finally worked. Maybe that has something to do with the fact that most of them are in Swift 3 and I am using Swift 4.2.

Here is my solution:

func writeImagesAsMovie(_ allImages: [UIImage], videoPath: String, videoSize: CGSize, videoFPS: Int32, completion: @escaping (Bool) -> ()) -> Bool {

    // Use a file URL here; URL(string:) would produce a schemeless URL that AVAssetWriter rejects
    guard let assetWriter = try? AVAssetWriter(outputURL: URL(fileURLWithPath: videoPath), fileType: AVFileType.mp4) else {
        fatalError("AVAssetWriter error")
    }
    let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(videoSize.width)), AVVideoHeightKey : NSNumber(value: Float(videoSize.height))] as [String : Any]
    guard assetWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
        fatalError("Negative : Can't apply the Output settings...")
    }
    let writerInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)

    let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(videoSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(videoSize.height))]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

    if assetWriter.canAdd(writerInput) {
        assetWriter.add(writerInput)
    }

    // Start writing session
    if assetWriter.startWriting() {
        assetWriter.startSession(atSourceTime: CMTime.zero)

        // -- Create queue for <requestMediaDataWhenReadyOnQueue>
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
        let mediaQueue = DispatchQueue(label: "mediaInputQueue", attributes: [])

        // -- Set video parameters
        let frameDuration = CMTimeMake(value: 1, timescale: videoFPS)
        var frameCount = 0

        // -- Add images to video
        let numImages = allImages.count
        writerInput.requestMediaDataWhenReady(on: mediaQueue, using: { () -> Void in
            // Append unadded images to video but only while input ready
            while writerInput.isReadyForMoreMediaData && frameCount < numImages {
                let lastFrameTime = CMTimeMake(value: Int64(frameCount), timescale: videoFPS)
                let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)

                if !self.appendPixelBufferForImageAtURL(allImages[frameCount], pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
                    print("Error converting images to video: AVAssetWriterInputPixelBufferAdaptor failed to append pixel buffer")
                    completion(false)
                    return
                }

                frameCount += 1
            }

            // No more images to add? End video.
            if frameCount >= numImages {
                writerInput.markAsFinished()
                assetWriter.finishWriting {
                    if let error = assetWriter.error {
                        print("Error converting images to video: \(error)")
                        completion(false)
                    } else {
                        print("Converted images to movie @ \(videoPath)")
                        completion(true)
                    }
                }
            }
        })
    }

    return true
}
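
The code above calls a helper appendPixelBufferForImageAtURL that isn't shown in the answer. Here is a minimal sketch of what such a helper might look like, reconstructed from the pixel-buffer drawing code in the first answer; the fill-the-whole-frame drawing is an assumption, not the original implementation:

func appendPixelBufferForImageAtURL(_ image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
    // Grab a buffer from the adaptor's pool and draw the image into it.
    guard let pool = pixelBufferAdaptor.pixelBufferPool else { return false }
    var pixelBufferOut: CVPixelBuffer?
    guard CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pixelBufferOut) == kCVReturnSuccess,
          let pixelBuffer = pixelBufferOut,
          let cgImage = image.cgImage else { return false }

    CVPixelBufferLockBaseAddress(pixelBuffer, [])
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, []) }

    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    guard let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer),
                                  width: width, height: height,
                                  bitsPerComponent: 8,
                                  bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue) else { return false }

    // Stretches the image to fill the whole frame; add aspect-fit math here if needed.
    context.clear(CGRect(x: 0, y: 0, width: width, height: height))
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))

    return pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
}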

