Load a PCM into an AVAudioPCMBuffer

Load a PCM into an AVAudioPCMBuffer

Thanks to Rhythmic Fistman's pointer in the comments, I went and loaded it myself, like this:

func loadSoundfont(_ pitch: String) {
    let path: String = Bundle.main.path(forResource: "\(self.id)/\(pitch)", ofType: "f32")!
    let url = URL(fileURLWithPath: path)

    do {
        let data = try Data(contentsOf: url)
        let format = AVAudioFormat(commonFormat: .pcmFormatFloat32,
                                   sampleRate: 44100,
                                   channels: 2,
                                   interleaved: true)!

        // frameCapacity is measured in frames, not bytes: one frame here is
        // 2 channels * 4 bytes of Float32 = 8 bytes.
        let frameCount = AVAudioFrameCount(data.count) / format.streamDescription.pointee.mBytesPerFrame
        self.buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameCount)

        // Copy the raw Float32 samples straight into the buffer's memory.
        self.buffer!.floatChannelData!.pointee.withMemoryRebound(to: UInt8.self, capacity: data.count) {
            let stream = OutputStream(toBuffer: $0, capacity: data.count)
            stream.open()
            _ = data.withUnsafeBytes { (rawBuffer: UnsafeRawBufferPointer) -> Int in
                stream.write(rawBuffer.bindMemory(to: UInt8.self).baseAddress!, maxLength: data.count)
            }
            stream.close()
        }

        // Mark how many valid frames the buffer now contains.
        self.buffer!.frameLength = frameCount

    } catch let error as NSError {
        print("ERROR HERE", error.localizedDescription)
    }
}
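
For context (this part is my own sketch, not from the original answer), a minimal way to play the result, assuming buffer is the AVAudioPCMBuffer filled by loadSoundfont above:

// Hypothetical playback sketch: attach a player node and schedule the buffer.
let engine = AVAudioEngine()
let player = AVAudioPlayerNode()

engine.attach(player)
engine.connect(player, to: engine.mainMixerNode, format: buffer.format)

try! engine.start()
player.scheduleBuffer(buffer, completionHandler: nil)   // or the options variant with .loops
player.play()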

Convert audio into raw PCM data of type [Double]

But AVAudioFormat does support .pcmFormatFloat64:

let format = AVAudioFormat(commonFormat: .pcmFormatFloat64, sampleRate: file.fileFormat.sampleRate, channels: 1, interleaved: false)!

Maybe you mean AVAudioPCMBuffer doesn't have a float64ChannelData convenience property?

That's fine: you can use AVAudioPCMBuffer's superclass, AVAudioBuffer, which has everything you need to get at the raw Double/Float64 samples:

let abl = buf.audioBufferList.pointee
let doubles = abl.mBuffers.mData!.assumingMemoryBound(to: Double.self)
doubles[0] // etc...

In full:

// Open the file so that its processing format is deinterleaved Float64;
// the read buffer must match the processing format or the read will fail.
let file = try! AVAudioFile(forReading: self.soundFileURL,
                            commonFormat: .pcmFormatFloat64,
                            interleaved: false)
let format = file.processingFormat

let buf = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(file.length))!
try! file.read(into: buf)
let abl = buf.audioBufferList.pointee
let doubles = abl.mBuffers.mData!.assumingMemoryBound(to: Float64.self)
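
If the goal is literally a Swift [Double] (as in the question title), one further step, a sketch of mine using buf and doubles from above, copies channel 0's samples into an array:

// Copy the Float64 samples for channel 0 out of the buffer into a Swift array.
let sampleCount = Int(buf.frameLength)
let channel0: [Double] = Array(UnsafeBufferPointer(start: doubles, count: sampleCount))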

How can MP3 data in memory be loaded into an AVAudioPCMBuffer in Swift?

I went down the rabbit hole on this one. Here is what probably amounts to a Rube Goldberg-esque solution:

A lot of the pain comes from calling the C Audio File APIs from Swift.

func data_AudioFile_ReadProc(_ inClientData: UnsafeMutableRawPointer, _ inPosition: Int64, _ requestCount: UInt32, _ buffer: UnsafeMutableRawPointer, _ actualCount: UnsafeMutablePointer<UInt32>) -> OSStatus {
    let data = inClientData.assumingMemoryBound(to: Data.self).pointee
    let bufferPointer = UnsafeMutableRawBufferPointer(start: buffer, count: Int(requestCount))
    // Clamp the requested range so reads near the end of the data can't run past it.
    let start = min(Int(inPosition), data.count)
    let end = min(start + Int(requestCount), data.count)
    let copied = data.copyBytes(to: bufferPointer, from: start ..< end)
    actualCount.pointee = UInt32(copied)
    return noErr
}

func data_AudioFile_GetSizeProc(_ inClientData: UnsafeMutableRawPointer) -> Int64 {
    let data = inClientData.assumingMemoryBound(to: Data.self).pointee
    return Int64(data.count)
}

extension Data {
    func convertedTo(_ format: AVAudioFormat) -> AVAudioPCMBuffer? {
        var data = self

        // Open an AudioFile that reads from the in-memory Data via the callbacks above.
        var af: AudioFileID? = nil
        var status = AudioFileOpenWithCallbacks(&data, data_AudioFile_ReadProc, nil, data_AudioFile_GetSizeProc(_:), nil, 0, &af)
        guard status == noErr, af != nil else {
            return nil
        }

        defer {
            AudioFileClose(af!)
        }

        // Wrap it in an ExtAudioFile so Core Audio decodes and converts for us.
        var eaf: ExtAudioFileRef? = nil
        status = ExtAudioFileWrapAudioFileID(af!, false, &eaf)
        guard status == noErr, eaf != nil else {
            return nil
        }

        defer {
            ExtAudioFileDispose(eaf!)
        }

        // Ask ExtAudioFile to hand us samples in the requested client format.
        var clientFormat = format.streamDescription.pointee
        status = ExtAudioFileSetProperty(eaf!, kExtAudioFileProperty_ClientDataFormat, UInt32(MemoryLayout.size(ofValue: clientFormat)), &clientFormat)
        guard status == noErr else {
            return nil
        }

        if let channelLayout = format.channelLayout {
            var clientChannelLayout = channelLayout.layout.pointee
            status = ExtAudioFileSetProperty(eaf!, kExtAudioFileProperty_ClientChannelLayout, UInt32(MemoryLayout.size(ofValue: clientChannelLayout)), &clientChannelLayout)
            guard status == noErr else {
                return nil
            }
        }

        // Total length in frames, so the destination buffer can be sized up front.
        var frameLength: Int64 = 0
        var propertySize: UInt32 = UInt32(MemoryLayout.size(ofValue: frameLength))
        status = ExtAudioFileGetProperty(eaf!, kExtAudioFileProperty_FileLengthFrames, &propertySize, &frameLength)
        guard status == noErr else {
            return nil
        }

        guard let pcmBuffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(frameLength)) else {
            return nil
        }

        // Scratch buffers for ExtAudioFileRead (one per channel when deinterleaved).
        let bufferSizeFrames = 512
        let bufferSizeBytes = Int(format.streamDescription.pointee.mBytesPerFrame) * bufferSizeFrames
        let numBuffers = format.isInterleaved ? 1 : Int(format.channelCount)
        let numInterleavedChannels = format.isInterleaved ? Int(format.channelCount) : 1
        let audioBufferList = AudioBufferList.allocate(maximumBuffers: numBuffers)
        for i in 0 ..< numBuffers {
            audioBufferList[i] = AudioBuffer(mNumberChannels: UInt32(numInterleavedChannels), mDataByteSize: UInt32(bufferSizeBytes), mData: malloc(bufferSizeBytes))
        }

        defer {
            for buffer in audioBufferList {
                free(buffer.mData)
            }
            free(audioBufferList.unsafeMutablePointer)
        }

        // Decode in 512-frame chunks, appending each chunk to the PCM buffer.
        while true {
            var frameCount: UInt32 = UInt32(bufferSizeFrames)
            status = ExtAudioFileRead(eaf!, &frameCount, audioBufferList.unsafeMutablePointer)
            guard status == noErr else {
                return nil
            }

            if frameCount == 0 {
                break
            }

            let src = audioBufferList
            let dst = UnsafeMutableAudioBufferListPointer(pcmBuffer.mutableAudioBufferList)

            if src.count != dst.count {
                return nil
            }

            for i in 0 ..< src.count {
                let srcBuf = src[i]
                let dstBuf = dst[i]
                // dstBuf.mDataByteSize reflects the frames already written, so this appends.
                memcpy(dstBuf.mData?.advanced(by: Int(dstBuf.mDataByteSize)), srcBuf.mData, Int(srcBuf.mDataByteSize))
            }

            pcmBuffer.frameLength += frameCount
        }

        return pcmBuffer
    }
}

A more robust solution would probably read the sample rate and channel count and give the option to preserve them.
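
As an illustration of that idea (a sketch only, not something the code above does), the source file's own sample rate and channel count can be queried from the ExtAudioFile before choosing the client format; eaf here is the ExtAudioFileRef from inside convertedTo(_:):

// Query the decoded file's native format before picking a client format.
var fileDataFormat = AudioStreamBasicDescription()
var fileDataFormatSize = UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
if ExtAudioFileGetProperty(eaf!, kExtAudioFileProperty_FileDataFormat, &fileDataFormatSize, &fileDataFormat) == noErr {
    // A client format that keeps the source's sample rate and channel count.
    let preservingFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32,
                                         sampleRate: fileDataFormat.mSampleRate,
                                         channels: AVAudioChannelCount(fileDataFormat.mChannelsPerFrame),
                                         interleaved: false)
    // ...then use preservingFormat! in place of the caller-supplied format.
}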

Tested using:

let url = URL(fileURLWithPath: "/tmp/test.mp3")
let data = try! Data(contentsOf: url)

let format = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)!
if let d = data.convertedTo(format) {
let avf = try! AVAudioFile(forWriting: URL(fileURLWithPath: "/tmp/foo.wav"), settings: format.settings, commonFormat: format.commonFormat, interleaved: format.isInterleaved)
try! avf.write(from: d)
}

How can I resize a PCM audio sample buffer to make it longer?

Usually linear interpolation works. What is the bit resolution of your PCM file? If it is 16 bits (pretty typical), you'll first have to assemble each pair of bytes into a single sample value before applying the interpolation, and then disassemble the values back into bytes afterwards. You will also need to know the byte order, as it can be either little-endian or big-endian.
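
For illustration (my own sketch, not from the answer: it assumes a single mono channel and little-endian byte order, and all names are made up), the assemble / interpolate / disassemble steps could look like this:

// Stretch mono 16-bit little-endian PCM bytes by linear interpolation.
// stretch > 1 lengthens the sound (and lowers the pitch at the original rate).
func stretchPCM16LE(_ input: [UInt8], by stretch: Double) -> [UInt8] {
    // 1. Reassemble byte pairs into Int16 samples (little-endian).
    var samples = [Int16](repeating: 0, count: input.count / 2)
    for i in 0 ..< samples.count {
        samples[i] = Int16(bitPattern: UInt16(input[2 * i]) | (UInt16(input[2 * i + 1]) << 8))
    }
    guard samples.count >= 2 else { return input }

    // 2. Resample by linearly interpolating between neighbouring samples.
    let outCount = Int(Double(samples.count) * stretch)
    var out = [Int16](repeating: 0, count: outCount)
    for j in 0 ..< outCount {
        let pos = min(Double(j) / stretch, Double(samples.count - 1))
        let i = min(Int(pos), samples.count - 2)
        let frac = pos - Double(i)
        let value = Double(samples[i]) * (1.0 - frac) + Double(samples[i + 1]) * frac
        out[j] = Int16(value.rounded())
    }

    // 3. Disassemble the samples back into little-endian bytes.
    var bytes = [UInt8]()
    bytes.reserveCapacity(out.count * 2)
    for s in out {
        let u = UInt16(bitPattern: s)
        bytes.append(UInt8(u & 0xFF))
        bytes.append(UInt8(u >> 8))
    }
    return bytes
}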

EDIT: I should have added that the pitch will drop with this method of lengthening the file, unless the playback sample rate is increased to match. Stretching a sound out in time without affecting its pitch is considerably more complicated.
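
If keeping the original pitch is actually the goal, one option on Apple platforms (a sketch of mine, unrelated to the raw-byte approach above) is to let AVAudioUnitTimePitch do the time stretching inside an AVAudioEngine graph:

import AVFoundation

// Play at half speed: twice the duration, pitch unchanged.
let engine = AVAudioEngine()
let player = AVAudioPlayerNode()
let timePitch = AVAudioUnitTimePitch()
timePitch.rate = 0.5

engine.attach(player)
engine.attach(timePitch)
engine.connect(player, to: timePitch, format: nil)
engine.connect(timePitch, to: engine.mainMixerNode, format: nil)
// ...schedule a file or buffer on `player`, start the engine, and play.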

Getting AVAudioPCMBuffer working (AVAudioFile.mm error code -50)

The AVAudioPCMBuffer's pcmFormat has to be the AVAudioFile's .processingFormat, not its .fileFormat. I thought these were the same, but that's not the case!
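
In other words (a minimal sketch; the URL is a placeholder):

let file = try! AVAudioFile(forReading: URL(fileURLWithPath: "/path/to/sound.caf"))

// Allocate the buffer with the processing format, not the on-disk fileFormat.
let buffer = AVAudioPCMBuffer(pcmFormat: file.processingFormat,
                              frameCapacity: AVAudioFrameCount(file.length))!
try! file.read(into: buffer)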

-50 error when converting PCM buffer with AVAudioConverter

An Apple engineer answered this on their dev forums. I had missed that the convert(to:from:) variant of AVAudioConverter can't change the sample rate, so you have to use the convert(to:error:withInputFrom:) variant. The docs on that aren't too clear, but I came up with:

private func pcmBufferForFile(filename: String, sampleRate: Float) -> AVAudioPCMBuffer {

    guard let newFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1) else {
        preconditionFailure()
    }
    guard let url = Bundle.main.url(forResource: filename, withExtension: "wav") else {
        preconditionFailure()
    }
    guard let audioFile = try? AVAudioFile(forReading: url) else {
        preconditionFailure()
    }
    guard let tempBuffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat,
                                            frameCapacity: AVAudioFrameCount(audioFile.length)) else {
        preconditionFailure()
    }

    // Size the destination buffer for the new sample rate.
    let conversionRatio = sampleRate / Float(tempBuffer.format.sampleRate)
    let newLength = Float(audioFile.length) * conversionRatio
    guard let newBuffer = AVAudioPCMBuffer(pcmFormat: newFormat,
                                           frameCapacity: AVAudioFrameCount(newLength)) else {
        preconditionFailure()
    }

    do { try audioFile.read(into: tempBuffer) } catch {
        preconditionFailure()
    }
    guard let converter = AVAudioConverter(from: audioFile.processingFormat, to: newFormat) else {
        preconditionFailure()
    }
    var error: NSError?
    // The input block hands the converter the source buffer; conversion stops
    // once newBuffer's capacity is filled.
    converter.convert(to: newBuffer, error: &error, withInputFrom: { (packetCount, statusPtr) -> AVAudioBuffer? in
        statusPtr.pointee = .haveData
        return tempBuffer
    })
    if error != nil {
        print("*** Conversion error: \(error!)")
    }
    return newBuffer
}
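
Hypothetical call site (the resource name is made up):

// Resample a bundled "tone.wav" to a 16 kHz mono buffer.
let buffer = pcmBufferForFile(filename: "tone", sampleRate: 16000)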

