MediaStream Capture Canvas and Audio Simultaneously

Is it possible to create a MediaStream containing MediaStreamTrack instances from two different sources/elements?

Yes, you can do it using the MediaStream.addTrack() method, or new MediaStream([track1, track2]).


The OP already knew how to get all of these, but here is a reminder for future readers:

  • To get a video stream track from a <canvas>, you can call the canvas.captureStream(framerate) method.

  • To get an audio stream track from a <video> element, you can use the Web Audio API and its createMediaStreamDestination() method.
    This will return a MediaStreamAudioDestinationNode (dest) whose stream property contains our audio stream. You'll then have to connect a MediaElementAudioSourceNode, created from your <video> element, to this dest; see the sketch after this list.
    If you need to add more audio tracks to this stream, connect all of these sources to dest.
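
A minimal sketch of that audio part, assuming an existing <video> element stored in a variable videoEl (the name is illustrative):

const audioCtx = new AudioContext();
// dest.stream is a MediaStream holding a single audio track
const dest = audioCtx.createMediaStreamDestination();
const sourceNode = audioCtx.createMediaElementSource(videoEl);
sourceNode.connect(dest);                 // feed the captured stream
sourceNode.connect(audioCtx.destination); // optional: keep the audio audible
const audioStream = dest.stream;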

Now that we've got two streams, one for the <canvas> video and one for the audio, we can either add the audio track to the canvas stream before we initialize the recorder:

canvasStream.addTrack(audioStream.getAudioTracks()[0]);
const recorder = new MediaRecorder(canvasStream);

or we can create a third MediaStream object from these two tracks:

const [videoTrack] = canvasStream.getVideoTracks();
const [audioTrack] = audioStream.getAudioTracks();
const recordedStream = new MediaStream([videoTrack, audioTrack]);
const recorder = new MediaRecorder(recordedStream);

Here is a complete example:

var btn = document.querySelector("button"),
    canvas,
    cStream,
    aStream,
    vid,
    recorder,
    analyser,
    dataArray,
    bufferLength,
    chunks = [];

function clickHandler() {

  btn.textContent = 'stop recording';

  if (!aStream) {
    initAudioStream();
  }

  cStream = canvas.captureStream(30);
  cStream.addTrack(aStream.getAudioTracks()[0]);

  recorder = new MediaRecorder(cStream);
  recorder.start();

  recorder.ondataavailable = saveChunks;
  recorder.onstop = exportStream;

  btn.onclick = stopRecording;
}

function exportStream(e) {

  if (chunks.length) {

    var blob = new Blob(chunks, { type: chunks[0].type });
    var vidURL = URL.createObjectURL(blob);
    var vid = document.createElement('video');
    vid.controls = true;
    vid.src = vidURL;
    // the event is 'ended', so the handler property is onended
    vid.onended = function() {
      URL.revokeObjectURL(vidURL);
    };
    document.body.insertBefore(vid, canvas);

  } else {

    document.body.insertBefore(document.createTextNode('no data saved'), canvas);

  }
}

function saveChunks(e) {
  e.data.size && chunks.push(e.data);
}

function stopRecording() {
  vid.pause();
  btn.remove();
  recorder.stop();
}

function initAudioStream() {

  var audioCtx = new AudioContext();
  // create a stream from our AudioContext
  var dest = audioCtx.createMediaStreamDestination();
  aStream = dest.stream;
  // connect our video element's output to the stream
  var sourceNode = audioCtx.createMediaElementSource(vid);
  sourceNode.connect(dest);
  // start the video
  vid.play();

  // just for the fancy canvas drawings
  analyser = audioCtx.createAnalyser();
  sourceNode.connect(analyser);

  analyser.fftSize = 2048;
  bufferLength = analyser.frequencyBinCount;
  dataArray = new Uint8Array(bufferLength);
  analyser.getByteTimeDomainData(dataArray);

  // output to our headphones
  sourceNode.connect(audioCtx.destination);

  startCanvasAnim();
}
function enableButton() {
  vid.oncanplay = null;
  btn.onclick = clickHandler;
  btn.disabled = false;
}

var loadVideo = function() {
  vid = document.createElement('video');
  vid.crossOrigin = 'anonymous';
  vid.oncanplay = enableButton;
  vid.src = 'https://dl.dropboxusercontent.com/s/bch2j17v6ny4ako/movie720p.mp4';
};

function startCanvasAnim() {
  // from MDN https://developer.mozilla.org/en/docs/Web/API/AnalyserNode#Examples
  canvas = Object.assign(document.createElement("canvas"), { width: 500, height: 200 });
  document.body.prepend(canvas);
  var canvasCtx = canvas.getContext('2d');

  canvasCtx.fillStyle = 'rgb(200, 200, 200)';
  canvasCtx.lineWidth = 2;
  canvasCtx.strokeStyle = 'rgb(0, 0, 0)';

  var draw = function() {

    requestAnimationFrame(draw);

    analyser.getByteTimeDomainData(dataArray);

    canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
    canvasCtx.beginPath();

    var sliceWidth = canvas.width * 1.0 / bufferLength;
    var x = 0;

    for (var i = 0; i < bufferLength; i++) {

      var v = dataArray[i] / 128.0;
      var y = v * canvas.height / 2;

      if (i === 0) {
        canvasCtx.moveTo(x, y);
      } else {
        canvasCtx.lineTo(x, y);
      }

      x += sliceWidth;
    }

    canvasCtx.lineTo(canvas.width, canvas.height / 2);
    canvasCtx.stroke();
  };

  draw();
}

loadVideo();
button { vertical-align: top }
<button disabled>record</button>

HTML using MediaRecorder record canvas with a video inside

  • Why does the recorded video have no audio from the video drawn inside the canvas?

Canvases don't have audio.

  • Is there a way to concat songs into the recorded video at a specific timestamp?

Short of remuxing everything yourself, not really. But you probably don't need to do this anyway. Simply play back your audio at the same time and record both simultaneously.

Make a new MediaStream that combines the video track from the CanvasCaptureMediaStream, and the audio track from wherever you want it. You can use .getVideoTracks() and .getAudioTracks() on other streams, and instantiate a new stream with an array of tracks.

const stream = new MediaStream([audioTrack, videoTrack]);
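
For instance, a sketch of where those two tracks might come from (canvas and someAudioStream are assumptions here; the audio stream could come from getUserMedia or a MediaStreamAudioDestinationNode):

const [videoTrack] = canvas.captureStream(30).getVideoTracks();
const [audioTrack] = someAudioStream.getAudioTracks();
const stream = new MediaStream([audioTrack, videoTrack]);
const recorder = new MediaRecorder(stream); // records both tracks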

webrtc captureStream from canvas and capture audio from video

You need to create a new MediaStream with the input stream's audio track and the canvas stream's video track.

Then record the new stream, so the recorder output (blobs) will contain both audio and video.

var options = { mimeType: 'video/webm' };
var recordedBlobs = [];
var newStream = new MediaStream();
newStream.addTrack(inputStream.getAudioTracks()[0]);
newStream.addTrack(canvasStream.getVideoTracks()[0]);
mediaRecorder = new MediaRecorder(newStream, options);
mediaRecorder.ondataavailable = function (event) {
  if (event.data && event.data.size > 0) {
    recordedBlobs.push(event.data);
  }
};
mediaRecorder.start(1000); // timeslice: emit a blob every 1000 ms
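
When recording ends, the collected blobs can be stitched into a single file. A minimal sketch (this onstop handler is an assumption, not part of the original demo):

mediaRecorder.onstop = function () {
  // combine the recorded chunks into one playable Blob
  var superBlob = new Blob(recordedBlobs, { type: 'video/webm' });
  var url = URL.createObjectURL(superBlob);
  // e.g. set url as the src of a <video>, or offer it as a download
};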


how to add a audio stream on canvas stream in webrtc

Updated on 10-29-2018 to replace getAudioTracks with getTracks:

var canvasStream = canvas2d.captureStream(25); // parameter is optional

// get first audio track
// var audioTrack = audioStream.getAudioTracks()[0];
var audioTrack = audioStream.getTracks().filter(function(track) {
  return track.kind === 'audio';
})[0];

// append audio track into Canvas2D stream
canvasStream.addTrack( audioTrack );

// now canvas2D stream has both audio and video tracks
// peerConnection.addStream( canvasStream );
canvasStream.getTracks().forEach(function(track) {
  peerConnection.addTrack(track, canvasStream);
});

// create offer or answer descriptions
peerConnection.createOffer(hints).then(success).catch(failure);
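
On the receiving peer, the merged stream arrives via the ontrack event. A sketch (the video#remote element is an assumption):

peerConnection.ontrack = function(event) {
  // both the audio and the canvas video arrive in event.streams[0]
  document.querySelector('video#remote').srcObject = event.streams[0];
};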

creating single media stream from multiple audio elements?

For this you're going to need the Web Audio API, including some of the functions that you mentioned in your question.

Start off by creating a new AudioContext and selecting your <audio> elements. Loop over each <audio> element and use AudioContext.createMediaElementSource to create a playable source from each element.

const audioContext = new AudioContext();
const audioElements = document.querySelectorAll('#keyboard audio');
const sources = Array.from(audioElements).map(
  audioElement => audioContext.createMediaElementSource(audioElement)
);

From here you need to connect all the sources to the MediaRecorder. You can't do that directly, because the MediaRecorder only accepts a MediaStream instance as an argument.

To connect the two, use a MediaStreamAudioDestinationNode. This node is able to both receive the inputs from the created sources and create an output in the form of a MediaStream.

Loop over the sources and connect them to the MediaStreamAudioDestinationNode. Then pass the stream property of the MediaStreamAudioDestinationNode into the MediaRecorder constructor.
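
A minimal sketch of that direct wiring (superseded by the GainNode variant below):

const mediaStreamDestination = audioContext.createMediaStreamDestination();
sources.forEach(source => source.connect(mediaStreamDestination));
// the destination node's stream property is a regular MediaStream
const mediaRecorder = new MediaRecorder(mediaStreamDestination.stream);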


Edit:
I've included the usage of a GainNode, which is basically a volume control, but it can also be used to collect multiple inputs into a single AudioNode.

In this case we connect all the sources to the GainNode and then connect the GainNode to both the speakers and the MediaStreamAudioDestinationNode. This way we can both monitor and stream our audio while both outputs come from the same source.

This is an "easier" alternative to creating a MediaStreamAudioSourceNode and reading the stream from the MediaStreamAudioDestinationNode.



const gainNode = audioContext.createGain();
const mediaStreamDestination = audioContext.createMediaStreamDestination();

sources.forEach(source => {
  source.connect(gainNode);
});

gainNode.connect(mediaStreamDestination);
gainNode.connect(audioContext.destination);

const mediaRecorder = new MediaRecorder(mediaStreamDestination.stream);

From here, all you have to do is hit record and play your notes.

let recordingData = [];

mediaRecorder.addEventListener('start', () => {
  recordingData.length = 0;
});

mediaRecorder.addEventListener('dataavailable', event => {
  recordingData.push(event.data);
});

mediaRecorder.addEventListener('stop', () => {
  // use the recorder's actual mime type; MediaRecorder generally does not
  // produce 'audio/mp3' (it is usually audio/webm or audio/ogg)
  const blob = new Blob(recordingData, {
    type: mediaRecorder.mimeType
  });

  // The blob is the result of the recording.
  // Handle the blob from here.
});

mediaRecorder.start();

Note 1: This solution is not written in React and should be modified to work with it. For example, document.querySelectorAll should never be used in a React app; instead use the useRef hook to create references to the <audio> elements, as the sketch below illustrates.
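
A rough sketch of that React pattern (the component structure, names, and surrounding JSX are assumptions):

// inside a function component, collect refs instead of querying the DOM
const audioRefs = useRef([]);
// while rendering: <audio ref={el => { if (el) audioRefs.current[i] = el; }} ... />
// then build the sources from the refs:
const sources = audioRefs.current.map(
  el => audioContext.createMediaElementSource(el)
);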

Note 2: The <audio> element is not a part of an SVG. You don't have to place the elements inside the SVG for them to work. Instead visually hide the <audio> elements and trigger them with, for example, an onClick prop.

Merging the canvas stream with getUserMedia's audio stream is not audible (kurento/webrtc)

I have no Kurento to test with, but the problem is probably that the CanvasCaptureMediaStreamTrack gets muted after some time of inactivity on the canvas's context.

To work around that, you can simply set up a drawing loop that will update the canvas regularly (every half second should be more than enough, without causing too much overhead either).

Also, you may want to start from a fresh MediaStream, though I doubt this has any influence:

// assumes there is a 'canvas' and a 'mic_stream'

// make the context active, so the stream is not muted
const ctx = canvas.getContext("2d");
setInterval(() => ctx.clearRect(0,0,1,1), 500);
ctx.clearRect(0,0,1,1);
const canvas_stream = canvas.captureStream();

const canvas_track = canvas_stream.getVideoTracks()[0];
const mic_track = mic_stream.getAudioTracks()[0];
const merged_stream = new MediaStream([ canvas_track, mic_track ]);

// do something with 'merged_stream'
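
From there, a usage sketch (recording is one option; feeding a peer connection pc, an assumption here, is another):

// record the merged stream...
const recorder = new MediaRecorder(merged_stream);
recorder.start();
// ...or send it over WebRTC
merged_stream.getTracks().forEach(track => pc.addTrack(track, merged_stream));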

Capturing audio from video in web audio API, can't mute original audio

Chrome's behavior is actually the "more correct" one here (surprisingly, given the many bugs they have in that area).

You are creating a MediaStream cloned from the MediaElement's source. This MediaStream should not be affected by the volume set on the <video> element (per the specs), and both Firefox and Chrome fail here.

The captured MediaStream should thus have its own graph, and when you connect it to the AudioContext, the original stream from the MediaElement should continue its life and completely ignore the captured MediaStream. This is handled correctly by Chrome, but Firefox gets it wrong (which is in part why they still prefix the MediaElement#mozCaptureStream() method name).

But since what you want is actually Firefox's behavior, you can reproduce it by using a MediaElementAudioSourceNode, which will take ownership of the MediaElement's audio stream and disconnect it entirely from the MediaElement's graph. You'll thus have complete control over the output volume.

const btn = document.querySelector("button");
const vid = document.querySelector("video");
const inp = document.querySelector("input");

btn.onclick = evt => {
  btn.remove();
  vid.play();
  const context = new AudioContext();
  const gain = context.createGain();
  const source = context.createMediaElementSource(vid);
  source.connect(gain);
  gain.connect(context.destination);
  inp.oninput = evt => {
    gain.gain.value = inp.value;
  };
  gain.gain.value = 0;
  const meter = new OscilloMeter(document.querySelector("canvas"));
  meter.listen(source, context);
};
button~*,button~.cont { display: none }
.cont { display: flex }
<script>class OscilloMeter{constructor(a){this.ctx=a.getContext("2d")}listen(a,b){function c(){g.getByteTimeDomainData(j),d.clearRect(0,0,e,f),d.beginPath();let a=0;for(let c=0;c<h;c++){const e=j[c]/128;var b=e*f/2;d.lineTo(a,b),a+=k}d.lineTo(d.canvas.width,d.canvas.height/2),d.stroke(),requestAnimationFrame(c)}const d=this.ctx,e=d.canvas.width,f=d.canvas.height,g=b.createAnalyser(),h=g.fftSize=256,j=new Uint8Array(h),k=e/h;d.lineWidth=2,a.connect(g),c()}}</script>
<button>Start</button>
<label>Output volume: <input type=range min=0 max=1 step=0.01 value=0></label>
<div class="cont">
<section>
<p>You can still control the input's volume through the video's UI:</p>
<video src=https://upload.wikimedia.org/wikipedia/commons/2/22/Volcano_Lava_Sample.webm id=vid controls crossorigin=anonymous height=200></video>
</section>
<section>
<p>
Processed audio (using input volume):<br>
<canvas></canvas>
</p>
</section>
</div>

