<!DOCTYPE HTML>
<html>
<head>
  <title>Test ScriptProcessorNode</title>
  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
  <script type="text/javascript" src="webaudio.js"></script>
  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">

// We do not use our generic graph test framework here because
// the testing logic here is sort of complicated, and would
// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes
// can experience delays.

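// Test outline: sourceSP synthesizes one 2048-frame block of 440 Hz and
// 880 Hz sine waves, which is routed through a second ScriptProcessorNode
// (sp) into the destination.  sp's audioprocess handlers then verify that
// the generated samples show up on its input, possibly shifted by whatever
// latency the graph introduces.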
SimpleTest.waitForExplicitFinish();
addLoadEvent(function() {
  var context = new AudioContext();
  var buffer = null;

  var sourceSP = context.createScriptProcessor(2048);
  sourceSP.addEventListener("audioprocess", function generateAudio(e) {
    // generate the audio
    for (var i = 0; i < 2048; ++i) {
      // Make sure our first sample won't be zero
      e.outputBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i + 1) / context.sampleRate);
      e.outputBuffer.getChannelData(1)[i] = Math.sin(880 * 2 * Math.PI * (i + 1) / context.sampleRate);
    }
    // Remember our generated audio
    buffer = e.outputBuffer;

    // Only generate one block of audio, then detach ourselves.
    sourceSP.removeEventListener("audioprocess", generateAudio);
  }, false);

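  // Per the Web Audio API spec, createScriptProcessor() accepts a bufferSize
  // of 0 (implementation-chosen) or a power of two between 256 and 16384;
  // the values below fall outside that range and should throw INDEX_SIZE_ERR.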
  expectException(function() {
    context.createScriptProcessor(1);
  }, DOMException.INDEX_SIZE_ERR);
  expectException(function() {
    context.createScriptProcessor(2);
  }, DOMException.INDEX_SIZE_ERR);
  expectException(function() {
    context.createScriptProcessor(128);
  }, DOMException.INDEX_SIZE_ERR);
  expectException(function() {
    context.createScriptProcessor(255);
  }, DOMException.INDEX_SIZE_ERR);

  is(sourceSP.channelCount, 2, "script processor node has 2 input channels by default");
  is(sourceSP.channelCountMode, "explicit", "Correct channelCountMode for the script processor node");
  is(sourceSP.channelInterpretation, "speakers", "Correct channelInterpretation for the script processor node");

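  // Returns the index of the first non-zero sample in channel 0, or the
  // buffer length if the whole block is silent.  Leading zeros correspond
  // to the delay before sourceSP's output reaches sp.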
  function findFirstNonZeroSample(buffer) {
    for (var i = 0; i < buffer.length; ++i) {
      if (buffer.getChannelData(0)[i] != 0) {
        return i;
      }
    }
    return buffer.length;
  }

  var sp = context.createScriptProcessor(2048);
  sourceSP.connect(sp);
  sp.connect(context.destination);
  var lastPlaybackTime = 0;

  var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);

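  // Invariants checked on every AudioProcessingEvent that sp receives:
  // correct target, strictly increasing playbackTime, 2-channel 2048-frame
  // buffers at the context's sample rate, and an output buffer that starts
  // out silent (this node never writes to its output).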
  function checkAudioProcessingEvent(e) {
    is(e.target, sp, "Correct event target");
    ok(e.playbackTime > lastPlaybackTime, "playbackTime correctly set");
    lastPlaybackTime = e.playbackTime;
    is(e.inputBuffer.numberOfChannels, 2, "Correct number of channels for the input buffer");
    is(e.inputBuffer.length, 2048, "Correct length for the input buffer");
    is(e.inputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the input buffer");
    is(e.outputBuffer.numberOfChannels, 2, "Correct number of channels for the output buffer");
    is(e.outputBuffer.length, 2048, "Correct length for the output buffer");
    is(e.outputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the output buffer");

    compareChannels(e.outputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
    compareChannels(e.outputBuffer.getChannelData(1), emptyBuffer.getChannelData(0));
  }

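  // The verification runs as a chain of handlers: the first callback is
  // expected to be completely silent because of the latency the second
  // script processor adds, the next one looks for the start of the
  // generated signal, and, if the signal straddles two blocks, a final
  // handler checks the remainder.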
  sp.onaudioprocess = function(e) {
    isnot(buffer, null, "The audioprocess handler for sourceSP must be run at this point");
    checkAudioProcessingEvent(e);

    // Because of the initial latency added by the second script processor node,
    // we will never see any generated audio frames in the first callback.
    compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
    compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0));

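    // Second callback: the generated signal should start at some offset in
    // this block.  Samples before that offset must still be silent, and the
    // rest must match the start of the buffer sourceSP produced (the extra
    // compareChannels arguments select the length and the offsets of the
    // ranges being compared; see webaudio.js).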
    sp.onaudioprocess = function(e) {
      checkAudioProcessingEvent(e);

      var firstNonZero = findFirstNonZeroSample(e.inputBuffer);
      ok(firstNonZero <= 2048, "First non-zero sample within range");

      compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), firstNonZero);
      compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), firstNonZero);
      compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), 2048 - firstNonZero, firstNonZero, 0);
      compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), 2048 - firstNonZero, firstNonZero, 0);

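      // Three possible outcomes: the signal started right at sample 0 (no
      // delay, we are done), it started partway through this block (check
      // the remaining samples in one more callback), or the block was
      // entirely silent (leave this handler installed and try again on the
      // next block).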
      if (firstNonZero == 0) {
        // If we did not experience any delays, the test is done!
        sp.onaudioprocess = null;

        SimpleTest.finish();
      } else if (firstNonZero != 2048) {
        // In case we just saw a zero buffer this time, wait one more round
        sp.onaudioprocess = function(e) {
          checkAudioProcessingEvent(e);

          compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), firstNonZero, 0, 2048 - firstNonZero);
          compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), firstNonZero, 0, 2048 - firstNonZero);
          compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), undefined, firstNonZero);
          compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), undefined, firstNonZero);

          sp.onaudioprocess = null;

          SimpleTest.finish();
        };
      }
    };
  };
});

</script>
</pre>
</body>
</html>