javascript – Chord detection algorithm using the Web Audio API
First off, I am trying to implement this chord detection algorithm:
http://www.music.mcgill.ca/~jason/mumt621/papers5/fujishima_1999.pdf
I originally implemented the algorithm using my microphone, but it didn't work. As a test, I created three oscillators to produce a C chord, but the algorithm still doesn't work. I think I should see higher numbers only for C, E, and G, but I see numbers for all of the notes. Is there a problem with my implementation of the algorithm, or with my values for N, fref, or fs?
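For reference, the pitch class profile mapping from the paper that the code below tries to implement is (in my reading):

M(l) = round( 12 * log2( (fs * l / N) / fref ) ) mod 12, with M(0) = -1
PCP(p) = sum of |X(l)|^2 over all bins l with M(l) = p, for p = 0, 1, ..., 11

where X(l) is the DFT of the frame, fs is the sampling rate, N is the frame length, and fref is the reference frequency.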
Here is a chunk of the code with the important parts:
// Set audio Context
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var mediaStreamSource = null;
var analyser = null;
var N = 4096;//8192;//2048; // Samples of Sound
var bufferLen = null;
var buffer = null;
var PCP = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; // Pitch Class Profiles
var fref = 261.63; // Reference frequency middle C (C4)
// fref = 65.4; // Reference frequency C2
// fref = 440.0; // Reference frequency A4
var audioContext = new AudioContext();
var fs = audioContext.sampleRate; // Retrieve sampling rate. Usually 48KHz
var useMicrophone = false;
var constraints = { audio: true }; // Audio-only getUserMedia constraints
navigator.mediaDevices.getUserMedia(constraints)
.then(function(stream) {
// Create an analyzer node to process the audio
analyser = audioContext.createAnalyser();
analyser.fftSize = N;
bufferLen = N / 2;
//bufferLen = analyser.frequencyBinCount;
console.log( 'bufferLen = ' + bufferLen );
buffer = new Float32Array(bufferLen);
if ( useMicrophone ) {
// Create an AudioNode from the stream.
mediaStreamSource = audioContext.createMediaStreamSource(stream);
// Connect it to the destination.
mediaStreamSource.connect(analyser);
}
else {
// As a test, feed a C chord directly into the analyzer
// C4, E4, G4
var freqs = [261.63, 329.63, 392.00];
for( var i=0; i < freqs.length; i++) {
var o = audioContext.createOscillator();
var g = audioContext.createGain(); //Create Gain Node
o.frequency.value = freqs[i];
o.connect(g);
g.gain.value = 0.25;
g.connect( audioContext.destination );
g.connect( analyser );
o.start(0);
//setTimeout(function(s) {s.stop(0)}, 1000, o);
}
}
// Call algorithm every 50 ms
setInterval(function() {
pcpAlg();
}, 50);
})
.catch(function(err) {
console.log(err.name + ": " + err.message);
});
function pcpAlg() {
analyser.getFloatTimeDomainData(buffer);
//analyser.getFloatFrequencyData( buffer );
// Reset PCP
PCP = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
// M(0)=-1 so we don't have to start at 0
for (var l = 1; l < bufferLen; l++) { // l = 0,1,...,[(N/2) - 1]
// Calculate M(l)
var ML = Math.round(12 * Math.log2( (fs * (l / N) ) / fref ) ) % 12;
//console.log( ML );
if (ML >= 0 && ML <= 11) {
PCP[ML] += Math.pow( Math.abs( buffer[l] ), 2 );
}
}
// Display Data on UI and also try to determine if the sound is a C or F chord
displayAndCategorize();
}
If you want to try running it yourself, here is my full code. Be warned: I have set useMicrophone to false, so it will play a C chord out loud:
https://codepen.io/mapmaps/pen/ONQPpw
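displayAndCategorize() isn't shown in the snippet above; the idea is to compare the 12-bin PCP against C major and F major chord templates. A rough, illustrative sketch of that kind of comparison (just for context, not the exact code from the pen) looks like this:

// Illustrative only: score the PCP vector against C-major and F-major triad
// templates (pitch classes C=0, C#=1, ..., B=11) and pick the better match.
function guessChord(pcp) {
  var templates = {
    'C major': [0, 4, 7],   // C, E, G
    'F major': [5, 9, 0]    // F, A, C
  };
  var total = pcp.reduce(function(a, b) { return a + b; }, 0) || 1;
  var best = null, bestScore = -1;
  Object.keys(templates).forEach(function(name) {
    // Fraction of total PCP energy that falls on the template's pitch classes
    var score = templates[name].reduce(function(s, pc) { return s + pcp[pc] / total; }, 0);
    if (score > bestScore) { bestScore = score; best = name; }
  });
  return { chord: best, score: bestScore };
}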
Solution:
The problem is with the algorithm from the 1999 paper. You appear to be using FFT magnitude peaks, which give a crude spectral frequency estimate, not a pitch detector/estimator. Polyphonic chord estimation is an even more difficult/complex task. See here for research papers on current algorithms for polyphonic music extraction: http://www.music-ir.org/mirex/wiki/2015:MIREX2015_Results
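Incidentally, the snippet reads time-domain samples (getFloatTimeDomainData) into the PCP sum, while PCP(p) in the paper accumulates spectral magnitudes |X(l)|^2. A minimal sketch of feeding the analyser's magnitude spectrum in instead, reusing the question's analyser, fs, N, and fref, might look like the following; this only illustrates the PCP step and is not one of the more sophisticated methods linked above:

// Sketch: build a PCP from the analyser's magnitude spectrum instead of raw samples.
// Assumes analyser, fs, N, and fref from the question are in scope.
function pcpFromSpectrum() {
  var bins = new Float32Array(analyser.frequencyBinCount); // N/2 bins, in dB
  analyser.getFloatFrequencyData(bins);
  var pcp = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
  for (var l = 1; l < bins.length; l++) {
    var mag = Math.pow(10, bins[l] / 20);                       // dB -> linear magnitude
    var m = Math.round(12 * Math.log2((fs * l / N) / fref)) % 12;
    if (m < 0) m += 12;                                         // wrap into 0..11
    pcp[m] += mag * mag;                                        // accumulate power per pitch class
  }
  return pcp;
}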
Tags: javascript, algorithm, audio, web-audio, audio-processing