javascript - Mixing two audio buffers, putting one as the background of the other, using the Web Audio API

I want to mix two audio sources by putting one song as the background of another.

For example, I have this input:

<input id="files" type="file" name="files[]" multiple onchange="handleFilesSelect(event)"/>

And a script that decodes these files:

window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new window.AudioContext();
var sources = [];
var files = [];
var mixed = {};

function handleFilesSelect(event){
    if(event.target.files.length <= 1)
        return false;

    files = event.target.files;
    readFiles(0, mixAudioSources);
}

function readFiles(index, callback){
    var freader = new FileReader();
    var i = index ? index : 0;

    freader.onload = function (e) {
        context.decodeAudioData(e.target.result, function (buf) {

            sources[i] = context.createBufferSource();
            sources[i].connect(context.destination);
            sources[i].buffer = buf;

            if(files.length > i + 1){
                readFiles(i + 1, callback);
            } else {
                if(callback){
                    callback();
                }
            }
        });
    };

    freader.readAsArrayBuffer(files[i]);
}

function mixAudioSources(){
    //So in our scenario we have two decoded audio sources in the "sources" array.
    //How can we mix these "sources" into the "mixed" variable, putting "sources[0]" as the background of "sources[1]"?
}

So how can I mix these sources into one? For example, I have two files: how do I put one source as the background of the other and then combine them into a single source?

Another scenario: if I read an input stream from the microphone and want to put that input over a background song (a kind of karaoke), can this be done on the client side with HTML5? How is the performance? Or would it be better to mix these audio sources on the server side?

If it is possible, what would an implementation of the mixAudioSources function look like?

Thanks.

Best Answer

Two approaches, originally posted at Is it possible to mix multiple audio files on top of each other preferably with javascript, adjusted here to handle the File objects at the change event of an <input type="file"> element.

The first approach uses OfflineAudioContext(), AudioContext.createBufferSource(), AudioContext.createMediaStreamDestination(), the Promise constructor, Promise.all(), and MediaRecorder() to mix the audio tracks, then offers the mixed audio file for download.

var div = document.querySelector("div");

function handleFilesSelect(input) {
  div.innerHTML = "loading audio tracks.. please wait";
  var files = Array.from(input.files);
  var duration = 60000;
  var chunks = [];
  var audio = new AudioContext();
  var mixedAudio = audio.createMediaStreamDestination();
  var player = new Audio();
  var context;
  var recorder;
  var description = "";

  player.controls = "controls";

  function get(file) {
    description += file.name.replace(/\..*|\s+/g, "");
    return new Promise(function(resolve, reject) {
      var reader = new FileReader;
      reader.readAsArrayBuffer(file);
      reader.onload = function() {
        resolve(reader.result)
      }
    })
  }

  function stopMix(duration, ...media) {
    setTimeout(function(media) {
      media.forEach(function(node) {
        node.stop()
      })
    }, duration, media)
  }

  Promise.all(files.map(get)).then(function(data) {
      var len = Math.max.apply(Math, data.map(function(buffer) {
        return buffer.byteLength
      }));
      context = new OfflineAudioContext(2, len, 44100);
      return Promise.all(data.map(function(buffer) {
          return audio.decodeAudioData(buffer)
            .then(function(bufferSource) {
              var source = context.createBufferSource();
              source.buffer = bufferSource;
              source.connect(context.destination);
              return source.start()
            })
        }))
        .then(function() {
          return context.startRendering()
        })
        .then(function(renderedBuffer) {
          return new Promise(function(resolve) {
            var mix = audio.createBufferSource();
            mix.buffer = renderedBuffer;
            mix.connect(audio.destination);
            mix.connect(mixedAudio);
            recorder = new MediaRecorder(mixedAudio.stream);
            recorder.start(0);
            mix.start(0);
            div.innerHTML = "playing and recording tracks..";
            // stop playback and recorder in 60 seconds
            stopMix(duration, mix, recorder)

            recorder.ondataavailable = function(event) {
              chunks.push(event.data);
            };

            recorder.onstop = function(event) {
              var blob = new Blob(chunks, {
                "type": "audio/ogg; codecs=opus"
              });
              console.log("recording complete");
              resolve(blob)
            };
          })
        })
        .then(function(blob) {
          console.log(blob);
          div.innerHTML = "mixed audio tracks ready for download..";
          var audioDownload = URL.createObjectURL(blob);
          var a = document.createElement("a");
          a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
          a.href = audioDownload;
          a.innerHTML = a.download;
          document.body.appendChild(a);
          a.insertAdjacentHTML("afterend", "<br>");
          player.src = audioDownload;
          document.body.appendChild(player);
        })
    })
    .catch(function(e) {
      console.log(e)
    });

}
<!DOCTYPE html>
<html>

<head>
</head>

<body>
  <input id="files"
         type="file"
         name="files[]"
         accept="audio/*"
         multiple
         onchange="handleFilesSelect(this)" />
  <div></div>
</body>

</html>
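
If the goal is only to hear the two decoded buffers together in the browser, without producing a downloadable file, a much smaller sketch is enough: connect each AudioBufferSourceNode to its own GainNode, route both gains into the AudioContext destination, and start the sources at the same time. The playMixed() helper below and its 0.4 / 1.0 gain values are only illustrative assumptions for a background/foreground balance.

// Minimal sketch: play two decoded AudioBuffers together,
// with buffers[0] attenuated as the background track.
function playMixed(context, buffers) {
  var backgroundGain = context.createGain();
  var foregroundGain = context.createGain();
  backgroundGain.gain.value = 0.4; // assumed background level
  foregroundGain.gain.value = 1.0; // assumed foreground level

  var background = context.createBufferSource();
  background.buffer = buffers[0];
  background.connect(backgroundGain);

  var foreground = context.createBufferSource();
  foreground.buffer = buffers[1];
  foreground.connect(foregroundGain);

  backgroundGain.connect(context.destination);
  foregroundGain.connect(context.destination);

  // Schedule both sources on the same clock tick so they stay aligned.
  var startTime = context.currentTime + 0.1;
  background.start(startTime);
  foreground.start(startTime);
}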

The second approach uses AudioContext.createChannelMerger() and AudioContext.createChannelSplitter().

var div = document.querySelector("div");

function handleFilesSelect(input) {

  div.innerHTML = "loading audio tracks.. please wait";
  var files = Array.from(input.files);
  var chunks = [];
  var channels = [
    [0, 1],
    [1, 0]
  ];
  var audio = new AudioContext();
  var player = new Audio();
  var merger = audio.createChannelMerger(2);
  var splitter = audio.createChannelSplitter(2);
  var mixedAudio = audio.createMediaStreamDestination();
  var duration = 60000;
  var context;
  var recorder;
  var audioDownload;
  var description = "";

  player.controls = "controls";

  function get(file) {
    description += file.name.replace(/\..*|\s+/g, "");
    console.log(description);
    return new Promise(function(resolve, reject) {
      var reader = new FileReader;
      reader.readAsArrayBuffer(file);
      reader.onload = function() {
        resolve(reader.result)
      }
    })
  }

  function stopMix(duration, ...media) {
    setTimeout(function(media) {
      media.forEach(function(node) {
        node.stop()
      })
    }, duration, media)
  }

  Promise.all(files.map(get)).then(function(data) {
      return Promise.all(data.map(function(buffer, index) {
          return audio.decodeAudioData(buffer)
            .then(function(bufferSource) {
              var channel = channels[index];
              var source = audio.createBufferSource();
              source.buffer = bufferSource;
              source.connect(splitter);
              splitter.connect(merger, channel[0], channel[1]);
              return source
            })
        }))
        .then(function(audionodes) {
          merger.connect(mixedAudio);
          merger.connect(audio.destination);
          recorder = new MediaRecorder(mixedAudio.stream);
          recorder.start(0);
          audionodes.forEach(function(node, index) {
            node.start(0)
          });

          div.innerHTML = "playing and recording tracks..";

          stopMix(duration, ...audionodes, recorder);

          recorder.ondataavailable = function(event) {
            chunks.push(event.data);
          };

          recorder.onstop = function(event) {
            var blob = new Blob(chunks, {
              "type": "audio/ogg; codecs=opus"
            });
            audioDownload = URL.createObjectURL(blob);
            var a = document.createElement("a");
            a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
            a.href = audioDownload;
            a.innerHTML = a.download;
            player.src = audioDownload;
            document.body.appendChild(a);
            document.body.appendChild(player);
          };
        })
    })
    .catch(function(e) {
      console.log(e)
    });
}
<!DOCTYPE html>
<html>

<head>
</head>

<body>
  <input id="files"
         type="file"
         name="files[]"
         accept="audio/*"
         multiple onchange="handleFilesSelect(this)" />
  <div></div>
</body>

</html>
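
For the karaoke scenario in the question, mixing a live microphone stream over a background track, the same node graph can be extended client-side with getUserMedia(). This is not part of the original answer; it is a hedged sketch that assumes backgroundBuffer has already been decoded into an AudioBuffer, and the recordKaraoke() name and the 0.5 gain value are placeholders.

// Sketch: mix live microphone input over a decoded background AudioBuffer
// and record the result to a Blob with MediaRecorder.
function recordKaraoke(backgroundBuffer) {
  var audio = new AudioContext();
  var mixedAudio = audio.createMediaStreamDestination();
  var chunks = [];

  return navigator.mediaDevices.getUserMedia({ audio: true })
    .then(function(micStream) {
      // Live microphone input feeding the mix.
      var mic = audio.createMediaStreamSource(micStream);
      mic.connect(mixedAudio);

      // Background track, attenuated so the voice stays on top.
      var background = audio.createBufferSource();
      background.buffer = backgroundBuffer;
      var backgroundGain = audio.createGain();
      backgroundGain.gain.value = 0.5; // assumed balance
      background.connect(backgroundGain);
      backgroundGain.connect(mixedAudio);

      var recorder = new MediaRecorder(mixedAudio.stream);
      recorder.ondataavailable = function(event) {
        chunks.push(event.data);
      };

      return new Promise(function(resolve) {
        recorder.onstop = function() {
          resolve(new Blob(chunks, { type: "audio/ogg; codecs=opus" }));
        };
        recorder.start(0);
        background.start(0);
        // Stop recording when the background track ends.
        background.onended = function() {
          recorder.stop();
        };
      });
    });
}

Because the per-sample mixing happens inside the browser's native audio nodes rather than in JavaScript, a two-track mix like this is normally cheap enough to do on the client; server-side mixing is mainly worth considering when the result has to be re-encoded or processed beyond what the page itself needs.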

Regarding "javascript - Mixing two audio buffers, putting one as the background of the other, using the Web Audio API", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/42557005/
