
android - How to mux an audio file and a video file?


I have a 3gp file recorded from the microphone and an mp4 video file. I want to mux the audio file and the video file into a single mp4 file and save it. I have searched a lot but have not found anything helpful on using Android's MediaMuxer API.

Update: Here is my method for muxing the two files, and it throws an exception. The reason is that the destination mp4 file does not contain any tracks! Can someone help me add the audio and video tracks to the muxer?

Exception:

java.lang.IllegalStateException: Failed to stop the muxer
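
From the MediaMuxer documentation, my understanding is that every track has to be registered with addTrack() before start() is called, and samples can only be written between start() and stop(); if no track is ever added or written, stop() seems to fail like this. A rough sketch of the call order I think is expected (placeholder paths, first track of each file assumed, not my real code):

// Sketch only: placeholder paths, inside a method that may throw IOException.
MediaExtractor audioExtractor = new MediaExtractor();
audioExtractor.setDataSource("/path/to/recording.3gp");   // placeholder
audioExtractor.selectTrack(0);

MediaExtractor videoExtractor = new MediaExtractor();
videoExtractor.setDataSource("/path/to/video.mp4");       // placeholder
videoExtractor.selectTrack(0);

MediaMuxer muxer = new MediaMuxer("/path/to/out.mp4", MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

// Both tracks must be added BEFORE start(); addTrack() returns the destination track index.
int dstAudioTrack = muxer.addTrack(audioExtractor.getTrackFormat(0));
int dstVideoTrack = muxer.addTrack(videoExtractor.getTrackFormat(0));

muxer.start();
// ... readSampleData()/writeSampleData() loop for each track goes here ...
muxer.stop();
muxer.release();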

My code:

private void cloneMediaUsingMuxer(String dstMediaPath) throws IOException {
    // Set up MediaExtractor to read from the source.
    MediaExtractor soundExtractor = new MediaExtractor();
    soundExtractor.setDataSource(audioFilePath);
    MediaExtractor videoExtractor = new MediaExtractor();
    AssetFileDescriptor afd2 = getAssets().openFd("Produce.MP4");
    videoExtractor.setDataSource(afd2.getFileDescriptor(), afd2.getStartOffset(), afd2.getLength());

    //PATH
    //extractor.setDataSource();
    int trackCount = soundExtractor.getTrackCount();
    int trackCount2 = soundExtractor.getTrackCount();

    //assertEquals("wrong number of tracks", expectedTrackCount, trackCount);
    // Set up MediaMuxer for the destination.
    MediaMuxer muxer;
    muxer = new MediaMuxer(dstMediaPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
    // Set up the tracks.
    HashMap<Integer, Integer> indexMap = new HashMap<Integer, Integer>(trackCount);
    for (int i = 0; i < trackCount; i++) {
        soundExtractor.selectTrack(i);
        MediaFormat SoundFormat = soundExtractor.getTrackFormat(i);
        int dstIndex = muxer.addTrack(SoundFormat);
        indexMap.put(i, dstIndex);
    }

    HashMap<Integer, Integer> indexMap2 = new HashMap<Integer, Integer>(trackCount2);
    for (int i = 0; i < trackCount2; i++) {
        videoExtractor.selectTrack(i);
        MediaFormat videoFormat = videoExtractor.getTrackFormat(i);
        int dstIndex2 = muxer.addTrack(videoFormat);
        indexMap.put(i, dstIndex2);
    }

    // Copy the samples from MediaExtractor to MediaMuxer.
    boolean sawEOS = false;
    int bufferSize = MAX_SAMPLE_SIZE;
    int frameCount = 0;
    int offset = 100;
    ByteBuffer dstBuf = ByteBuffer.allocate(bufferSize);
    MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
    MediaCodec.BufferInfo bufferInfo2 = new MediaCodec.BufferInfo();

    muxer.start();
    while (!sawEOS) {
        bufferInfo.offset = offset;
        bufferInfo.size = soundExtractor.readSampleData(dstBuf, offset);
        bufferInfo2.offset = offset;
        bufferInfo2.size = videoExtractor.readSampleData(dstBuf, offset);

        if (bufferInfo.size < 0) {
            sawEOS = true;
            bufferInfo.size = 0;
            bufferInfo2.size = 0;
        } else if (bufferInfo2.size < 0) {
            sawEOS = true;
            bufferInfo.size = 0;
            bufferInfo2.size = 0;
        } else {
            bufferInfo.presentationTimeUs = soundExtractor.getSampleTime();
            bufferInfo2.presentationTimeUs = videoExtractor.getSampleTime();
            //bufferInfo.flags = extractor.getSampleFlags();
            int trackIndex = soundExtractor.getSampleTrackIndex();
            int trackIndex2 = videoExtractor.getSampleTrackIndex();
            muxer.writeSampleData(indexMap.get(trackIndex), dstBuf, bufferInfo);

            soundExtractor.advance();
            videoExtractor.advance();
            frameCount++;
        }
    }

    Toast.makeText(getApplicationContext(), "f:" + frameCount, Toast.LENGTH_SHORT).show();

    muxer.stop();
    muxer.release();
}

Update 2: The problem is solved! See my answer to the question below.

Thanks for your help.

Best Answer

I had some problems with the tracks of the audio and video files. Those problems are gone and my code works fine now, so you can use it to merge an audio file and a video file together.

Code:

private void muxing() {

    String outputFile = "";

    try {

        // Destination file on external storage.
        File file = new File(Environment.getExternalStorageDirectory() + File.separator + "final2.mp4");
        file.createNewFile();
        outputFile = file.getAbsolutePath();

        // Extractor for the video source (an asset in this example).
        MediaExtractor videoExtractor = new MediaExtractor();
        AssetFileDescriptor afdd = getAssets().openFd("Produce.MP4");
        videoExtractor.setDataSource(afdd.getFileDescriptor(), afdd.getStartOffset(), afdd.getLength());

        // Extractor for the recorded audio file.
        MediaExtractor audioExtractor = new MediaExtractor();
        audioExtractor.setDataSource(audioFilePath);

        Log.d(TAG, "Video Extractor Track Count " + videoExtractor.getTrackCount());
        Log.d(TAG, "Audio Extractor Track Count " + audioExtractor.getTrackCount());

        MediaMuxer muxer = new MediaMuxer(outputFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

        // Register both tracks with the muxer BEFORE calling start().
        videoExtractor.selectTrack(0);
        MediaFormat videoFormat = videoExtractor.getTrackFormat(0);
        int videoTrack = muxer.addTrack(videoFormat);

        audioExtractor.selectTrack(0);
        MediaFormat audioFormat = audioExtractor.getTrackFormat(0);
        int audioTrack = muxer.addTrack(audioFormat);

        Log.d(TAG, "Video Format " + videoFormat.toString());
        Log.d(TAG, "Audio Format " + audioFormat.toString());

        boolean sawEOS = false;
        int frameCount = 0;
        int offset = 100;
        int sampleSize = 256 * 1024;
        ByteBuffer videoBuf = ByteBuffer.allocate(sampleSize);
        ByteBuffer audioBuf = ByteBuffer.allocate(sampleSize);
        MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo();
        MediaCodec.BufferInfo audioBufferInfo = new MediaCodec.BufferInfo();

        videoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
        audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);

        muxer.start();

        // Copy all video samples first.
        while (!sawEOS) {
            videoBufferInfo.offset = offset;
            videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, offset);

            if (videoBufferInfo.size < 0) {
                Log.d(TAG, "saw video input EOS.");
                sawEOS = true;
                videoBufferInfo.size = 0;
            } else {
                videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime();
                videoBufferInfo.flags = videoExtractor.getSampleFlags();
                muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo);
                videoExtractor.advance();

                frameCount++;
                Log.d(TAG, "Frame (" + frameCount + ") Video PresentationTimeUs:" + videoBufferInfo.presentationTimeUs + " Flags:" + videoBufferInfo.flags + " Size(KB) " + videoBufferInfo.size / 1024);
            }
        }

        Toast.makeText(getApplicationContext(), "frame:" + frameCount, Toast.LENGTH_SHORT).show();

        // Then copy all audio samples.
        boolean sawEOS2 = false;
        int frameCount2 = 0;
        while (!sawEOS2) {
            audioBufferInfo.offset = offset;
            audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, offset);

            if (audioBufferInfo.size < 0) {
                Log.d(TAG, "saw audio input EOS.");
                sawEOS2 = true;
                audioBufferInfo.size = 0;
            } else {
                audioBufferInfo.presentationTimeUs = audioExtractor.getSampleTime();
                audioBufferInfo.flags = audioExtractor.getSampleFlags();
                muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo);
                audioExtractor.advance();

                frameCount2++;
                Log.d(TAG, "Frame (" + frameCount2 + ") Audio PresentationTimeUs:" + audioBufferInfo.presentationTimeUs + " Flags:" + audioBufferInfo.flags + " Size(KB) " + audioBufferInfo.size / 1024);
            }
        }

        Toast.makeText(getApplicationContext(), "frame:" + frameCount2, Toast.LENGTH_SHORT).show();

        // Finish up and release everything.
        muxer.stop();
        muxer.release();
        videoExtractor.release();
        audioExtractor.release();

    } catch (IOException e) {
        Log.d(TAG, "Mixer Error 1 " + e.getMessage());
    } catch (Exception e) {
        Log.d(TAG, "Mixer Error 2 " + e.getMessage());
    }
}
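
A note on the code above: it assumes that the first track (index 0) of each file is the one to copy. If that is not guaranteed for your files, a small helper along these lines (just a sketch, with a hypothetical name findTrackByMime) could pick the first track whose MIME type matches instead:

// Sketch: return the index of the first track whose MIME type starts with the given
// prefix ("video/" or "audio/"), or -1 if there is none.
private static int findTrackByMime(MediaExtractor extractor, String mimePrefix) {
    for (int i = 0; i < extractor.getTrackCount(); i++) {
        MediaFormat format = extractor.getTrackFormat(i);
        String mime = format.getString(MediaFormat.KEY_MIME);
        if (mime != null && mime.startsWith(mimePrefix)) {
            return i;
        }
    }
    return -1;
}

With that, the selection above would become something like videoExtractor.selectTrack(findTrackByMime(videoExtractor, "video/")), and the same for the audio extractor with "audio/".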

Thanks to this sample code: MediaMuxer Sample Codes - really perfect.

Regarding "android - How to mux an audio file and a video file?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/31572067/
