
android - Recording audio and video with MediaMuxer on Android


I am trying to record audio and video using AudioRecord, MediaCodec and MediaMuxer, which are available since Android 4.3. However, sometimes the audio encoder thread stops and never encodes again. The result is a corrupted mp4 file, because the muxer does not receive any encoded audio frames. On my Samsung Galaxy Note 3 it works 99% of the time, but on my Sony Xperia Z1 the encoder thread always gets stuck. I really don't know what causes it; maybe someone can help me improve my code:

AudioRecorder.java

package com.cmdd.horicam;

import java.nio.ByteBuffer;

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.os.Looper;
import android.util.Log;

public class AudioRecorder implements Runnable {
    public static final String TAG = "AudioRecorder";
    public static final boolean VERBOSE = false;

    public MovieMuxerAudioHandler mAudioHandler;

    // audio format settings
    public static final String MIME_TYPE_AUDIO = "audio/mp4a-latm";
    public static final int SAMPLE_RATE = 44100;
    public static final int CHANNEL_COUNT = 1;
    public static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_MONO;
    public static final int BIT_RATE_AUDIO = 128000;
    public static final int SAMPLES_PER_FRAME = 1024; // AAC
    public static final int FRAMES_PER_BUFFER = 24;
    public static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
    public static final int AUDIO_SOURCE = MediaRecorder.AudioSource.MIC;

    public static final int MSG_START_RECORDING = 0;
    public static final int MSG_STOP_RECORDING = 1;
    public static final int MSG_QUIT = 2;

    private MediaCodec mAudioEncoder;
    private int iBufferSize;
    int iReadResult = 0;
    private boolean bIsRecording = false;

    private static final int TIMEOUT_USEC = 10000;

    private MovieMuxer mMovieMuxer;

    private MediaFormat mAudioFormat;

    private volatile AudioRecorderHandler mHandler;

    private Object mReadyFence = new Object(); // guards ready/running
    private boolean mReady;
    private boolean mRunning;

    public AudioRecorder(MovieMuxer mMovieMuxer){
        this.mMovieMuxer = mMovieMuxer;
    }

    /**
     * Recorder thread entry point. Establishes Looper/Handler and waits for messages.
     * <p>
     * @see java.lang.Thread#run()
     */
    @Override
    public void run() {
        // Establish a Looper for this thread, and define a Handler for it.
        Looper.prepare();
        synchronized (mReadyFence) {
            mHandler = new AudioRecorderHandler(this);
            mReady = true;
            mReadyFence.notify();
        }
        Looper.loop();

        if(VERBOSE)Log.d(TAG, "audio recorder exiting thread");
        synchronized (mReadyFence) {
            mReady = mRunning = false;
            mHandler = null;
        }
    }

    public void prepareEncoder(){
        // prepare audio format
        mAudioFormat = MediaFormat.createAudioFormat(MIME_TYPE_AUDIO, SAMPLE_RATE, CHANNEL_COUNT);
        mAudioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        mAudioFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 16384);
        mAudioFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE_AUDIO);

        mAudioEncoder = MediaCodec.createEncoderByType(MIME_TYPE_AUDIO);
        mAudioEncoder.configure(mAudioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mAudioEncoder.start();

        new Thread(new AudioEncoderTask(), "AudioEncoderTask").start();
    }

    public void prepareRecorder() {
        int iMinBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL_CONFIG, AUDIO_FORMAT);

        bIsRecording = false;

        iBufferSize = SAMPLES_PER_FRAME * FRAMES_PER_BUFFER;

        // Ensure buffer is adequately sized for the AudioRecord
        // object to initialize
        if (iBufferSize < iMinBufferSize)
            iBufferSize = ((iMinBufferSize / SAMPLES_PER_FRAME) + 1) * SAMPLES_PER_FRAME * 2;

        AudioRecord mAudioRecorder;
        mAudioRecorder = new AudioRecord(
                AUDIO_SOURCE,   // source
                SAMPLE_RATE,    // sample rate, hz
                CHANNEL_CONFIG, // channels
                AUDIO_FORMAT,   // audio format
                iBufferSize);   // buffer size (bytes)

        mAudioRecorder.startRecording();

        new Thread(new AudioRecorderTask(mAudioRecorder), "AudioRecorderTask").start();
    }

    /**
     * Tells the audio recorder to start recording. (Call from non-encoder thread.)
     * <p>
     * Creates a new thread, which will create an encoder using the provided configuration.
     * <p>
     * Returns after the recorder thread has started and is ready to accept Messages. The
     * encoder may not yet be fully configured.
     */
    public void startRecording() {
        if(VERBOSE)Log.d(TAG, "audio recorder: startRecording()");
        synchronized (mReadyFence) {
            if (mRunning) {
                Log.w(TAG, "audio recorder thread already running");
                return;
            }
            mRunning = true;

            new Thread(this, "AudioRecorder").start();
            while (!mReady) {
                try {
                    mReadyFence.wait();
                } catch (InterruptedException ie) {
                    // ignore
                }
            }
        }

        mHandler.sendMessage(mHandler.obtainMessage(MSG_START_RECORDING));
    }

    public void handleStartRecording(){
        if(VERBOSE)Log.d(TAG, "handleStartRecording");
        prepareEncoder();
        prepareRecorder();
        bIsRecording = true;
    }

    /**
     * Tells the audio recorder to stop recording. (Call from non-encoder thread.)
     * <p>
     * Returns immediately; the encoder/muxer may not yet be finished creating the movie.
     */
    public void stopRecording() {
        if(mHandler != null){
            mHandler.sendMessage(mHandler.obtainMessage(MSG_STOP_RECORDING));
            mHandler.sendMessage(mHandler.obtainMessage(MSG_QUIT));
        }
    }

    /**
     * Handles a request to stop encoding.
     */
    public void handleStopRecording() {
        if(VERBOSE)Log.d(TAG, "handleStopRecording");
        bIsRecording = false;
    }

    public String getCurrentAudioFormat(){
        if(this.mAudioFormat == null)
            return "null";
        else
            return this.mAudioFormat.toString();
    }

    private class AudioRecorderTask implements Runnable {

        AudioRecord mAudioRecorder;
        ByteBuffer[] inputBuffers;
        ByteBuffer inputBuffer;

        public AudioRecorderTask(AudioRecord recorder){
            this.mAudioRecorder = recorder;
        }

        @Override
        public void run() {
            if(VERBOSE)Log.i(TAG, "AudioRecorder started recording");
            long audioPresentationTimeNs;

            byte[] mTempBuffer = new byte[SAMPLES_PER_FRAME];

            while (bIsRecording) {
                audioPresentationTimeNs = System.nanoTime();

                iReadResult = mAudioRecorder.read(mTempBuffer, 0, SAMPLES_PER_FRAME);
                if(iReadResult == AudioRecord.ERROR_BAD_VALUE || iReadResult == AudioRecord.ERROR_INVALID_OPERATION)
                    Log.e(TAG, "audio buffer read error");

                // send current frame data to encoder
                try {
                    if(inputBuffers == null)
                        inputBuffers = mAudioEncoder.getInputBuffers();

                    int inputBufferIndex = mAudioEncoder.dequeueInputBuffer(-1);
                    if (inputBufferIndex >= 0) {
                        inputBuffer = inputBuffers[inputBufferIndex];
                        inputBuffer.clear();
                        inputBuffer.put(mTempBuffer);
                        //recycleInputBuffer(mTempBuffer);

                        if(VERBOSE)Log.d(TAG, "sending frame to audio encoder");
                        mAudioEncoder.queueInputBuffer(inputBufferIndex, 0, mTempBuffer.length, audioPresentationTimeNs / 1000, 0);
                    }
                } catch (Throwable t) {
                    Log.e(TAG, "sendFrameToAudioEncoder exception");
                    t.printStackTrace();
                }
            }

            // finished recording -> send it to the encoder
            audioPresentationTimeNs = System.nanoTime();

            iReadResult = mAudioRecorder.read(mTempBuffer, 0, SAMPLES_PER_FRAME);
            if (iReadResult == AudioRecord.ERROR_BAD_VALUE
                    || iReadResult == AudioRecord.ERROR_INVALID_OPERATION)
                Log.e(TAG, "audio buffer read error");

            // send current frame data to encoder
            try {
                int inputBufferIndex = mAudioEncoder.dequeueInputBuffer(-1);
                if (inputBufferIndex >= 0) {
                    inputBuffer = inputBuffers[inputBufferIndex];
                    inputBuffer.clear();
                    inputBuffer.put(mTempBuffer);

                    if(VERBOSE)Log.d(TAG, "sending EOS to audio encoder");
                    mAudioEncoder.queueInputBuffer(inputBufferIndex, 0, mTempBuffer.length, audioPresentationTimeNs / 1000, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                }
            } catch (Throwable t) {
                Log.e(TAG, "sendFrameToAudioEncoder exception");
                t.printStackTrace();
            }

            //if (mAudioRecorder != null) {
            //    mAudioRecorder.release();
            //    mAudioRecorder = null;
            //    if(VERBOSE)Log.i(TAG, "stopped");
            //}
        }
    }

    private class AudioEncoderTask implements Runnable {
        private boolean bAudioEncoderFinished;
        private int iAudioTrackIndex;
        private MediaCodec.BufferInfo mAudioBufferInfo;

        @Override
        public void run(){
            if(VERBOSE)Log.i(TAG, "AudioEncoder started encoding");
            bAudioEncoderFinished = false;

            ByteBuffer[] encoderOutputBuffers = mAudioEncoder.getOutputBuffers();
            ByteBuffer encodedData;

            mAudioBufferInfo = new MediaCodec.BufferInfo();

            while(!bAudioEncoderFinished){
                int encoderStatus = mAudioEncoder.dequeueOutputBuffer(mAudioBufferInfo, TIMEOUT_USEC);
                if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
                    // no output available yet
                    if (VERBOSE) Log.d(TAG + "_encoder", "no output available, spinning to await EOS");
                } else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                    // not expected for an encoder
                    encoderOutputBuffers = mAudioEncoder.getOutputBuffers();
                } else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                    MediaFormat newFormat = mAudioEncoder.getOutputFormat();
                    if(VERBOSE)Log.d(TAG, "received output format: " + newFormat);
                    // should happen before receiving buffers, and should only happen once
                    iAudioTrackIndex = mMovieMuxer.addTrack(newFormat);

                } else if (encoderStatus < 0) {
                    Log.w(TAG + "_encoder", "unexpected result from encoder.dequeueOutputBuffer: " + encoderStatus);
                    // let's ignore it
                } else {
                    if(mMovieMuxer.muxerStarted()){
                        encodedData = encoderOutputBuffers[encoderStatus];
                        if (encodedData == null) {
                            throw new RuntimeException("encoderOutputBuffer " + encoderStatus + " was null");
                        }

                        if ((mAudioBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                            // The codec config data was pulled out and fed to the muxer when we got
                            // the INFO_OUTPUT_FORMAT_CHANGED status. Ignore it.
                            if (VERBOSE) Log.d(TAG + "_encoder", "ignoring BUFFER_FLAG_CODEC_CONFIG");
                            mAudioBufferInfo.size = 0;
                        }

                        if (mAudioBufferInfo.size != 0) {

                            // adjust the ByteBuffer values to match BufferInfo (not needed?)
                            encodedData.position(mAudioBufferInfo.offset);
                            encodedData.limit(mAudioBufferInfo.offset + mAudioBufferInfo.size);

                            mMovieMuxer.writeSampleData(iAudioTrackIndex, encodedData, mAudioBufferInfo);

                            if (VERBOSE) {
                                Log.d(TAG + "_encoder", "sent " + mAudioBufferInfo.size + " bytes (audio) to muxer, ts=" + mAudioBufferInfo.presentationTimeUs);
                            }
                        }

                        mAudioEncoder.releaseOutputBuffer(encoderStatus, false);

                        if ((mAudioBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                            // reached EOS
                            if(VERBOSE)Log.i(TAG + "_encoder", "audio encoder finished");
                            bAudioEncoderFinished = true;

                            // tell the muxer that we are finished
                            mAudioHandler.onAudioEncodingFinished();
                            break;
                        }
                    }
                }
            }
        }
    }

}

Thanks for your help.

Best Answer

When you request data from the AudioRecord:

iReadResult = mAudioRecorder.read(mTempBuffer, 0, SAMPLES_PER_FRAME);

you may get more than one frame, and the pts predictor inside MediaCodec will then generate appropriate output pts based on the number of frames and the duration of a compressed frame. You can print those timestamps after the encoder's dequeueOutputBuffer and see that the actual values are != 0. But then you feed the encoder input with a pts of 0 again, which resets the internal predictor. All of this leads to non-monotonic pts generation, and the muxer has probably already complained about it; check the adb log. This happened to me, and I had to set the sample time manually before feeding the encoder:

mTempBuffer.setSampleTime(calc_pts_for_that_frame);   

At the very least you can check whether this is the problem you are facing; if it is, it can easily be solved by calculating proper timestamps.
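As an illustration, one way to get monotonically increasing input timestamps is to derive the pts from the number of PCM samples queued so far rather than from System.nanoTime(). The following is only a minimal sketch under the settings used in the question (16-bit mono PCM at 44100 Hz read into a byte buffer); nextAudioPts and mTotalSamplesQueued are hypothetical names that do not appear in the original code:

    private long mTotalSamplesQueued = 0;

    // Compute the presentation time from the total number of 16-bit mono PCM
    // samples queued so far, so the pts follows the audio clock and grows
    // monotonically instead of tracking the wall clock.
    private long nextAudioPts(int bytesQueued) {
        long presentationTimeUs = (mTotalSamplesQueued * 1000000L) / SAMPLE_RATE;
        mTotalSamplesQueued += bytesQueued / 2; // 2 bytes per sample, 1 channel
        return presentationTimeUs;
    }

With a helper like this, the queueInputBuffer calls would pass nextAudioPts(iReadResult) as the presentation time instead of audioPresentationTimeNs / 1000, keeping the input pts strictly increasing across frames.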

Regarding android - Recording audio and video with MediaMuxer on Android, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/22867832/
