
ffmpeg - Audio encoding with avcodec_fill_audio_frame() and a memory leak


As part of encoding decoded audio packets, I am using avcodec_fill_audio_frame(). I pass an allocated AVFrame pointer together with a buffer containing the decoded samples and the other parameters (number of channels, sample format, buffer size). Although the encoding works fine, I am not able to get rid of a memory leak completely. I have taken care of most things, but I still cannot find the leak.
Below is the function I use for encoding. Please suggest something.
AudioSample holds the decoded data and is managed entirely in a different class (freed in that class's destructor). I free the AVFrame in the FfmpegEncoder destructor, and the AVPacket is released on every call with av_free_packet(), with the av_destruct_packet destruct callback enabled. What else do I need to free?

void FfmpegEncoder::WriteAudioSample(AudioSample *audS)
{
    int num_audio_frame = 0;
    AVCodecContext *c = NULL;
    // AVFrame *frame;
    AVPacket pkt;

    av_init_packet(&pkt);
    pkt.destruct = av_destruct_packet;
    pkt.data = NULL;
    pkt.size = 0;
    int ret = 0, got_packet = 0;
    c = m_out_aud_strm->codec;
    static int64_t aud_pts_in = -1;

    if((audS != NULL) && (audS->GetSampleLength() > 0))
    {
        int byte_per_sample = av_get_bytes_per_sample(c->sample_fmt);

        PRINT_VAL("Byte Per Sample ", byte_per_sample)
        m_frame->nb_samples = (audS->GetSampleLength())/(c->channels*av_get_bytes_per_sample(c->sample_fmt));
        if(m_frame->nb_samples == c->frame_size)
        {
#if 1
            if(m_need_resample && (c->channels >= 2))
            {
                uint8_t *t_buff1 = new uint8_t[audS->GetSampleLength()];
                if(t_buff1 != NULL)
                {
                    for(int64_t i = 0; i < m_frame->nb_samples; i++)
                    {
                        memcpy(t_buff1 + i*byte_per_sample, (uint8_t*)((uint8_t*)audS->GetAudioSampleData() + i*byte_per_sample*c->channels), byte_per_sample);
                        memcpy(t_buff1 + (audS->GetSampleLength())/2 + i*byte_per_sample, (uint8_t*)((uint8_t*)audS->GetAudioSampleData() + i*byte_per_sample*c->channels + byte_per_sample), byte_per_sample);
                    }
                    audS->FillAudioSample(t_buff1, audS->GetSampleLength());
                    delete[] t_buff1;
                }
            }
#endif
            ret = avcodec_fill_audio_frame(m_frame, c->channels, c->sample_fmt, (uint8_t*)audS->GetAudioSampleData(), m_frame->nb_samples*byte_per_sample*c->channels, 0);
            //ret = avcodec_fill_audio_frame(&frame, c->channels, c->sample_fmt, t_buff, frame.nb_samples*byte_per_sample*c->channels, 0);
            if(ret != 0)
            {
                PRINT_MSG("Avcodec Fill Audio Failed ")
            }
            else
            {
                got_packet = 0;
                ret = avcodec_encode_audio2(c, &pkt, m_frame, &got_packet);
                if(ret < 0 || got_packet == 0)
                {
                    PRINT_MSG("failed to encode audio ")
                }
                else
                {
                    PRINT_MSG("Audio Packet Encoded ");
                    aud_pts_in++;
                    pkt.pts = aud_pts_in;
                    pkt.dts = pkt.pts;
                    pkt.stream_index = m_out_aud_strm->index;

                    ret = av_interleaved_write_frame(oc, &pkt);
                    if(ret != 0)
                    {
                        PRINT_MSG("Error Write Audio PKT ")
                    }
                    else
                    {
                        PRINT_MSG("Audio PKT Writen ")
                    }
                }
            }
        }
        avcodec_flush_buffers(c);
        // avcodec_free_frame(&frame);
    }

    av_free_packet(&pkt);
}
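
For reference, a minimal sketch of the surrounding allocation and cleanup described above might look like the following. The constructor/destructor bodies are assumptions for illustration only; the original code shows just m_frame and the per-call av_free_packet().

// Hypothetical setup/teardown implied by the description above (FFmpeg 1.x/2.x era API).
// avcodec_fill_audio_frame() only points m_frame at the caller's buffer; it does not
// copy or take ownership of the samples, so the AudioSample class keeps owning them.
FfmpegEncoder::FfmpegEncoder()
{
    m_frame = avcodec_alloc_frame();   // one reusable frame (av_frame_alloc() on newer builds)
    // ... open the output context, streams and codec here ...
}

FfmpegEncoder::~FfmpegEncoder()
{
    avcodec_free_frame(&m_frame);      // frees only the AVFrame struct, not the sample data
    // each AVPacket is released per call via av_free_packet() in WriteAudioSample()
}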

Thanks,
Pradeep

Best Answer

//================== SEND AUDIO OUTPUT =======================
void AVOutputStream::sendAudioOutput (AVFrame* inputFrame)
{
    AVCodecContext *codecCtx = pOutputAudioStream->codec;

    // set source data variables
    sourceNumberOfChannels = inputFrame->channels;
    sourceChannelLayout = inputFrame->channel_layout;
    sourceSampleRate = inputFrame->sample_rate;
    _sourceSampleFormat = (AVSampleFormat)inputFrame->format;
    sourceNumberOfSamples = inputFrame->nb_samples;

    // set destination data variables
    destinationNumberOfChannels = codecCtx->channels;
    destinationChannelLayout = codecCtx->channel_layout;
    destinationSampleRate = codecCtx->sample_rate;
    destinationSampleFormat = codecCtx->sample_fmt; //AV_SAMPLE_FMT_FLTP; //EncodecCtx->sample_fmt;
    destinationLineSize = 0;
    destinationData = NULL;

    int returnVal = 0;

    if (startDecode == false)
    {
        startDecode = true;

        resamplerCtx = swr_alloc_set_opts(NULL,
                                          destinationChannelLayout,
                                          destinationSampleFormat,
                                          destinationSampleRate,
                                          sourceChannelLayout,
                                          _sourceSampleFormat,
                                          sourceSampleRate,
                                          0,
                                          NULL);

        if (resamplerCtx == NULL)
        {
            std::cout << "Unable to create the resampler context for the audio frame";
            isConnected = false;
        }

        // initialize the resampling context
        returnVal = swr_init(resamplerCtx);

        if (returnVal < 0)
        {
            std::cout << "Unable to init the resampler context, error:";
            isConnected = false;
        }
    } //if (startDecode == false)

    if (sourceSampleRate != 0)
        destinationNumberOfSamples = destinationSampleRate/sourceSampleRate * sourceNumberOfSamples;

    // allocate the destination samples buffer
    returnVal = av_samples_alloc_array_and_samples(&destinationData,
                                                   &destinationLineSize,
                                                   destinationNumberOfChannels,
                                                   destinationNumberOfSamples,
                                                   destinationSampleFormat,
                                                   0);

    if (returnVal < 0)
    {
        std::cout << "Unable to allocate destination samples, error";
        isConnected = false;
    }

    // convert to destination format
    returnVal = swr_convert(resamplerCtx,
                            destinationData,
                            destinationNumberOfSamples,
                            (const uint8_t **)inputFrame->data, //sourceData,
                            sourceNumberOfSamples);

    if (returnVal < 0)
    {
        std::cout << "Resampling failed, error \n";
        isConnected = false;
    }

    int bufferSize = av_samples_get_buffer_size(&destinationLineSize,
                                                destinationNumberOfChannels,
                                                destinationNumberOfSamples,
                                                destinationSampleFormat,
                                                0);

    // without fifo
    pOutputAudioFrame = av_frame_alloc();

    pOutputAudioFrame->nb_samples = codecCtx->frame_size; //frameNumberOfSamples;
    pOutputAudioFrame->format = codecCtx->sample_fmt;
    pOutputAudioFrame->channel_layout = codecCtx->channel_layout;
    pOutputAudioFrame->channels = codecCtx->channels;
    pOutputAudioFrame->sample_rate = codecCtx->sample_rate;

    returnVal = avcodec_fill_audio_frame(pOutputAudioFrame,
                                         pOutputAudioFrame->channels,
                                         (AVSampleFormat)pOutputAudioFrame->format,
                                         (const uint8_t *)destinationData[0],
                                         bufferSize, 0);

    pOutputAudioFrame->pts = inputFrame->pts;

    if (returnVal < 0)
    {
        std::cout << "Unable to fill the audio frame with captured audio data, error";
        isConnected = false;
    }

    // encode the audio frame, fill a packet for streaming
    av_init_packet(&outAudioPacket);
    outAudioPacket.data = NULL;
    outAudioPacket.size = 0;
    outAudioPacket.dts = outAudioPacket.pts = 0;
    int gotPacket;

    // encoding
    returnVal = avcodec_encode_audio2(codecCtx, &outAudioPacket, pOutputAudioFrame, &gotPacket);

    // free buffers
    av_freep(&destinationData[0]);
    av_freep(&destinationData);
    av_frame_free(&pOutputAudioFrame);

    if (gotPacket)
    {
        outAudioPacket.stream_index = pOutputAudioStream->index;
        outAudioPacket.flags |= AV_PKT_FLAG_KEY;

        returnVal = av_interleaved_write_frame(pOutputFormatCtx, &outAudioPacket);
        //returnVal = av_write_frame(pOutputFormatCtx, &outAudioPacket);

        if (returnVal != 0)
        {
            std::cout << "Cannot write audio packet \n";
            isConnected = false;
        }

        av_free_packet(&outAudioPacket);
    } // if (gotPacket)
}

You can see that right after resampling and encoding I free the buffers that were used:
// free buffers
av_freep(&destinationData[0]);
av_freep(&destinationData);
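
In other words, av_samples_alloc_array_and_samples() allocates both the array of channel pointers and the underlying sample buffer on every call, so both must be released once the packet has been encoded. A condensed sketch of that pattern (error handling omitted, variable names follow the answer above):

uint8_t **destinationData = NULL;
int destinationLineSize = 0;

// allocates the pointer array and a contiguous sample buffer for the converted audio
av_samples_alloc_array_and_samples(&destinationData, &destinationLineSize,
                                   destinationNumberOfChannels,
                                   destinationNumberOfSamples,
                                   destinationSampleFormat, 0);

swr_convert(resamplerCtx, destinationData, destinationNumberOfSamples,
            (const uint8_t **)inputFrame->data, sourceNumberOfSamples);

// ... fill pOutputAudioFrame and call avcodec_encode_audio2() ...

av_freep(&destinationData[0]);   // releases the sample buffer itself
av_freep(&destinationData);      // releases the array of channel pointers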

Regarding "ffmpeg - Audio encoding with avcodec_fill_audio_frame() and a memory leak", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/16604718/
