gpt4 book ai didi

c++ - 如何将静音音频数据写入音频流?

转载 作者:行者123 更新时间:2023-12-03 01:32:11 26 4
gpt4 key购买 nike

我正在将一些图像写入 AVStream,然后我正在读取 mp3 文件并将其写入不同的 AVStream。问题是音频流比视频流短一点,所以如果我添加更多图像和另一个音频文件,音频就不再与视频同步。所以我的想法是,在将下一个音频文件写入音频流之前,先向音频流写入一段静音音频数据。但我不知道如何将静音数据写入音频流。

我找到了这篇相关的帖子,但我不知道如何计算数据包大小,也不知道如何将数据包写入音频流。

这是迄今为止我最“成功”的尝试,但生成的结果 (audioTest(0xff).mp4) 远非静音。

    /* set up the audio convert context */
libffmpeg::SwrContext* audioConvertContext = libffmpeg::swr_alloc();
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "in_sample_fmt", libffmpeg::AV_SAMPLE_FMT_S16, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "out_sample_fmt", data->audioCodecContext->sample_fmt, 0);
int ret = libffmpeg::swr_init(audioConvertContext);
if (ret < 0)
{
Helper::ThrowError("Failed to allocate audio reformat context.", ret);
}

/* set up silent frame */
libffmpeg::AVFrame* silentFrame = libffmpeg::av_frame_alloc();
if (!silentFrame)
{
Helper::ThrowError("Failed to allocate audio encode frame.");
}

silentFrame->nb_samples = data->audioCodecContext->frame_size;
silentFrame->format = data->audioCodecContext->sample_fmt;
silentFrame->channel_layout = data->audioCodecContext->channel_layout;
silentFrame->channels = data->audioCodecContext->channels;
silentFrame->sample_rate = data->audioCodecContext->sample_rate;

/* alloc the frame buffer */
ret = libffmpeg::av_frame_get_buffer(silentFrame, 0);
if (ret < 0)
{
Helper::ThrowError("Could not allocate audio data buffers.");
}

int got_output;
int samples_count;
double duration = 4 * (double)data->audioStream->time_base.den / (double)data->audioStream->time_base.num;
while (av_stream_get_end_pts(data->audioStream) < duration)
{
libffmpeg::AVPacket pkt;
libffmpeg::av_init_packet(&pkt);

ret = libffmpeg::av_frame_make_writable(silentFrame);
if (ret < 0)
{
Helper::ThrowError("Could not make frame writable.");
}

for (int j = 0; j < data->audioCodecContext->frame_size; j++)
{
silentFrame->data[0][2 * j] = 0xff;

for (int k = 1; k < data->audioCodecContext->channels; k++)
{
silentFrame->data[0][2 * j + k] = silentFrame->data[0][2 * j];
}
}

int dst_nb_samples = libffmpeg::av_rescale_rnd(
libffmpeg::swr_get_delay(audioConvertContext, data->audioCodecContext->sample_rate) + silentFrame->nb_samples,
data->audioCodecContext->sample_rate, data->audioCodecContext->sample_rate,
libffmpeg::AV_ROUND_UP);

ret = libffmpeg::swr_convert(
audioConvertContext,
silentFrame->data, dst_nb_samples,
(const libffmpeg::uint8_t * *) & silentFrame->data,
silentFrame->nb_samples);

if (ret < 0)
{
Helper::ThrowError("Error while converting audio frame.", ret);
}

silentFrame->pts = libffmpeg::av_rescale_q(samples_count, libffmpeg::AVRational{ 1, data->audioCodecContext->sample_rate }, data->audioCodecContext->time_base);
samples_count += dst_nb_samples;

ret = libffmpeg::avcodec_encode_audio2(data->audioCodecContext, &pkt, silentFrame, &got_output);
if (ret < 0)
{
Helper::ThrowError("Error while encoding audio frame.", ret);
}

if (got_output)
{
pkt.stream_index = data->audioStream->index;

if (ret = av_write_frame(data->formatContext, &pkt))
{
Helper::ThrowError("Error while writing audio frame.", ret);
}

libffmpeg::av_packet_unref(&pkt);
}
}

libffmpeg::av_frame_free(&silentFrame);

最佳答案

错误是我如何写入数组。我不太习惯 C++,所以我的解决方案可能有点乱,但至少现在可以了。

    /* set up the audio convert context */
libffmpeg::SwrContext* audioConvertContext = libffmpeg::swr_alloc();
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "in_sample_fmt", libffmpeg::AV_SAMPLE_FMT_S16, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "out_sample_fmt", data->audioCodecContext->sample_fmt, 0);
int ret = libffmpeg::swr_init(audioConvertContext);
if (ret < 0)
{
Helper::ThrowError("Failed to allocate audio reformat context.", ret);
}

/* set up silent frame */
libffmpeg::AVFrame* silentFrame = libffmpeg::av_frame_alloc();
if (!silentFrame)
{
Helper::ThrowError("Failed to allocate audio encode frame.");
}

silentFrame->nb_samples = data->audioCodecContext->frame_size;
silentFrame->format = data->audioCodecContext->sample_fmt;
silentFrame->channel_layout = data->audioCodecContext->channel_layout;
silentFrame->channels = data->audioCodecContext->channels;
silentFrame->sample_rate = data->audioCodecContext->sample_rate;

/* alloc the frame buffer */
ret = libffmpeg::av_frame_get_buffer(silentFrame, 0);
if (ret < 0)
{
Helper::ThrowError("Could not allocate audio data buffers.");
}

libffmpeg::AVPacket* pkt = libffmpeg::av_packet_alloc();
if (!pkt)
{
Helper::ThrowError("could not allocate the packet.");
}

void* buffer = malloc(data->audioCodecContext->frame_size * data->audioCodecContext->channels * 16);
for (int i = 0; i < data->audioCodecContext->frame_size * data->audioCodecContext->channels * 2; i++)
{
*((int*)buffer + i) = 0x0;
}

int got_output;
int samples_count;
double duration = 4 * (double)data->audioStream->time_base.den / (double)data->audioStream->time_base.num;
while (av_stream_get_end_pts(data->audioStream) < duration)
{
libffmpeg::AVPacket pkt;
libffmpeg::av_init_packet(&pkt);

ret = libffmpeg::av_frame_make_writable(silentFrame);
if (ret < 0)
{
Helper::ThrowError("Could not make frame writable.");
}

silentFrame->data[0] = (libffmpeg::uint8_t*) buffer;

int dst_nb_samples = libffmpeg::av_rescale_rnd(
libffmpeg::swr_get_delay(audioConvertContext, data->audioCodecContext->sample_rate) + silentFrame->nb_samples,
data->audioCodecContext->sample_rate, data->audioCodecContext->sample_rate,
libffmpeg::AV_ROUND_UP);

ret = libffmpeg::swr_convert(
audioConvertContext,
silentFrame->data, dst_nb_samples,
(const libffmpeg::uint8_t * *) & silentFrame->data,
silentFrame->nb_samples);

if (ret < 0)
{
Helper::ThrowError("Error while converting audio frame.", ret);
}

silentFrame->pts = libffmpeg::av_rescale_q(samples_count, libffmpeg::AVRational{ 1, data->audioCodecContext->sample_rate }, data->audioCodecContext->time_base);
samples_count += dst_nb_samples;

ret = libffmpeg::avcodec_encode_audio2(data->audioCodecContext, &pkt, silentFrame, &got_output);
if (ret < 0)
{
Helper::ThrowError("Error while encoding audio frame.", ret);
}

if (got_output)
{
pkt.stream_index = data->audioStream->index;

if (ret = av_write_frame(data->formatContext, &pkt))
{
Helper::ThrowError("Error while writing audio frame.", ret);
}

libffmpeg::av_packet_unref(&pkt);
}
}

free(buffer);
libffmpeg::av_frame_free(&silentFrame);

关于c++ - 如何将静音音频数据写入音频流?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/56581396/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com