
c - How to get rid of unresolved external errors when trying to decode audio using ffmpeg


I am trying to decode an audio file using ffmpeg, but when doing so I get a lot of unresolved external errors. I am new to the ffmpeg library, and any advice on this problem would be a great help.

Thank you.

void audioDecode(char* filename)
{
    FILE *file;
    AVFormatContext *audioInputFormatContext;
    AVInputFormat *audioInputFormat = NULL;
    AVCodec *audioCodec;
    AVCodecContext *audioCodecContext;

    av_register_all();

    char *audioInputDeviceName = filename;
    int ret;
    int audioIndex = 0;
    AVPacket pkt;
    av_init_packet(&pkt);

    avformat_network_init();

    audioInputFormatContext = avformat_alloc_context();
    ret = avformat_open_input(&audioInputFormatContext, audioInputDeviceName, audioInputFormat, NULL);

    if (ret == 0)
    {
        ret = avformat_find_stream_info(audioInputFormatContext, 0);
        if (ret >= 0)
        {
            for (int i = 0; i < audioInputFormatContext->nb_streams; i++) {
                if (audioInputFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                    audioIndex = i;
                    break;
                }
            }
            audioCodec = avcodec_find_decoder(audioInputFormatContext->streams[audioIndex]->codecpar->codec_id);
            audioCodecContext = avcodec_alloc_context3(audioCodec);
            avcodec_parameters_to_context(audioCodecContext, audioInputFormatContext->streams[audioIndex]->codecpar);

            if (avcodec_open2(audioCodecContext, audioCodec, NULL) >= 0)
            {
                ret = av_read_frame(audioInputFormatContext, &pkt);
                AVPacket encodePacket;
                AVFrame* decodeFrame = av_frame_alloc();
                int dec_got_frame = 0;
                if (ret == 0)
                {
                    ret = avcodec_send_packet(audioCodecContext, &pkt);
                    if (ret < 0)
                        printf("Error");
                }
                ret = avcodec_receive_frame(audioCodecContext, decodeFrame);

                if (ret >= 0)
                    dec_got_frame = 1;
                if (dec_got_frame)
                {
                    fopen_s(&file, filename, "wb");
                    fwrite(pkt.data, 1, pkt.size, file);
                    fclose(file);
                }

                av_frame_free(&decodeFrame);
            }
        }
    }

    avformat_close_input(&audioInputFormatContext);
    avcodec_free_context(&audioCodecContext);

    av_packet_unref(&pkt);
}
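Unresolved external symbol errors at link time usually mean the ffmpeg import libraries are not being handed to the linker (and, if the translation unit is compiled as C++, that the C headers are not wrapped in extern "C"). A minimal sketch of a setup that avoids them, assuming prebuilt ffmpeg development libraries on Windows/MSVC; the file and library names are examples only:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>

/* MSVC: tell the linker to pull in the ffmpeg import libraries.
 * (If this file is compiled as C++, the #includes above must also be
 * wrapped in extern "C" { ... }, because the ffmpeg headers are plain C.) */
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")

/* GCC/Clang/MinGW equivalent on the command line:
 *   cc audiodecode.c -lavformat -lavcodec -lavutil */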

Best Answer

I'm just going to assume that you are already connected to your stream source and have acquired the codec context, based on what you mentioned in the comments.

These are snippets from a project of my own that decodes audio frames.

Decoding an audio packet:

void FFMPEG::process_audio_packet(AVPacket *pkt) {
    int got;
    avcodec_decode_audio4(aud_stream.context, aud_stream.frame, &got, pkt);
    if (got) Audio.add_av_frame(aud_stream.frame);
}
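avcodec_decode_audio4() has since been deprecated; on current ffmpeg versions the same step is done with the send/receive API. A rough sketch of the equivalent function, assuming the same aud_stream and Audio members used above:

void FFMPEG::process_audio_packet(AVPacket *pkt) {
    // Feed the compressed packet to the decoder...
    if (avcodec_send_packet(aud_stream.context, pkt) < 0)
        return;
    // ...then drain every frame it produces (one packet can yield several).
    while (avcodec_receive_frame(aud_stream.context, aud_stream.frame) == 0)
        Audio.add_av_frame(aud_stream.frame);
}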

Processing the finished frame and extracting a stereo, 16-bit signed buffer:

void AudioManager::add_av_frame(AVFrame *frame) {
    int nsamples = frame->nb_samples;
    int sample_rate = frame->sample_rate;
    int channels = frame->channels;
    AVSampleFormat format = (AVSampleFormat) frame->format;
    bool planar = av_sample_fmt_is_planar(format) == 1;

    int64_t pts = av_frame_get_best_effort_timestamp(frame);
    //double ftime;

    /*if (ffmpeg.vid_stream.stream_id != -1)
        ftime = av_q2d(ffmpeg.aud_stream.context->time_base) * pts;
    else
        ftime = av_q2d(ffmpeg.vid_stream.context->time_base) * pts;*/

    AudioBuffer *buffer = NULL;

    if (planar) { // handle planar audio frames
        /*
         * PLANAR frames conversion
         * ------------------------
         */
        if (channels == 1) { // MONO

            //LOGD("Processing PLANAR MONO");
            /*
             * MONO
             */

            if (format == AV_SAMPLE_FMT_S16P) { // 16 bit signed
                if ((buffer = alloc_buffer(frame))) { // allocated okay?
                    short *channel = (short*)frame->data[0];
                    short *buff = buffer->data;
                    for (int c = 0; c < nsamples; c++) {
                        *buff++ = *channel;
                        *buff++ = *channel++;
                    }
                    queue_new_buffer(buffer);
                }
                return;
            }
            if (format == AV_SAMPLE_FMT_S32P) { // 32 bit signed
                if ((buffer = alloc_buffer(frame))) { // allocated okay?
                    int32_t *channel = (int32_t*)frame->data[0];
                    short *buff = buffer->data;
                    for (int c = 0; c < nsamples; c++) {
                        int16_t s = (int16_t) (*channel++ >> 16);
                        *buff++ = s;
                        *buff++ = s;
                    }
                    queue_new_buffer(buffer);
                }
                return;
            }
            if (format == AV_SAMPLE_FMT_U8P) { // 8 bit unsigned
                if ((buffer = alloc_buffer(frame))) { // allocated okay?
                    uint8_t *channel = (uint8_t*)frame->data[0];
                    short *buff = buffer->data;
                    for (int c = 0; c < nsamples; c++) {
                        int16_t s = ((int16_t)(*channel++ - 128) << 8);
                        *buff++ = s;
                        *buff++ = s;
                    }
                    queue_new_buffer(buffer);
                }
            }
            return; // scrap if no audio buffer (highly unlikely)

        } else if (channels == 2) { // STEREO

            //LOGD("Processing PLANAR STEREO");
            /*
             * STEREO
             */

            if (format == AV_SAMPLE_FMT_S16P) { // 16 bit signed
                if ((buffer = alloc_buffer(frame))) { // allocated okay
                    short *channel1 = (short*)frame->data[0];
                    short *channel2 = (short*)frame->data[1];
                    short *buff = buffer->data;
                    for (int c = 0; c < nsamples; c++) {
                        *buff++ = *channel1++;
                        *buff++ = *channel2++;
                    }
                    queue_new_buffer(buffer);
                }
                return;
            }
            if (format == AV_SAMPLE_FMT_S32P) { // 32 bit signed
                if ((buffer = alloc_buffer(frame))) { // allocated okay?
                    int32_t *channel1 = (int32_t*)frame->data[0];
                    int32_t *channel2 = (int32_t*)frame->data[1];
                    short *buff = buffer->data;
                    for (int c = 0; c < nsamples; c++) {
                        int16_t s1 = (int16_t) (*channel1++ >> 16);
                        int16_t s2 = (int16_t) (*channel2++ >> 16);
                        *buff++ = s1;
                        *buff++ = s2;
                    }
                    queue_new_buffer(buffer);
                }
                return;
            }
            if (format == AV_SAMPLE_FMT_U8P) { // 8 bit unsigned
                if ((buffer = alloc_buffer(frame))) { // allocated okay?
                    uint8_t *channel1 = (uint8_t*)frame->data[0];
                    uint8_t *channel2 = (uint8_t*)frame->data[1];
                    short *buff = buffer->data;
                    for (int c = 0; c < nsamples; c++) {
                        int16_t s1 = ((int16_t)(*channel1++ - 128) << 8);
                        int16_t s2 = ((int16_t)(*channel2++ - 128) << 8);
                        *buff++ = s1;
                        *buff++ = s2;
                    }
                    queue_new_buffer(buffer);
                }
            }
            return;

        } // TODO: Handle more channels at a later date

    } else { // handle non-planar audio frames
        /*
         * INTERLEAVED conversion
         * ----------------------
         */
    }
}
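The interleaved (non-planar) branch is left empty above. As an alternative to hand-writing one loop per sample format, libswresample can convert any decoded frame, planar or interleaved, into interleaved signed 16-bit stereo. A minimal sketch under that assumption (the helper name is made up, out_samples must hold frame->nb_samples * 2 shorts, and in practice the SwrContext would be created once and reused rather than rebuilt per frame):

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

// Convert one decoded frame to interleaved S16 stereo at the frame's own rate.
// Returns the number of samples written per channel, or < 0 on error.
static int frame_to_s16_stereo(AVFrame *frame, short *out_samples) {
    SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, frame->sample_rate,
            av_get_default_channel_layout(frame->channels),
            (AVSampleFormat) frame->format, frame->sample_rate,
            0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return -1;
    }

    uint8_t *out = (uint8_t *) out_samples;
    int converted = swr_convert(swr, &out, frame->nb_samples,
            (const uint8_t **) frame->extended_data, frame->nb_samples);

    swr_free(&swr);
    return converted; // interleaved L/R pairs now sit in out_samples
}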

Handling the audio buffer:

void AudioManager::queue_new_buffer(AudioBuffer *buffer) {
    if (buffer) { // valid buffer

        // apply volume gain (only working with stereo)

        if (volume != 100) {
            short *data = buffer->data;
            int num_samples = buffer->nsamples << 1;
            while (num_samples--) {
                long sample = ((long)*data * volume) / 100;
                if (sample < -32768) sample = -32768;
                if (sample > 32767) sample = 32767;
                *data++ = (short)sample;
            }
        }

        // add buffer to queue

        buffer->used = true;

        double pts_start = get_pts_start_time();

        decode_pos = (++decode_pos) % MAX_AUD_BUFFERS;
        if (decode_pos == playback_pos)
            playback_pos = (++playback_pos) % MAX_AUD_BUFFERS;

        if (ffmpeg.vid_stream.stream_id == -1 && pts_start < 0.0) {
            set_pts_start_time(buffer->frame_time);
            set_sys_start_time(Display.get_current_render_time());
            LOGD("START TIME FROM AUDIO STREAM...");
        }

        //LOGI("Audio buffer queued %d (%d)", decode_pos, playback_pos);
    }
}

Regarding "c - How to get rid of unresolved external errors when trying to decode audio using ffmpeg", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/40009271/
