
c++ - Feeding an input stream from PortAudio to webrtc::AudioProcessing


I'm using the cygwin package libwebrtc-audio-processing-devel-0.3-1 to work with webrtc's AudioProcessing class.

I'm reading input from my microphone with PortAudio and want to pass it to webrtc for a VAD check, but I don't know how to hand my data to the ProcessStream method.

#define SAMPLE_RATE       (32000)
#define FRAMES_PER_BUFFER (320)
#define PA_SAMPLE_TYPE paFloat32
#define SAMPLE_SIZE (4)

...

err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );

// sampleBlock should now point to 320 32-bit floats

....
apm->ProcessStream( <What goes here?> )

Here are the ProcessStream definitions.
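For reference, the three overloads in webrtc-audio-processing 0.3 look roughly like this (paraphrased from that era's audio_processing.h; exact parameter names may differ):

virtual int ProcessStream(AudioFrame* frame) = 0;

virtual int ProcessStream(const float* const* src,
                          size_t samples_per_channel,
                          int input_sample_rate_hz,
                          ChannelLayout input_layout,
                          int output_sample_rate_hz,
                          ChannelLayout output_layout,
                          float* const* dest) = 0;

virtual int ProcessStream(const float* const* src,
                          const StreamConfig& input_config,
                          const StreamConfig& output_config,
                          float* const* dest) = 0;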

When I try to instantiate an AudioFrame for the first overload like this:

AudioFrame frame;

I get the following error:

main.cpp:161:22: error: aggregate ‘webrtc::AudioFrame frame’ has incomplete type and cannot be defined
webrtc::AudioFrame frame;

The second and third overloads want the data in the form "const float* const* src". Does that mean I need a constant pointer to a constant float pointer? That confuses me a bit.
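Editorial note: "const float* const*" reads inside-out as a pointer to one or more const pointers to const float, i.e. an array of per-channel buffer pointers. The float overloads expect deinterleaved data, one pointer per channel, so for a mono stream a minimal sketch would look like the following (an untested illustration, assuming the StreamConfig overload behaves as in upstream webrtc; a plain float** converts implicitly to const float* const*):

// One pointer per channel (deinterleaved); for mono a single entry suffices.
// 320 frames at 32 kHz is the 10 ms chunk size the APM expects.
float* channels[1] = { reinterpret_cast<float*>(sampleBlock) };
webrtc::StreamConfig config(SAMPLE_RATE, 1, false); // mono, no keyboard channel
// Processing is done in place here: dest aliases src.
apm->ProcessStream(channels, config, config, channels);

Casting the sample buffer itself to (const float* const*), as in the commented-out attempt below, makes the APM interpret raw samples as channel pointers, which would explain a segfault.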

The full example below, also available on Pastebin, captures input from the default input device and sets up webrtc for the ProcessStream call. My attempted call is included but commented out, because it causes a segfault.

The code requires PortAudio and libwebrtc-audio-processing-devel-0.3-1. I compile on cygwin with the following command:

g++ main_example.cpp -o main -L./ -lcygportaudio-2 -lrt -lm -pthread -I/usr/include/webrtc_audio_processing/ -DWEBRTC_WIN -std=gnu++11 -L/bin/ -lcygwebrtc_audio_processing-1
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "portaudio.h"
#include <sys/time.h>
#include <windows.h>
#include <windowsx.h>
#include <unistd.h>

#include "webrtc/modules/audio_processing/include/audio_processing.h"
using webrtc::AudioProcessing;
using webrtc::AudioFrame;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::EchoCancellation;
using webrtc::VoiceDetection;


#define SAMPLE_RATE (32000)
#define FRAMES_PER_BUFFER (320)
#define DITHER_FLAG (0)

#define PA_SAMPLE_TYPE paFloat32
#define SAMPLE_SIZE (4)
#define SAMPLE_SILENCE (0)
#define PRINTF_S_FORMAT "%8f"

/*******************************************************************/
int main(int argc, char **argv);
/* error handling */
int xrun(PaStream *stream, int err, char* sampleBlock);
void error1(PaStream *stream, char* sampleBlock);
void error2(PaStream *stream, int err);
int main (int argc, char **argv)
{

PaStreamParameters inputParameters;
PaStream *stream = NULL;
PaError err;
const PaDeviceInfo* inputInfo;
char *sampleBlock = NULL;
int i;
int numBytes;
int numChannels;

err = Pa_Initialize();
if( err != paNoError ) error2(stream, err);

inputParameters.device = Pa_GetDefaultInputDevice(); /* default input device */
inputInfo = Pa_GetDeviceInfo( inputParameters.device );
numChannels = inputInfo->maxInputChannels;
inputParameters.channelCount = 1;// numChannels;
inputParameters.sampleFormat = PA_SAMPLE_TYPE;
inputParameters.suggestedLatency = inputInfo->defaultHighInputLatency ;
inputParameters.hostApiSpecificStreamInfo = NULL;
printf( "Input device # %d.\n", inputParameters.device );
printf( " Name: %s\n", inputInfo->name );

/* -- setup -- */

err = Pa_OpenStream(
&stream,
&inputParameters,
NULL,
SAMPLE_RATE,
FRAMES_PER_BUFFER,
paClipOff, /* we won't output out of range samples so don't bother clipping them */
NULL, /* no callback, use blocking API */
NULL ); /* no callback, so no callback userData */
if( err != paNoError ) error2(stream, err);

numBytes = FRAMES_PER_BUFFER * numChannels * SAMPLE_SIZE ;
sampleBlock = (char *) malloc( numBytes );
if( sampleBlock == NULL )
{
printf("Could not allocate record array.\n");
error1(stream, sampleBlock);
}

err = Pa_StartStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);

// Configure webrtc::audioprocessing
AudioProcessing* apm = AudioProcessing::Create();

apm->high_pass_filter()->Enable(true);

apm->echo_cancellation()->enable_drift_compensation(false);
apm->echo_cancellation()->Enable(true);

apm->noise_suppression()->set_level(apm->noise_suppression()->kHigh);
apm->noise_suppression()->Enable(true);

apm->gain_control()->set_analog_level_limits(0, 255);
apm->gain_control()->set_mode(apm->gain_control()->kAdaptiveAnalog);
apm->gain_control()->Enable(true);

apm->voice_detection()->Enable(true);

int analog_level = apm->gain_control()->stream_analog_level();
int delay_ms = 20;
int voiceDetected = 0;


long int holdTime = 600; //milliseconds
int prevVoiceDetected = -1;
int holding = 0;
int transmitting = 0;
int prevTransmitting = -1;
struct timeval startHoldTime, currentTime;

while (1) {
// Read in input frames
err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );
if( err ) xrun(stream, err, sampleBlock);

// Run webrtc vad
apm->set_stream_delay_ms(delay_ms);
apm->gain_control()->set_stream_analog_level(analog_level);

/*
// An apm->ProcessStream call is required here. The call I've tried segfaults, probably because of those casts I don't understand.
webrtc::StreamConfig inputConfig = webrtc::StreamConfig(SAMPLE_RATE, numChannels, false);
webrtc::StreamConfig outputConfig = webrtc::StreamConfig(SAMPLE_RATE, numChannels, false);
apm->ProcessStream((const float* const*)sampleBlock, inputConfig, outputConfig, (float* const*)sampleBlock);
*/


analog_level = apm->gain_control()->stream_analog_level();
voiceDetected = apm->voice_detection()->stream_has_voice();

transmitting = 0;
if (voiceDetected) {
transmitting = 1;
holding = 0;
} else if (holding) {
gettimeofday (&currentTime, NULL);
long elapsedHoldTime = (((currentTime.tv_sec - startHoldTime.tv_sec)*1000000L+currentTime.tv_usec) - startHoldTime.tv_usec)/1000;
//printf("elapsedtime: %d\n", elapsedHoldTime); fflush(stdout);
if (elapsedHoldTime > holdTime) {
//printf("completedhold\n"); fflush(stdout);
holding = 0;
} else {
//printf("holding\n"); fflush(stdout);
transmitting = 1;
}
} else if (prevVoiceDetected) {
holding = 1;
gettimeofday (&startHoldTime, NULL);
transmitting = 1;
}
prevVoiceDetected = voiceDetected;

if (prevTransmitting != transmitting) {
printf("Transmitting: %s\n", (transmitting) ? "true" : "false"); fflush(stdout);
}
prevTransmitting = transmitting;
}
printf("Wire off.\n"); fflush(stdout);

err = Pa_StopStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);

free( sampleBlock );

Pa_Terminate();
return 0;

}

int xrun(PaStream *stream, int err, char* sampleBlock) {
printf("err = %d\n", err); fflush(stdout);
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
free( sampleBlock );
Pa_Terminate();
if( err & paInputOverflow )
fprintf( stderr, "Input Overflow.\n" );
if( err & paOutputUnderflow )
fprintf( stderr, "Output Underflow.\n" );
return -2;
}

void error1(PaStream *stream, char* sampleBlock) {
free( sampleBlock );
exit(-1);
}
void error2(PaStream *stream, int err) {
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
Pa_Terminate();
fprintf( stderr, "An error occured while using the portaudio stream\n" );
fprintf( stderr, "Error number: %d\n", err );
fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
exit(-1);
}

Best Answer

I contacted @matzeri privately, and he pointed me to a working example in gstreamer, which set me in the right direction. Including module_common_types.h, adding the WEBRTC_AUDIO_PROCESSING_ONLY_BUILD directive, and fixing the definitions of the string-comparison functions for cygwin in webrtc/common_types.h let me define an AudioFrame and then make the corresponding ProcessStream call.
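The essential change, distilled from the full listing below: AudioFrame carries interleaved int16_t samples, so the PortAudio stream is switched from paFloat32 to paInt16 (SAMPLE_SIZE 2), and each captured block is copied into the frame before the call:

webrtc::AudioFrame frame;                   // complete type once module_common_types.h is included
frame.num_channels_ = numChannels;
frame.sample_rate_hz_ = SAMPLE_RATE;
frame.samples_per_channel_ = FRAMES_PER_BUFFER;
memcpy(frame.data_, sampleBlock, numBytes); // numBytes = frames * channels * 2
apm->ProcessStream(&frame);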

Here is a working example of VAD with PortAudio using libwebrtc-audio-processing-devel-0.3-1 on cygwin!

Note: I needed to modify webrtc/common_types.h so that it uses the following definitions instead of the win32 versions (cygwin's toolchain provides the POSIX strcasecmp/strncasecmp rather than _stricmp/_strnicmp):

#define STR_CASE_CMP(s1, s2) ::strcasecmp(s1, s2)
#define STR_NCASE_CMP(s1, s2, n) ::strncasecmp(s1, s2, n)

main.cpp

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "portaudio.h"
#include <sys/time.h>
#include <windows.h>
#include <windowsx.h>
#include <unistd.h>

#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/include/trace.h"
using webrtc::AudioProcessing;
using webrtc::AudioFrame;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::EchoCancellation;
using webrtc::VoiceDetection;


#define SAMPLE_RATE (32000)
#define FRAMES_PER_BUFFER (320)
#define DITHER_FLAG (0)

#define PA_SAMPLE_TYPE paInt16
#define SAMPLE_SIZE (2)
#define SAMPLE_SILENCE (0)
#define PRINTF_S_FORMAT "%d"

/*******************************************************************/
int main(int argc, char **argv);
/* error handling */
int xrun(PaStream *stream, int err, char* sampleBlock);
void error1(PaStream *stream, char* sampleBlock);
void error2(PaStream *stream, int err);
int main (int argc, char **argv)
{

PaStreamParameters inputParameters;
PaStream *stream = NULL;
PaError err;
const PaDeviceInfo* inputInfo;
char *sampleBlock = NULL;
int i;
int numBytes;
int numChannels;

err = Pa_Initialize();
if( err != paNoError ) error2(stream, err);

inputParameters.device = Pa_GetDefaultInputDevice(); /* default input device */
inputInfo = Pa_GetDeviceInfo( inputParameters.device );
numChannels = inputInfo->maxInputChannels;
inputParameters.channelCount = 1;// numChannels;
inputParameters.sampleFormat = PA_SAMPLE_TYPE;
inputParameters.suggestedLatency = inputInfo->defaultHighInputLatency ;
inputParameters.hostApiSpecificStreamInfo = NULL;
printf( "Input device # %d.\n", inputParameters.device );
printf( " Name: %s\n", inputInfo->name );

/* -- setup -- */

err = Pa_OpenStream(
&stream,
&inputParameters,
NULL,
SAMPLE_RATE,
FRAMES_PER_BUFFER,
paClipOff, /* we won't output out of range samples so don't bother clipping them */
NULL, /* no callback, use blocking API */
NULL ); /* no callback, so no callback userData */
if( err != paNoError ) error2(stream, err);

numBytes = FRAMES_PER_BUFFER * numChannels * SAMPLE_SIZE ;
sampleBlock = (char *) malloc( numBytes );
if( sampleBlock == NULL )
{
printf("Could not allocate record array.\n");
error1(stream, sampleBlock);
}

err = Pa_StartStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);

// Configure webrtc::audioprocessing
AudioProcessing* apm = AudioProcessing::Create();

apm->high_pass_filter()->Enable(true);

apm->echo_cancellation()->enable_drift_compensation(false);
apm->echo_cancellation()->Enable(true);

apm->noise_suppression()->set_level(apm->noise_suppression()->kHigh);
apm->noise_suppression()->Enable(true);

apm->gain_control()->set_analog_level_limits(0, 255);
apm->gain_control()->set_mode(apm->gain_control()->kAdaptiveAnalog);
apm->gain_control()->Enable(true);

apm->voice_detection()->Enable(true);

int analog_level = apm->gain_control()->stream_analog_level();
int delay_ms = 20;
int voiceDetected = 0;


long int holdTime = 600; //milliseconds
int prevVoiceDetected = -1;
int holding = 0;
int transmitting = 0;
int prevTransmitting = -1;
struct timeval startHoldTime, currentTime;
int webrtcErr = 0;

while (1) {
// Read in input frames
err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );
if( err ) xrun(stream, err, sampleBlock);

// Run webrtc vad
apm->set_stream_delay_ms(delay_ms);
apm->gain_control()->set_stream_analog_level(analog_level);

// Copy the captured block into a webrtc::AudioFrame (interleaved int16_t samples)
webrtc::AudioFrame frame;
frame.num_channels_ = numChannels;
frame.sample_rate_hz_ = SAMPLE_RATE;
frame.samples_per_channel_ = FRAMES_PER_BUFFER;
memcpy(frame.data_, sampleBlock, numBytes);

// Run the processing pipeline (HPF, AEC, NS, AGC, VAD) on the frame in place
if ((webrtcErr = apm->ProcessStream(&frame)) < 0) {
printf("Error Code: %d\n", webrtcErr); fflush(stdout);
return -1;
}

analog_level = apm->gain_control()->stream_analog_level();
voiceDetected = apm->voice_detection()->stream_has_voice();

transmitting = 0;
if (voiceDetected) {
transmitting = 1;
holding = 0;
} else if (holding) {
gettimeofday (&currentTime, NULL);
long elapsedHoldTime = (((currentTime.tv_sec - startHoldTime.tv_sec)*1000000L+currentTime.tv_usec) - startHoldTime.tv_usec)/1000;
//printf("elapsedtime: %d\n", elapsedHoldTime); fflush(stdout);
if (elapsedHoldTime > holdTime) {
//printf("completedhold\n"); fflush(stdout);
holding = 0;
} else {
//printf("holding\n"); fflush(stdout);
transmitting = 1;
}
} else if (prevVoiceDetected) {
holding = 1;
gettimeofday (&startHoldTime, NULL);
transmitting = 1;
}
prevVoiceDetected = voiceDetected;

if (prevTransmitting != transmitting) {
printf("Transmitting: %s\n", (transmitting) ? "true" : "false"); fflush(stdout);
}
prevTransmitting = transmitting;
}
printf("Wire off.\n"); fflush(stdout);

err = Pa_StopStream( stream );
if( err != paNoError ) error1(stream, sampleBlock);

free( sampleBlock );

Pa_Terminate();
return 0;

}

int xrun(PaStream *stream, int err, char* sampleBlock) {
printf("err = %d\n", err); fflush(stdout);
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
free( sampleBlock );
Pa_Terminate();
if( err & paInputOverflow )
fprintf( stderr, "Input Overflow.\n" );
if( err & paOutputUnderflow )
fprintf( stderr, "Output Underflow.\n" );
return -2;
}

void error1(PaStream *stream, char* sampleBlock) {
free( sampleBlock );
exit(-1);
}
void error2(PaStream *stream, int err) {
if( stream ) {
Pa_AbortStream( stream );
Pa_CloseStream( stream );
}
Pa_Terminate();
fprintf( stderr, "An error occured while using the portaudio stream\n" );
fprintf( stderr, "Error number: %d\n", err );
fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
exit(-1);
}

Compile with:

g++ main.cpp -o main -L./ -lcygportaudio-2 -lrt -lm -pthread -L./cygspeexdsp-1 -I/usr/include/webrtc_audio_processing/ -DWEBRTC_WIN -DWEBRTC_AUDIO_PROCESSING_ONLY_BUILD -std=gnu++11 -L/bin/ -lcygwebrtc_audio_processing-1

This question about c++ - feeding an input stream from PortAudio to webrtc::AudioProcessing was originally asked on Stack Overflow: https://stackoverflow.com/questions/42609432/
