
c - Audio sample generator multithreading OSX


This question is a follow-up to a previous one (Audio producer threads with OSX AudioComponent consumer thread and callback in C) and includes a test example that works and behaves as expected, but does not fully answer the question. I have substantially rewritten the question and re-coded the example so that it contains plain C code only. (I found that the few Objective-C parts in the previous example only caused confusion and distracted readers from the essence of the problem.)

To take advantage of multiple processor cores, and to keep the CoreAudio pull-model render thread as lightweight as possible, the generator routine for the LPCM samples clearly has to "sit" on a different thread, outside the real-time-priority render thread/callback. It must feed its samples into a circular buffer (TPCircularBuffer in this example), from which the system schedules data extraction in chunks of inNumberFrames.

The Grand Central Dispatch API offers a simple solution, which I deduced from some individual research (including trial-and-error coding). This solution is elegant, since it doesn't block anything and there is no conflict between the push and pull models. Yet GCD, which is supposed to take care of the "sub-threading", has so far not met the specific parallelization requirements of the producer's worker code, so I had to explicitly spawn a number of POSIX threads, depending on the number of available logical cores. Although the results are already remarkable in terms of speeding up the computation, I still feel a bit uncomfortable mixing POSIX and GCD. This applies in particular to the variable wait_interval, and to computing it properly, rather than by predicting how many PCM samples the render thread may need for its next cycle.

Here is the shortened and simplified (pseudo)code of my test program, in plain C.

Controller declaration:

#include "TPCircularBuffer.h"
#include <AudioToolbox/AudioToolbox.h>
#include <AudioUnit/AudioUnit.h>
#include <dispatch/dispatch.h>
#include <sys/sysctl.h>
#include <pthread.h>

typedef struct {
TPCircularBuffer buffer;
AudioComponentInstance toneUnit;
Float64 sampleRate;
AudioStreamBasicDescription streamFormat;
Float32* f; //array of updated frequencies
Float32* a; //array of updated amps
Float32* prevf; //array of prev. frequencies
Float32* preva; //array of prev. amps
Float32* val;
int* arg;
int* previous_arg;
UInt32 frames;
int state;
Boolean midif; //wip
} MyAudioController;

MyAudioController gen;
dispatch_semaphore_t mSemaphore;
Boolean multithreading, NF;

typedef struct data{
int tid;
int cpuCount;
}data;
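The helper getCPUnum() used by producerSum() below is not shown in the original post; here is a minimal sketch, assuming the hw.logicalcpu sysctl on OSX:

int getCPUnum(void)
{
    int count = 1;
    size_t size = sizeof(count);
    // Query the number of logical cores; fall back to a single worker on error.
    if (sysctlbyname("hw.logicalcpu", &count, &size, NULL, 0) != 0 || count < 1)
        count = 1;
    return count;
}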

Controller management:

void setup(void) {
    // Initialize the circular buffer (the original had self->buffer, an
    // Objective-C leftover; in plain C this is the global gen)
    TPCircularBufferInit(&gen.buffer, kBufferLength);
    // Create the semaphore
    mSemaphore = dispatch_semaphore_create(0);
    // Set up audio
    createToneUnit(&gen);
}

void dealloc(void) {
    // Release buffer resources
    TPCircularBufferCleanup(&gen.buffer);
    // Clean up the semaphore
    dispatch_release(mSemaphore);
    // Dispose of audio
    if (gen.toneUnit) {
        AudioOutputUnitStop(gen.toneUnit);
        AudioUnitUninitialize(gen.toneUnit);
        AudioComponentInstanceDispose(gen.toneUnit);
    }
}

Dispatcher call (starting the producer queue from the main thread):

void dproducer(Boolean on, Boolean multithreading, Boolean NF)
{
    if (on == true) {
        dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0), ^{
            if (multithreading || NF)
                producerSum(on);
            else
                producer(on);
        });
    }
    return;
}

Threadable producer routine:

// fbuffW holds one partial buffer per worker thread; it is assumed to live at
// file scope so the worker threads can write into their slices.
static Float32 **fbuffW;

void producerSum(Boolean on)
{
    int rc;
    int num = getCPUnum();
    pthread_t threads[num];
    data thread_args[num];
    void *resulT;
    static Float32 frames[FR_MAX];
    Float32 wait_interval;
    int bytesToCopy;
    int32_t availableBytes;
    Float32 floatmax;

    while (on) {
        wait_interval = FACT * (gen.frames) / (gen.sampleRate);
        Float32 damp = 1.f / (Float32)(gen.frames);
        bytesToCopy = gen.frames * sizeof(Float32);
        memset(frames, 0, FR_MAX * sizeof(Float32));
        availableBytes = 0;
        fbuffW = (Float32 **)calloc(num + 1, sizeof(Float32 *));
        for (int i = 0; i < num; ++i) {
            fbuffW[i] = (Float32 *)calloc(gen.frames, sizeof(Float32));
            thread_args[i].tid = i;
            thread_args[i].cpuCount = num;
            rc = pthread_create(&threads[i], NULL, producerT, (void *)&thread_args[i]);
        }

        for (int i = 0; i < num; ++i) rc = pthread_join(threads[i], &resulT);

        // Mix the per-thread partial buffers down into one frame buffer
        for (UInt32 samp = 0; samp < gen.frames; samp++)
            for (int i = 0; i < num; i++)
                frames[samp] += fbuffW[i][samp];

        // code for managing producer state and GUI updates
        { ... }

        Float32 *head = TPCircularBufferHead(&gen.buffer, &availableBytes);
        memcpy(head, (const void *)frames, MIN(bytesToCopy, availableBytes)); // copy frames to head
        TPCircularBufferProduce(&gen.buffer, MIN(bytesToCopy, availableBytes));

        // Sleep until the render callback signals, or at most wait_interval seconds
        dispatch_semaphore_wait(mSemaphore, dispatch_time(DISPATCH_TIME_NOW, wait_interval * NSEC_PER_SEC));
        if (gen.state == stopped) { gen.state = idle; on = false; }

        // fbuffW[num] is NULL from calloc, so free(NULL) is a harmless no-op
        for (int i = 0; i <= num; i++)
            free(fbuffW[i]);
        free(fbuffW);
    }
    return;
}

A single producer thread might look somewhat like this:

void *producerT(void *TN)
{
    Float32 samples[FR_MAX];
    data threadData = *((data *)TN);
    int tid = threadData.tid;
    int step = threadData.cpuCount;
    int *ret = calloc(1, sizeof(int));

    do_something(tid, step, &samples);

    { … }
    return (void *)ret;
}
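The worker body is elided above. Purely as an illustration of the interleaving scheme (worker tid handles every step-th partial and writes into its own slice fbuffW[tid], so no locking is needed between workers), a hypothetical additive-synthesis worker might look like the sketch below. NUM_PARTIALS and the oscillator math are assumptions, not code from the original.

#include <math.h>

#define NUM_PARTIALS 64   // hypothetical number of partials to sum

void *producerT(void *TN)
{
    data threadData = *((data *)TN);
    int tid  = threadData.tid;
    int step = threadData.cpuCount;
    int *ret = calloc(1, sizeof(int));

    // Each worker renders every step-th partial into its own buffer.
    for (int p = tid; p < NUM_PARTIALS; p += step) {
        Float32 w = 2.f * (Float32)M_PI * gen.f[p] / gen.sampleRate;
        for (UInt32 s = 0; s < gen.frames; s++)
            fbuffW[tid][s] += gen.a[p] * sinf(w * (Float32)s);
    }
    return (void *)ret;
}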

And here is the render callback (the CoreAudio real-time consumer thread):

static OSStatus audioRenderCallback(void *inRefCon,
                                    AudioUnitRenderActionFlags *ioActionFlags,
                                    const AudioTimeStamp *inTimeStamp,
                                    UInt32 inBusNumber,
                                    UInt32 inNumberFrames,
                                    AudioBufferList *ioData)
{
    MyAudioController *THIS = (MyAudioController *)inRefCon;

    // An event happens in the render thread: signal whoever is waiting
    if (THIS->state == active) dispatch_semaphore_signal(mSemaphore);

    // Mono audio rendering: we only need one target buffer
    const int channel = 0;
    Float32 *targetBuffer = (Float32 *)ioData->mBuffers[channel].mData;
    memset(targetBuffer, 0, inNumberFrames * sizeof(Float32));

    // Pull samples from the circular buffer
    int32_t availableBytes;
    Float32 *buffer = TPCircularBufferTail(&THIS->buffer, &availableBytes);

    // Copy circular buffer content to the target buffer
    int bytesToCopy = ioData->mBuffers[channel].mDataByteSize;
    memcpy(targetBuffer, buffer, MIN(bytesToCopy, availableBytes));
    { … }

    // Consume everything that was available; any excess beyond bytesToCopy is dropped
    TPCircularBufferConsume(&THIS->buffer, availableBytes);
    THIS->frames = inNumberFrames;
    return noErr;
}

Best Answer

Grand Central Dispatch already takes care of dispatching operations to multiple processor cores and threads. In typical real-time audio rendering or processing, one never needs to wait on signals or semaphores, because the circular buffer's consumption rate is very predictable and drifts only extremely slowly over time. The AVAudioSession API (if available) and the Audio Unit APIs and callbacks let you set and determine the callback buffer size, and thus the maximum rate at which the circular buffer can change. Hence you can dispatch all rendering operations on a timer, render exactly the amount needed per timer period, and let the buffer size and state compensate for any jitter in thread dispatch timing.
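A minimal sketch of that timer-driven approach, assuming renderBlock(dst, n) is a hypothetical routine that synthesizes n samples into dst, and gen.buffer is the TPCircularBuffer from the question:

static dispatch_source_t producerTimer;

void startProducerTimer(Float64 sampleRate, UInt32 framesPerTick)
{
    uint64_t interval = (uint64_t)((Float64)framesPerTick / sampleRate * NSEC_PER_SEC);
    producerTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0,
                        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
    // Some leeway lets the kernel coalesce wakeups; the ring buffer absorbs the jitter.
    dispatch_source_set_timer(producerTimer, DISPATCH_TIME_NOW, interval, interval / 10);
    dispatch_source_set_event_handler(producerTimer, ^{
        int32_t availableBytes;
        Float32 *head = TPCircularBufferHead(&gen.buffer, &availableBytes);
        UInt32 n = MIN(framesPerTick, (UInt32)(availableBytes / sizeof(Float32)));
        renderBlock(head, n);  // produce exactly the samples needed for this period
        TPCircularBufferProduce(&gen.buffer, (int32_t)(n * sizeof(Float32)));
    });
    dispatch_resume(producerTimer);
}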

For extremely long running times of audio rendering, you may want to measure the drift between timer operations and real-time audio consumption (the sample rate), and adjust the number of samples rendered or the timer offset accordingly.
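A hedged sketch of one possible compensation scheme for that measurement: assume the render callback adds inNumberFrames to a shared counter consumedFrames, and the timer handler asks framesForThisTick() how many samples to render on each tick (both names are hypothetical):

static volatile int64_t consumedFrames;   // advanced by the render callback
static int64_t producedFrames;            // advanced by the producer timer

UInt32 framesForThisTick(UInt32 nominal)
{
    int64_t backlog = producedFrames - consumedFrames;  // samples queued in the ring
    int64_t target  = 4 * (int64_t)nominal;             // keep ~4 callback buffers queued
    int64_t n = (int64_t)nominal + (target - backlog);  // render more when falling behind
    if (n < 0) n = 0;                                   // never render a negative amount
    producedFrames += n;
    return (UInt32)n;
}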

Regarding "c - Audio sample generator multithreading OSX", the original question is on Stack Overflow: https://stackoverflow.com/questions/34241278/
