- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover 和 :active 上具有不同效果的 CSS 动画?
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我的音频处理图(AUGraph)的工作方式与 Apple 提供的示例应用程序非常相似。
https://developer.apple.com/library/ios/samplecode/MixerHost/Listings/Classes_MixerHostAudio_m.html#//apple_ref/doc/uid/DTS40010210-Classes_MixerHostAudio_m-DontLinkElementID_6
我的mixerNode由自定义数据(而不是吉他/节拍)馈送-但设置类似。两条总线在混音器上均为立体声。
我正在尝试时移内容,但到目前为止一直没有成功。我尝试将kAudioUnitSubType_NewTimePitch添加到图形中,但是无论何时添加图形都无法创建。是否有任何源示例说明我如何使用混音器单元进行时移(移动所有总线)?
这是一些工作代码:
// Describe audio component
AudioComponentDescription output_desc;
bzero(&output_desc, sizeof(output_desc));
output_desc.componentType = kAudioUnitType_Output;
output_desc.componentSubType = self.componentSubType;
output_desc.componentFlags = 0;
output_desc.componentFlagsMask = 0;
output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// multichannel mixer unit
AudioComponentDescription mixer_desc;
bzero(&mixer_desc, sizeof(mixer_desc));
mixer_desc.componentType = kAudioUnitType_Mixer;
mixer_desc.componentSubType = kAudioUnitSubType_MultiChannelMixer;
mixer_desc.componentFlags = 0;
mixer_desc.componentFlagsMask = 0;
mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Describe NewTimePitch component
AudioComponentDescription speed_desc;
bzero(&speed_desc, sizeof(speed_desc));
speed_desc.componentType = kAudioUnitType_FormatConverter;
speed_desc.componentSubType = kAudioUnitSubType_NewTimePitch;
speed_desc.componentFlags = 0;
speed_desc.componentFlagsMask = 0;
speed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
result = AUGraphAddNode(mGraph, &output_desc, &outputNode);
if (result) { printf("AUGraphNewNode 1 result %ld %4.4s\n", (long)result, (char*)&result); return; }
result = AUGraphAddNode(mGraph, &speed_desc, &timeNode );
if (result) { printf("AUGraphNewNode 2 result %ld %4.4s\n", (long)result, (char*)&result); return; }
result = AUGraphAddNode(mGraph, &mixer_desc, &mixerNode );
if (result) { printf("AUGraphNewNode 3 result %ld %4.4s\n", (long)result, (char*)&result); return; }
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, outputNode, 0);
if (result) { printf("AUGraphConnectNodeInput mixer-> time result %ld %4.4s\n", (long)result, (char*)&result); return; }
// open the graph AudioUnits are open but not initialized (no resource allocation occurs here)
result = AUGraphOpen(mGraph);
if (result) { printf("AUGraphOpen result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
result = AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
if (result) { printf("AUGraphNodeInfo mixer result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
result = AUGraphNodeInfo(mGraph, timeNode, NULL, &mTime);
if (result) { printf("AUGraphNodeInfo time result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
result = AUGraphNodeInfo(mGraph, outputNode, NULL, &mOutput);
if (result) { printf("AUGraphNodeInfo output result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
UInt32 numbuses = 1;
result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numbuses, sizeof(numbuses));
if (result) { printf("AudioUnitSetProperty bus result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
for (int i = 0; i < numbuses; ++i) {
// setup render callback struct
AURenderCallbackStruct rcbs;
rcbs.inputProc = &mixerInput;
rcbs.inputProcRefCon = (__bridge void *)(outputStream);
printf("set kAudioUnitProperty_SetRenderCallback for mixer input bus %d\n", i);
// Set a callback for the specified node's specified input
result = AUGraphSetNodeInputCallback(mGraph, mixerNode, i, &rcbs);
// equivalent to AudioUnitSetProperty(mMixer, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, i, &rcbs, sizeof(rcbs));
if (result) { printf("AUGraphSetNodeInputCallback result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
// set input stream format to what we want
printf("set mixer input kAudioUnitProperty_StreamFormat for bus %d\n", i);
result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, i, mAudioFormat.streamDescription, sizeof(AudioStreamBasicDescription));
if (result) { printf("AudioUnitSetProperty result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
}
result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamInAudioFormat, sizeof(streamInAudioFormat));
if (result) { printf("AudioUnitSetProperty mixer result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
result = AudioUnitSetProperty(mOutput, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &streamInAudioFormat, sizeof(streamInAudioFormat));
if (result) { printf("AudioUnitSetProperty output result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
CAShow(mGraph);
// now that we've set everything up we can initialize the graph, this will also validate the connections
result = AUGraphInitialize(mGraph);
if (result) { printf("AUGraphInitialize result %ld %08lX %4.4s\n", (long)result, (long)result, (char*)&result); return; }
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, timeNode, 0);
result = AUGraphConnectNodeInput(mGraph, timeNode, 0, outputNode, 0);
最佳答案
根据CAShow,您当前的图形是这样的:
混音器-> TimePitch-> VoiceProcess
(您的输出节点不在图中)
您不能再把混音器的输出额外连接到其他节点——在 AUGraph 中,每个输出总线只能有一个连接。
在您的代码中,您有
// Quoted from the question: mixer output 0 is connected three separate times
// (to timeNode, then twice to outputNode). Each output element allows only
// one connection, so the later calls fail.
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, timeNode, 0);
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, outputNode, 0);
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, outputNode, 0);
result = AUGraphConnectNodeInput(mGraph, timeNode, 0, outputNode, 0);
// Intended chain: mixer -> NewTimePitch -> output (each output used once).
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, timeNode, 0);
result = AUGraphConnectNodeInput(mGraph, timeNode, 0, outputNode, 0);
关于ios - NewTimePitch与混音器,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/34668685/
我想制作一个网络应用程序,人们可以在其中将录制的声音和样本添加到时间轴中。我希望它输出 1 个声音文件(大约 3 分钟长),并将其发送到服务器。 现在我想用 HTML5 音频 API 来做到这一点,并
我正在尝试使用 NAudio 和 winforms 在 C# 中制作一个非常简单的 DAW。此阶段程序的目标是能够在播放几个音频文件的同时从麦克风录制。到目前为止,该项目是 NAudio 附带的 AS
您能否帮助我正确配置Jackson与Spring MVC混合注释以自定义JSON响应。 这就是我现在拥有的: 我
当我执行此命令时: ffmpeg -i screen.mp4 -i camera.mp4 -filter_complex "overlay=main_w-overlay_w-5:main_h-over
我正在使用 Jackson Mixin 来反序列化 mongo 对象,Mixin 如下所示。 public interface MyMixin { /** * Mixin to set ke
我在 web3 提供程序 (localhost:8545) 上运行时出错 Not possible to connect to the Web3 provider. Make sure the pro
我正在尝试找出是否有一种方法可以确定是否使用Dolby Pro Logic II数据对AAC编码的音轨进行了编码。有没有一种检查文件的方式,以便您可以查看此信息?例如,我已经在Handbrake中使用
如何在 Remix 中传递多个参数?无论我以哪种方式将参数传递给 setOrder 函数,我都会收到不同的错误: SyntaxError: Unexpected token in JSON at po
我正在尝试从表示为树的类中进行深度过滤(重命名/忽略字段)。 使用 Jackson Mixin,我可以重命名或忽略根级别的字段。我想要实现的目标是如何在多个级别上进行过滤(重命名/忽略)? 例如,我有
场景:顶层容器是一个 Swing JDialog,它有一些 fx 内容,包括一个触发按钮处置的 fx 按钮。当手动创建按钮并使用适当的 eventHandler 配置时,处理工作符合预期(隐藏对话框)
Remix 中是否有推荐的模式用于在每个请求上运行通用代码,并可能将上下文数据添加到请求中?像中间件?例如,一个用例可能是进行日志记录或身份验证。 我见过的与此相似的一件事是 loader conte
我希望能够进行一些音调和速度转换、混合和修剪声音以及应用效果。我试过 ffmpeg但不幸的是,它在处理音频文件时有一些巨大的延迟(例如 40 秒对于 36 秒文件的音高 + 速度)。 所以我在网上搜索
我是一名优秀的程序员,十分优秀!