There are three audio units:
equalizerUnit (kAudioUnitSubType_NBandEQ),
3DmixerUnit (kAudioUnitSubType_SpatialMixer),
remoteIOUnit (kAudioUnitSubType_RemoteIO).
Using an AUGraph and nodes (equalizerNode, 3DmixerNode, remoteNode), they are connected to each other correctly:
equalizerUnit -> mixerUnit -> remoteIOUnit.
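For context, a minimal sketch of how such a graph might be assembled (the names audioGraph, equalizerNode, mixerNode and remoteNode are assumptions based on the description above; error checking omitted):

#include <AudioToolbox/AudioToolbox.h>

AUGraph audioGraph;
AUNode equalizerNode, mixerNode, remoteNode;

AudioComponentDescription eqDescription = {
    .componentType = kAudioUnitType_Effect,
    .componentSubType = kAudioUnitSubType_NBandEQ,
    .componentManufacturer = kAudioUnitManufacturer_Apple
};
AudioComponentDescription mixerDescription = {
    .componentType = kAudioUnitType_Mixer,
    .componentSubType = kAudioUnitSubType_SpatialMixer,
    .componentManufacturer = kAudioUnitManufacturer_Apple
};
AudioComponentDescription ioDescription = {
    .componentType = kAudioUnitType_Output,
    .componentSubType = kAudioUnitSubType_RemoteIO,
    .componentManufacturer = kAudioUnitManufacturer_Apple
};

NewAUGraph(&audioGraph);
AUGraphAddNode(audioGraph, &eqDescription, &equalizerNode);
AUGraphAddNode(audioGraph, &mixerDescription, &mixerNode);
AUGraphAddNode(audioGraph, &ioDescription, &remoteNode);
AUGraphOpen(audioGraph);

// mixer -> remote I/O; the equalizer reaches the mixer through the
// converter unit described below
AUGraphConnectNodeInput(audioGraph, mixerNode, 0, remoteNode, 0);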
There is one problem: to connect the equalizerUnit and the 3DmixerUnit I use a converter unit (kAudioUnitSubType_AUConverter) and set the following AudioStreamBasicDescription on its output:
.mSampleRate = 44100.00,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved,
.mFramesPerPacket = 1,
.mChannelsPerFrame = 1,
.mBytesPerFrame = 2,
.mBitsPerChannel = 16,
.mBytesPerPacket = 2
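For reference, this is roughly how that description could be assembled and applied to the converter's output scope (converterUnit is an assumed variable name; error checking omitted):

AudioStreamBasicDescription converterOutputFormat = {
    .mSampleRate = 44100.00,
    .mFormatID = kAudioFormatLinearPCM,
    .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian
                  | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved,
    .mFramesPerPacket = 1,
    .mChannelsPerFrame = 1,
    .mBytesPerFrame = 2,
    .mBitsPerChannel = 16,
    .mBytesPerPacket = 2
};
AudioUnitSetProperty(converterUnit,
                     kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output,
                     0,
                     &converterOutputFormat,
                     sizeof(converterOutputFormat));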
As a result, I get mono sound from the output scope of the 3DmixerUnit. How can I get stereo out of the 3DmixerUnit?
Any help would be appreciated!
P.S. Some additional information:
The main problem is that I need to feed a stereo signal into the two mono inputs of the 3DmixerUnit.
Apple's 3D Mixer Audio Unit guide states:
To use a stereo source, you may treat its left and right channels as two independent mono sources, and then feed each side of the stereo stream to its own input bus.
https://developer.apple.com/library/ios/qa/qa1695/_index.html
I cannot figure out how to split the equalizer unit's stereo output into two independent mono sources. How can this be done?
Best Answer
Perhaps this solution will save someone some time in the future.
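// Stream formats used in the graph: interleaved 16-bit stereo, mono for the
// 3D mixer's input buses, and non-interleaved stereo as an intermediate format.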
canonicalAudioStreamBasicDescription = (AudioStreamBasicDescription) {
.mSampleRate = 44100.00,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
.mFramesPerPacket = 1,
.mChannelsPerFrame = 2,
.mBytesPerFrame = 4,
.mBitsPerChannel = 16,
.mBytesPerPacket = 4
};
canonicalAudioStreamBasicDescription3Dmixer = (AudioStreamBasicDescription) {
.mSampleRate = 44100.00,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
.mFramesPerPacket = 1,
.mChannelsPerFrame = 1,
.mBytesPerFrame = 2,
.mBitsPerChannel = 16,
.mBytesPerPacket = 2
};
canonicalAudioStreamBasicDescriptionNonInterleaved = (AudioStreamBasicDescription) {
.mSampleRate = 44100.00,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved,
.mFramesPerPacket = 1,
.mChannelsPerFrame = 2,
.mBytesPerFrame = 2,
.mBitsPerChannel = 16,
.mBytesPerPacket = 2
};
convertUnitDescription = (AudioComponentDescription) {
.componentType = kAudioUnitType_FormatConverter,
.componentSubType = kAudioUnitSubType_AUConverter,
.componentFlags = 0,
.componentFlagsMask = 0,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
splittertUnitDescription = (AudioComponentDescription) {
.componentType = kAudioUnitType_FormatConverter,
.componentSubType = kAudioUnitSubType_Splitter,
.componentFlags = 0,
.componentFlagsMask = 0,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
mixerDescription = (AudioComponentDescription){
.componentType = kAudioUnitType_Mixer,
.componentSubType = kAudioUnitSubType_SpatialMixer,
.componentFlags = 0,
.componentFlagsMask = 0,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
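// Spatial (3D) mixer: two mono input buses, bus 0 panned hard left (azimuth -90)
// and bus 1 hard right (azimuth +90).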
AUGraphAddNode(audioGraph, &mixerDescription, &mixerNode);
AUGraphNodeInfo(audioGraph, mixerNode, &mixerDescription, &mixerUnit);
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));
UInt32 busCount = 2;
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &busCount, sizeof(busCount));
Float64 graphSampleRate = 44100.0;
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_SampleRate, kAudioUnitScope_Output, 0, &graphSampleRate, sizeof(graphSampleRate));
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Distance, kAudioUnitScope_Input, 0, 1.0, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Azimuth, kAudioUnitScope_Input, 0, -90, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Distance, kAudioUnitScope_Input, 1, 1.0, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Azimuth, kAudioUnitScope_Input, 1, 90, 0);
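// Splitter: duplicates the equalizer's stereo signal so the left and right
// paths each get their own copy.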
AUNode splitterNode;
AudioUnit splittertUnit;
AUGraphAddNode(audioGraph, &splittertUnitDescription, &splitterNode);
AUGraphNodeInfo(audioGraph, splitterNode, &splittertUnitDescription, &splittertUnit);
// Left path: de-interleave the equalizer's stereo output.
AUNode convertNodeFromInterlevantToNonInterleavedLeft;
AudioUnit convertUnitFromInterlevantToNonInterleavedLeft;
AUGraphAddNode(audioGraph, &convertUnitDescription, &convertNodeFromInterlevantToNonInterleavedLeft);
AUGraphNodeInfo(audioGraph, convertNodeFromInterlevantToNonInterleavedLeft, &convertUnitDescription, &convertUnitFromInterlevantToNonInterleavedLeft);
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedLeft, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormatFromEqualizer, sizeof(srcFormatFromEqualizer));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedLeft, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedLeft, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));
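// Right path: de-interleave the equalizer's stereo output.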
AUNode convertNodeFromInterlevantToNonInterleavedRight;
AudioUnit convertUnitFromInterlevantToNonInterleavedRight;
AUGraphAddNode(audioGraph, &convertUnitDescription, &convertNodeFromInterlevantToNonInterleavedRight);
AUGraphNodeInfo(audioGraph, convertNodeFromInterlevantToNonInterleavedRight, &convertUnitDescription, &convertUnitFromInterlevantToNonInterleavedRight);
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedRight, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormatFromEqualizer, sizeof(srcFormatFromEqualizer));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedRight, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedRight, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));
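// Channel-map converters: each one keeps a single channel (0 = left, 1 = right)
// and outputs mono for the corresponding mixer input bus.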
AUNode converterNodeFromNonInterleavedToMonoLeftChannel;
AudioUnit converUnitFromNonInterleavedToMonoLeftChannel;
SInt32 left[1] = {0};
UInt32 leftSize = (UInt32)sizeof(left);
AUGraphAddNode(audioGraph, &convertUnitDescription, &converterNodeFromNonInterleavedToMonoLeftChannel);
AUGraphNodeInfo(audioGraph, converterNodeFromNonInterleavedToMonoLeftChannel, &convertUnitDescription, &converUnitFromNonInterleavedToMonoLeftChannel);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, &left, leftSize);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescription3Dmixer, sizeof(canonicalAudioStreamBasicDescription3Dmixer));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));
AUNode converterNodeFromNonInterleavedToMonoRightChannel;
AudioUnit converUnitFromNonInterleavedToMonoRightChannel;
SInt32 right[1] = {1};
UInt32 rightSize = (UInt32)sizeof(right);
AUGraphAddNode(audioGraph, &convertUnitDescription, &converterNodeFromNonInterleavedToMonoRightChannel);
AUGraphNodeInfo(audioGraph, converterNodeFromNonInterleavedToMonoRightChannel, &convertUnitDescription, &converUnitFromNonInterleavedToMonoRightChannel);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, &right, rightSize);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescription3Dmixer, sizeof(canonicalAudioStreamBasicDescription3Dmixer));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));
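// Wire the chain: equalizer -> splitter -> de-interleave converters ->
// mono channel maps -> spatial mixer inputs.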
AUGraphConnectNodeInput(audioGraph, equalizerNode, 0, splitterNode, 0);
AUGraphConnectNodeInput(audioGraph, splitterNode, 0, convertNodeFromInterlevantToNonInterleavedLeft, 0);
AUGraphConnectNodeInput(audioGraph, splitterNode, 1, convertNodeFromInterlevantToNonInterleavedRight, 0);
AUGraphConnectNodeInput(audioGraph, convertNodeFromInterlevantToNonInterleavedLeft, 0, converterNodeFromNonInterleavedToMonoLeftChannel, 0);
AUGraphConnectNodeInput(audioGraph, convertNodeFromInterlevantToNonInterleavedRight, 0, converterNodeFromNonInterleavedToMonoRightChannel, 0);
AUGraphConnectNodeInput(audioGraph, converterNodeFromNonInterleavedToMonoLeftChannel, 0, mixerNode, 0);
AUGraphConnectNodeInput(audioGraph, converterNodeFromNonInterleavedToMonoRightChannel, 0, mixerNode, 1);
That is all: the key part of the complete, working code.
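For completeness, the graph around this snippet still has to be opened before the AUGraphNodeInfo / AudioUnitSetProperty calls, and initialized and started once everything is wired up. A minimal sketch of that sequence (error checks omitted):

NewAUGraph(&audioGraph);        // create the graph
// ... AUGraphAddNode calls ...
AUGraphOpen(audioGraph);        // instantiates the units so AUGraphNodeInfo / AudioUnitSetProperty work
// ... formats, channel maps, parameters and connections as shown above ...
AUGraphInitialize(audioGraph);  // validates connections and stream formats
AUGraphStart(audioGraph);       // starts rendering
CAShow(audioGraph);             // optional: print the graph layout to the console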
Regarding "ios - How to use a stereo source with the 3D mixer unit (kAudioUnitSubType_SpatialMixer)?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/54726571/