ios - How to use a stereo source with the 3D mixer unit (kAudioUnitSubType_SpatialMixer)?


There are three audio units:
equalizerUnit (kAudioUnitSubType_NBandEQ),
3DmixerUnit (kAudioUnitSubType_SpatialMixer),
RemoteIOUnit (kAudioUnitSubType_RemoteIO).
Using an AUGraph and nodes (equalizerNode, 3DmixerNode, remoteNode), they are correctly connected to one another:
equalizerUnit -> 3DmixerUnit -> remoteIOUnit.

There is a problem: to connect the equalizerUnit to the 3DmixerUnit, I use a converter unit (kAudioUnitSubType_AUConverter) and set the following AudioStreamBasicDescription on its output:

    .mSampleRate = 44100.00,
    .mFormatID = kAudioFormatLinearPCM,
    .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved,
    .mFramesPerPacket = 1,
    .mChannelsPerFrame = 1,
    .mBytesPerFrame = 2,
    .mBitsPerChannel = 16,
    .mBytesPerPacket = 2

As a result, I get mono sound from the output scope of the 3DmixerUnit. How can I get stereo out of the 3DmixerUnit?
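
For reference, a minimal sketch of how such a mono output format is typically applied to the converter unit (the names converterUnit and monoFormat below are illustrative, not taken from the original code):

AudioUnitSetProperty(converterUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output, 0,
                     &monoFormat, sizeof(monoFormat));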

I would be grateful for any help!

P.S. Some additional information:
The main problem is that I need to feed a stereo signal into the two mono inputs of the 3DmixerUnit.
Apple's 3D Mixer Audio Unit guide states:
"To use a stereo source, you may treat its left and right channels as two independent single-channel sources, and then feed each side of the stereo stream to its own input bus."
https://developer.apple.com/library/ios/qa/qa1695/_index.html
I cannot figure out how to split the equalizer unit's stereo output into two independent mono sources. How can this be done?

Best Answer

Perhaps this will save someone time in the future.

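// Stream formats used below: interleaved 16-bit stereo, mono (set on the converter
// outputs that feed the spatial mixer's input buses), and non-interleaved stereo
// (used by the intermediate converters). audioGraph, equalizerNode,
// srcFormatFromEqualizer and maxFramesPerSlice are assumed to be declared and
// configured elsewhere in the graph setup code.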
canonicalAudioStreamBasicDescription = (AudioStreamBasicDescription) {
.mSampleRate = 44100.00,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
.mFramesPerPacket = 1,
.mChannelsPerFrame = 2,
.mBytesPerFrame = 4,
.mBitsPerChannel = 16,
.mBytesPerPacket = 4
};
canonicalAudioStreamBasicDescription3Dmixer = (AudioStreamBasicDescription) {
.mSampleRate = 44100.00,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
.mFramesPerPacket = 1,
.mChannelsPerFrame = 1,
.mBytesPerFrame = 2,
.mBitsPerChannel = 16,
.mBytesPerPacket = 2
};
canonicalAudioStreamBasicDescriptionNonInterleaved = (AudioStreamBasicDescription) {
.mSampleRate = 44100.00,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved,
.mFramesPerPacket = 1,
.mChannelsPerFrame = 2,
.mBytesPerFrame = 2,
.mBitsPerChannel = 16,
.mBytesPerPacket = 2
};

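// Component descriptions for the format converter, splitter and spatial mixer units.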
convertUnitDescription = (AudioComponentDescription) {
.componentType = kAudioUnitType_FormatConverter,
.componentSubType = kAudioUnitSubType_AUConverter,
.componentFlags = 0,
.componentFlagsMask = 0,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
splittertUnitDescription = (AudioComponentDescription) {
.componentType = kAudioUnitType_FormatConverter,
.componentSubType = kAudioUnitSubType_Splitter,
.componentFlags = 0,
.componentFlagsMask = 0,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
mixerDescription = (AudioComponentDescription){
.componentType = kAudioUnitType_Mixer,
.componentSubType = kAudioUnitSubType_SpatialMixer,
.componentFlags = 0,
.componentFlagsMask = 0,
.componentManufacturer = kAudioUnitManufacturer_Apple
};

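// Spatial (3D) mixer: two mono input buses; bus 0 is panned hard left
// (azimuth -90) and bus 1 hard right (azimuth +90).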
AUGraphAddNode(audioGraph, &mixerDescription, &mixerNode);
AUGraphNodeInfo(audioGraph, mixerNode, &mixerDescription, &mixerUnit);
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));
UInt32 busCount = 2;
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &busCount, sizeof(busCount));
Float64 graphSampleRate = 44100.0;
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_SampleRate, kAudioUnitScope_Output, 0, &graphSampleRate, sizeof(graphSampleRate));
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Distance, kAudioUnitScope_Input, 0, 1.0, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Azimuth, kAudioUnitScope_Input, 0, -90, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Distance, kAudioUnitScope_Input, 1, 1.0, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Azimuth, kAudioUnitScope_Input, 1, 90, 0);

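// Splitter: duplicates the equalizer's stereo output into two identical streams,
// one for the left branch and one for the right branch.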
AUNode splitterNode;
AudioUnit splittertUnit;
AUGraphAddNode(audioGraph, &splittertUnitDescription, &splitterNode);
AUGraphNodeInfo(audioGraph, splitterNode, &splittertUnitDescription, &splittertUnit);

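// Left branch: convert the interleaved stereo coming from the equalizer into
// the non-interleaved stereo format.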
AUNode convertNodeFromInterlevantToNonInterleavedLeft;
AudioUnit convertUnitFromInterlevantToNonInterleavedLeft;
AUGraphAddNode(audioGraph, &convertUnitDescription, &convertNodeFromInterlevantToNonInterleavedLeft);
AUGraphNodeInfo(audioGraph, convertNodeFromInterlevantToNonInterleavedLeft, &convertUnitDescription, &convertUnitFromInterlevantToNonInterleavedLeft);
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedLeft, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormatFromEqualizer, sizeof(srcFormatFromEqualizer));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedLeft, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedLeft, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

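// Right branch: the same interleaved-to-non-interleaved conversion.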
AUNode convertNodeFromInterlevantToNonInterleavedRight;
AudioUnit convertUnitFromInterlevantToNonInterleavedRight;
AUGraphAddNode(audioGraph, &convertUnitDescription, &convertNodeFromInterlevantToNonInterleavedRight);
AUGraphNodeInfo(audioGraph, convertNodeFromInterlevantToNonInterleavedRight, &convertUnitDescription, &convertUnitFromInterlevantToNonInterleavedRight);
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedRight, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormatFromEqualizer, sizeof(srcFormatFromEqualizer));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedRight, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(convertUnitFromInterlevantToNonInterleavedRight, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

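// Left branch: a channel-map converter picks channel 0 (left) out of the
// non-interleaved stereo stream and outputs it as mono for mixer bus 0.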
AUNode converterNodeFromNonInterleavedToMonoLeftChannel;
AudioUnit converUnitFromNonInterleavedToMonoLeftChannel;
SInt32 left[1] = {0};
UInt32 leftSize = (UInt32)sizeof(left);
AUGraphAddNode(audioGraph, &convertUnitDescription, &converterNodeFromNonInterleavedToMonoLeftChannel);
AUGraphNodeInfo(audioGraph, converterNodeFromNonInterleavedToMonoLeftChannel, &convertUnitDescription, &converUnitFromNonInterleavedToMonoLeftChannel);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, &left, leftSize);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescription3Dmixer, sizeof(canonicalAudioStreamBasicDescription3Dmixer));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

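// Right branch: pick channel 1 (right) and output it as mono for mixer bus 1.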
AUNode converterNodeFromNonInterleavedToMonoRightChannel;
AudioUnit converUnitFromNonInterleavedToMonoRightChannel;
SInt32 right[1] = {1};
UInt32 rightSize = (UInt32)sizeof(right);
AUGraphAddNode(audioGraph, &convertUnitDescription, &converterNodeFromNonInterleavedToMonoRightChannel);
AUGraphNodeInfo(audioGraph, converterNodeFromNonInterleavedToMonoRightChannel, &convertUnitDescription, &converUnitFromNonInterleavedToMonoRightChannel);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, &right, rightSize);
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescription3Dmixer, sizeof(canonicalAudioStreamBasicDescription3Dmixer));
AudioUnitSetProperty(converUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

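// Wire everything up: equalizer -> splitter -> per-channel converters -> spatial mixer.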
AUGraphConnectNodeInput(audioGraph, equalizerNode, 0, splitterNode, 0);
AUGraphConnectNodeInput(audioGraph, splitterNode, 0, convertNodeFromInterlevantToNonInterleavedLeft, 0);
AUGraphConnectNodeInput(audioGraph, splitterNode, 1, convertNodeFromInterlevantToNonInterleavedRight, 0);
AUGraphConnectNodeInput(audioGraph, convertNodeFromInterlevantToNonInterleavedLeft, 0, converterNodeFromNonInterleavedToMonoLeftChannel, 0);
AUGraphConnectNodeInput(audioGraph, convertNodeFromInterlevantToNonInterleavedRight, 0, converterNodeFromNonInterleavedToMonoRightChannel, 0);
AUGraphConnectNodeInput(audioGraph, converterNodeFromNonInterleavedToMonoLeftChannel, 0, mixerNode, 0);
AUGraphConnectNodeInput(audioGraph, converterNodeFromNonInterleavedToMonoRightChannel, 0, mixerNode, 1);

That's all. This is the complete key working part of the code.
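
For completeness, a minimal sketch of the surrounding AUGraph lifecycle that this snippet assumes; the equalizer and RemoteIO nodes and the mixer-to-RemoteIO connection are set up as in the original graph and are omitted here:

NewAUGraph(&audioGraph);
// ... add the equalizer and RemoteIO nodes as in the original graph ...
AUGraphOpen(audioGraph);
// ... the node configuration and connections shown above ...
AUGraphInitialize(audioGraph);
AUGraphStart(audioGraph);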

A similar question about "ios - How to use a stereo source with the 3D mixer unit (kAudioUnitSubType_SpatialMixer)?" can be found on Stack Overflow: https://stackoverflow.com/questions/54726571/
