
ios - Playing raw PCM audio data from an NSStream


I am trying to play PCM data coming from an NSInputStream. Can anyone point me to the right approach or some code for this?

I receive the audio in the stream's has-bytes-available event (NSStreamEventHasBytesAvailable) using the following code:

uint8_t bytes[self.audioStreamReadMaxLength];
UInt32 length = [audioStream readData:bytes maxLength:self.audioStreamReadMaxLength];

How can I now play these audio bytes on the iPhone?
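For reference, the standard NSInputStream call for this event is read:maxLength:, which returns the number of bytes actually placed into the buffer. A minimal sketch of the delegate handler, keeping audioStream and self.audioStreamReadMaxLength from the snippet above (pcmChunk and the surrounding method are illustrative assumptions, not part of the original question):

- (void)stream:(NSStream *)aStream handleEvent:(NSStreamEvent)eventCode
{
    if (eventCode == NSStreamEventHasBytesAvailable) {
        uint8_t bytes[self.audioStreamReadMaxLength];
        // read:maxLength: returns the number of bytes read, or a negative value on error
        NSInteger length = [(NSInputStream *)aStream read:bytes maxLength:self.audioStreamReadMaxLength];
        if (length > 0) {
            // Wrap the raw PCM bytes so they can be handed to a playback path
            // such as the circular-buffer approach in the answer below
            NSData *pcmChunk = [NSData dataWithBytes:bytes length:length];
        }
    }
}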

Best Answer

I ran into a similar problem and eventually got it working. Here is the basic outline of what I did.

I am using the GCDAsyncSocket library (CocoaAsyncSocket) for the sockets.

The class below is responsible for capturing the audio and sending it to the connected clients.

#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>

@interface AudioServer : NSObject <GCDAsyncSocketDelegate>

@property (nonatomic, strong)GCDAsyncSocket * serverSocket;

@property (nonatomic, strong)NSMutableArray *connectedClients;

@property (nonatomic) AudioComponentInstance audioUnit;

-(void) start;
-(void) stop;
-(void) writeDataToClients:(NSData*)data;

@end

#define kOutputBus 0
#define kInputBus 1

#import "AudioServer.h"
#import "SM_Utils.h"

static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {

// TODO: Use inRefCon to access our interface object to do stuff
// Then, use inNumberFrames to figure out how much data is available, and make
// that much space available in buffers in an AudioBufferList.

AudioServer *server = (__bridge AudioServer*)inRefCon;

AudioBufferList bufferList;

SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));

bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);

// Then:
// Obtain recorded samples

OSStatus status;

status = AudioUnitRender(server.audioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
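// Note: status is not checked here; if AudioUnitRender fails, bufferList contains no valid
// samples, so a more defensive version would bail out when status != noErr.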

NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];
[server writeDataToClients:dataToSend];

return noErr;
}

@implementation AudioServer

-(id) init
{
return [super init];
}

-(void) start
{

[UIApplication sharedApplication].idleTimerDisabled = YES;
// Create a new instance of AURemoteIO

AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;

AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, &_audioUnit);

// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element

UInt32 one = 1;
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one));

AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one));

// Explicitly set the input and output client formats
// sample rate = 44100 Hz, num channels = 1, format = 16-bit signed integer (see getAudioDescription below)

AudioStreamBasicDescription audioFormat = [self getAudioDescription];
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));

// Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
// of samples it will be asked to produce on any single given call to AudioUnitRender
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));

// Get the property value back from AURemoteIO. We are going to use this value to allocate buffers accordingly
UInt32 propSize = sizeof(UInt32);
AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize);


AURenderCallbackStruct renderCallback;
renderCallback.inputProc = recordingCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);

AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &renderCallback, sizeof(renderCallback));


// Initialize the AURemoteIO instance
AudioUnitInitialize(_audioUnit);

AudioOutputUnitStart(_audioUnit);

_connectedClients = [[NSMutableArray alloc] init];
_serverSocket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];

[self startAcceptingConnections];
}

- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}

-(void) startAcceptingConnections
{
NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}


-(void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(_connectedClients)
[_connectedClients removeObject:sock];
}

- (void)socket:(GCDAsyncSocket *)socket didAcceptNewSocket:(GCDAsyncSocket *)newSocket {

NSLog(@"Accepted New Socket from %@:%hu", [newSocket connectedHost], [newSocket connectedPort]);

@synchronized(_connectedClients)
{
dispatch_async(dispatch_get_main_queue(), ^{
if(_connectedClients)
[_connectedClients addObject:newSocket];
});
}

NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}

-(void) writeDataToClients:(NSData *)data
{
if(_connectedClients)
{
// Collect stale sockets first; removing them while fast-enumerating the array would raise an exception
NSMutableArray *disconnectedClients = [NSMutableArray array];
for (GCDAsyncSocket *socket in _connectedClients) {
if([socket isConnected])
{
[socket writeData:data withTimeout:-1 tag:0];
}
else{
[disconnectedClients addObject:socket];
}
}
[_connectedClients removeObjectsInArray:disconnectedClients];
}
}

-(void) stop
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}

-(void) dealloc
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}

@end
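A minimal sketch of how the server side might be driven (the audioServer property and the owning view controller are assumptions for illustration, not part of the classes above):

// On the capturing device, e.g. a view controller that keeps a strong reference:
// @property (nonatomic, strong) AudioServer *audioServer;

self.audioServer = [[AudioServer alloc] init];
[self.audioServer start];   // starts the voice-processing I/O unit and begins accepting clients

// ... later, when streaming should end:
[self.audioServer stop];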

The following class is then responsible for receiving the audio from the server and playing it back:
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>

#import "TPCircularBuffer.h"

@protocol AudioClientDelegate <NSObject>

-(void) connected;
-(void) animateSoundIndicator:(float) rms;

@end

@interface AudioClient : NSObject<GCDAsyncSocketDelegate>
{
NSString *ipAddress;
BOOL stopped;
}

@property (nonatomic) TPCircularBuffer circularBuffer;
@property (nonatomic) AudioComponentInstance audioUnit;
@property (nonatomic, strong) GCDAsyncSocket *socket;
@property (nonatomic, strong) id<AudioClientDelegate> delegate;

-(id) initWithDelegate:(id)delegate;
-(void) start:(NSString *)ip;
-(void) stop;
-(TPCircularBuffer *) outputShouldUseCircularBuffer;

@end


#import "AudioClient.h"
#import "SM_Utils.h"

#define kOutputBus 0
#define kInputBus 1

static OSStatus OutputRenderCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){


AudioClient *output = (__bridge AudioClient*)inRefCon;


TPCircularBuffer *circularBuffer = [output outputShouldUseCircularBuffer];
if( !circularBuffer ){
AudioUnitSampleType *left = (AudioUnitSampleType*)ioData->mBuffers[0].mData;
for(int i = 0; i < inNumberFrames; i++ ){
left[ i ] = 0.0f;
}
return noErr;
};

int32_t bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16* outputBuffer = ioData->mBuffers[0].mData;

int32_t availableBytes;
SInt16 *sourceBuffer = TPCircularBufferTail(circularBuffer, &availableBytes);

int32_t amount = MIN(bytesToCopy,availableBytes);
memcpy(outputBuffer, sourceBuffer, amount);

TPCircularBufferConsume(circularBuffer,amount);

return noErr;
}

@implementation AudioClient

-(id) initWithDelegate:(id)delegate
{
self = [super init];
if(self)
{
[self circularBuffer:&_circularBuffer withSize:24576*5];
_delegate = delegate;
stopped = NO;
}
return self;
}

-(void) start:(NSString *)ip
{
_socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue: dispatch_get_main_queue()];

NSError *err;

ipAddress = ip;

[UIApplication sharedApplication].idleTimerDisabled = YES;

if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{

}

[self setupAudioUnit];
}

-(void) setupAudioUnit
{
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;

AudioComponent comp = AudioComponentFindNext(NULL, &desc);

OSStatus status;

status = AudioComponentInstanceNew(comp, &_audioUnit);

if(status != noErr)
{
NSLog(@"Error creating AudioUnit instance");
}

// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element

UInt32 one = 1;

status = AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &one, sizeof(one));


if(status != noErr)
{
NSLog(@"Error enableling AudioUnit output bus");
}

// Explicitly set the input and output client formats
// sample rate = 44100 Hz, num channels = 1, format = 16-bit signed integer

AudioStreamBasicDescription audioFormat = [self getAudioDescription];

status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat));

if(status != noErr)
{
NSLog(@"Error setting audio format");
}

AURenderCallbackStruct renderCallback;
renderCallback.inputProc = OutputRenderCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);

status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallback, sizeof(renderCallback));

if(status != noErr)
{
NSLog(@"Error setting rendering callback");
}

// Initialize the AURemoteIO instance
status = AudioUnitInitialize(_audioUnit);

if(status != noErr)
{
NSLog(@"Error initializing audio unit");
}
}

- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}

-(void) socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(!stopped)
if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{

}
}

-(void) socket:(GCDAsyncSocket *)socket didReadData:(NSData *)data withTag:(long)tag
{
if(data.length > 0)
{
unsigned long len = [data length];

SInt16* byteData = (SInt16*)malloc(len);
memcpy(byteData, [data bytes], len);

// RMS over the len/2 16-bit samples in this chunk
double sum = 0.0;
for(int i = 0; i < len/2; i++) {
sum += byteData[i] * byteData[i];
}

double average = sum / (len/2);
double rms = sqrt(average);

[_delegate animateSoundIndicator:rms];

free(byteData);

Byte* soundData = (Byte*)malloc(len);
memcpy(soundData, [data bytes], len);

if(soundData)
{
AudioBufferList *theDataBuffer = (AudioBufferList*) malloc(sizeof(AudioBufferList) *1);
theDataBuffer->mNumberBuffers = 1;
theDataBuffer->mBuffers[0].mDataByteSize = (UInt32)len;
theDataBuffer->mBuffers[0].mNumberChannels = 1;
theDataBuffer->mBuffers[0].mData = (SInt16*)soundData;

[self appendDataToCircularBuffer:&_circularBuffer fromAudioBufferList:theDataBuffer];

// The circular buffer keeps its own copy of the bytes, so the temporaries can be released here
free(theDataBuffer);
free(soundData);
}
}

[socket readDataToLength:18432 withTimeout:-1 tag:0];
}
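// For scale: with the 44.1 kHz, mono, 16-bit format above, each 18432-byte read holds
// 18432 / 2 = 9216 frames, i.e. roughly 9216 / 44100 ≈ 0.21 s of audio, so animateSoundIndicator:
// fires about five times per second while the stream keeps up.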

-(void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size {
TPCircularBufferInit(circularBuffer,size);
}

-(void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer
fromAudioBufferList:(AudioBufferList*)audioBufferList {
TPCircularBufferProduceBytes(circularBuffer,
audioBufferList->mBuffers[0].mData,
audioBufferList->mBuffers[0].mDataByteSize);
}

-(void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer {
TPCircularBufferClear(circularBuffer);
TPCircularBufferCleanup(circularBuffer);
}
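// TPCircularBufferProduceBytes copies the supplied bytes into the ring buffer, while
// TPCircularBufferTail / TPCircularBufferConsume in the render callback read them out, so the
// socket delegate (producer) and the audio thread (consumer) never touch the same malloc'd memory.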

-(void) socket:(GCDAsyncSocket *)socket didConnectToHost:(NSString *)host port:(uint16_t)port
{
OSStatus status = AudioOutputUnitStart(_audioUnit);

if(status != noErr)
{
NSLog(@"Error starting audio unit");
}

[socket readDataToLength:18432 withTimeout:-1 tag:0];

[_delegate connected];
}

-(TPCircularBuffer *) outputShouldUseCircularBuffer
{
return &_circularBuffer;
}

-(void) stop
{

OSStatus status = AudioOutputUnitStop(_audioUnit);

if(status != noErr)
{
NSLog(@"Error stopping audio unit");
}

[UIApplication sharedApplication].idleTimerDisabled = NO;

TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}

-(void) dealloc {
OSStatus status = AudioOutputUnitStop(_audioUnit);

if(status != noErr)
{
NSLog(@"Error stopping audio unit");
}

[UIApplication sharedApplication].idleTimerDisabled = NO;

TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}

@end
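A minimal sketch of the receiving side, wiring AudioClient to a delegate (PlayerViewController, the audioClient property, and the hard-coded IP address are assumptions for illustration):

#import <UIKit/UIKit.h>
#import "AudioClient.h"

// PlayerViewController is an illustrative owner that adopts AudioClientDelegate
@interface PlayerViewController : UIViewController <AudioClientDelegate>
@property (nonatomic, strong) AudioClient *audioClient;
@end

@implementation PlayerViewController

- (void)startListening
{
    self.audioClient = [[AudioClient alloc] initWithDelegate:self];
    [self.audioClient start:@"192.168.1.10"]; // IP address of the device running AudioServer
}

#pragma mark - AudioClientDelegate

- (void)connected
{
    NSLog(@"Connected to the audio server");
}

- (void)animateSoundIndicator:(float)rms
{
    // Drive a level meter with the RMS value computed in didReadData
}

@end

One more detail worth flagging: neither class configures the audio session, and capturing from the microphone while playing back normally requires the play-and-record category to be active before the audio units start. A minimal sketch of that setup, which the code above assumes has already happened somewhere:

#import <AVFoundation/AVFoundation.h>

NSError *sessionError = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
// Play-and-record lets the voice-processing I/O unit use both the microphone and the speaker
if (![session setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError] ||
    ![session setActive:YES error:&sessionError]) {
    NSLog(@"Error configuring the audio session: %@", sessionError);
}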

Some of the code is specific to my requirements, but most of it can be re-used. I hope this helps.

Regarding ios - playing raw PCM audio data from an NSStream, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/28340738/
