objective-c - AudioUnit tone generator is giving me a chirp at the end of each tone generated


I am creating an old-school music emulator for the old GWBasic PLAY command. To do that I have a tone generator and a music player, but between each note that is played I hear a chirp that ruins the effect. Here are my two classes:

ToneGen.h

#import <Foundation/Foundation.h>

@interface ToneGen : NSObject
@property (nonatomic) id delegate;
@property (nonatomic) double frequency;
@property (nonatomic) double sampleRate;
@property (nonatomic) double theta;
- (void)play:(float)ms;
- (void)play;
- (void)stop;
@end

ToneGen.m

#import <AudioUnit/AudioUnit.h>
#import "ToneGen.h"

OSStatus RenderTone(void *inRefCon,
                    AudioUnitRenderActionFlags *ioActionFlags,
                    const AudioTimeStamp *inTimeStamp,
                    UInt32 inBusNumber,
                    UInt32 inNumberFrames,
                    AudioBufferList *ioData);
void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState);


@interface ToneGen()
@property (nonatomic) AudioComponentInstance toneUnit;
@property (nonatomic) NSTimer *timer;
- (void)createToneUnit;
@end


@implementation ToneGen
@synthesize toneUnit = _toneUnit;
@synthesize timer = _timer;
@synthesize delegate = _delegate;
@synthesize frequency = _frequency;
@synthesize sampleRate = _sampleRate;
@synthesize theta = _theta;

- (id)init
{
    self = [super init];
    if (self)
    {
        self.sampleRate = 44100;
        self.frequency = 1440.0f;
        return self;
    }
    return nil;
}

- (void)play:(float)ms
{
    [self play];
    self.timer = [NSTimer scheduledTimerWithTimeInterval:(ms / 100)
                                                  target:self
                                                selector:@selector(stop)
                                                userInfo:nil
                                                 repeats:NO];
    [[NSRunLoop mainRunLoop] addTimer:self.timer forMode:NSRunLoopCommonModes];
}

- (void)play
{
    if (!self.toneUnit)
    {
        [self createToneUnit];

        // Stop changing parameters on the unit
        OSErr err = AudioUnitInitialize(self.toneUnit);
        if (err)
            DLog(@"Error initializing unit");

        // Start playback
        err = AudioOutputUnitStart(self.toneUnit);
        if (err)
            DLog(@"Error starting unit");
    }
}

- (void)stop
{
    [self.timer invalidate];
    self.timer = nil;

    if (self.toneUnit)
    {
        AudioOutputUnitStop(self.toneUnit);
        AudioUnitUninitialize(self.toneUnit);
        AudioComponentInstanceDispose(self.toneUnit);
        self.toneUnit = nil;
    }

    if (self.delegate && [self.delegate respondsToSelector:@selector(toneStop)]) {
        [self.delegate performSelector:@selector(toneStop)];
    }
}

- (void)createToneUnit
{
    AudioComponentDescription defaultOutputDescription;
    defaultOutputDescription.componentType = kAudioUnitType_Output;
    defaultOutputDescription.componentSubType = kAudioUnitSubType_DefaultOutput;
    defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    defaultOutputDescription.componentFlags = 0;
    defaultOutputDescription.componentFlagsMask = 0;

    // Get the default playback output unit
    AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
    if (!defaultOutput)
        DLog(@"Can't find default output");

    // Create a new unit based on this that we'll use for output
    OSErr err = AudioComponentInstanceNew(defaultOutput, &_toneUnit);
    if (err)
        DLog(@"Error creating unit");

    // Set our tone rendering function on the unit
    AURenderCallbackStruct input;
    input.inputProc = RenderTone;
    input.inputProcRefCon = (__bridge void *)self;
    err = AudioUnitSetProperty(self.toneUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input,
                               0,
                               &input,
                               sizeof(input));
    if (err)
        DLog(@"Error setting callback");

    // Set the format to 32 bit, single channel, floating point, linear PCM
    const int four_bytes_per_float = 4;
    const int eight_bits_per_byte = 8;
    AudioStreamBasicDescription streamFormat;
    streamFormat.mSampleRate = self.sampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
        kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = four_bytes_per_float;
    streamFormat.mChannelsPerFrame = 1;
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
    err = AudioUnitSetProperty(self.toneUnit,
                               kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input,
                               0,
                               &streamFormat,
                               sizeof(AudioStreamBasicDescription));
    if (err)
        DLog(@"Error setting stream format");
}

@end


OSStatus RenderTone(void *inRefCon,
                    AudioUnitRenderActionFlags *ioActionFlags,
                    const AudioTimeStamp *inTimeStamp,
                    UInt32 inBusNumber,
                    UInt32 inNumberFrames,
                    AudioBufferList *ioData)
{
    // Fixed amplitude is good enough for our purposes
    const double amplitude = 0.25;

    // Get the tone parameters out of the ToneGen object passed as inRefCon
    ToneGen *toneGen = (__bridge ToneGen *)inRefCon;
    double theta = toneGen.theta;
    double theta_increment = 2.0 * M_PI * toneGen.frequency / toneGen.sampleRate;

    // This is a mono tone generator so we only need the first buffer
    const int channel = 0;
    Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

    // Generate the samples
    for (UInt32 frame = 0; frame < inNumberFrames; frame++)
    {
        buffer[frame] = sin(theta) * amplitude;

        theta += theta_increment;
        if (theta > 2.0 * M_PI)
        {
            theta -= 2.0 * M_PI;
        }
    }

    // Store the theta back in the ToneGen object
    toneGen.theta = theta;

    return noErr;
}

void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState)
{
    ToneGen *toneGen = (__bridge ToneGen *)inClientData;
    [toneGen stop];
}

Music.h

#import <Foundation/Foundation.h>

@interface Music : NSObject
- (void) play:(NSString *)music;
- (void) stop;
@end

Music.m

#import "Music.h"
#import "ToneGen.h"

@interface Music()
@property (nonatomic, readonly) ToneGen *toneGen;
@property (nonatomic, assign) int octive;
@property (nonatomic, assign) int tempo;
@property (nonatomic, assign) int length;

@property (nonatomic, strong) NSData *music;
@property (nonatomic, assign) int dataPos;
@property (nonatomic, assign) BOOL isPlaying;

- (void)playNote;
@end

@implementation Music
@synthesize toneGen = _toneGen;
- (ToneGen *)toneGen
{
    if (_toneGen == nil)
    {
        _toneGen = [[ToneGen alloc] init];
        _toneGen.delegate = self;
    }
    return _toneGen;
}
@synthesize octive = _octive;
- (void)setOctive:(int)octive
{
    // Sanity check
    if (octive < 0)
        octive = 0;
    if (octive > 6)
        octive = 6;
    _octive = octive;
}
@synthesize tempo = _tempo;
- (void)setTempo:(int)tempo
{
    // Sanity check
    if (tempo < 30)
        tempo = 30;
    if (tempo > 255)
        tempo = 255;
    _tempo = tempo;
}
@synthesize length = _length;
- (void)setLength:(int)length
{
    // Sanity check
    if (length < 1)
        length = 1;
    if (length > 64)
        length = 64;
    _length = length;
}
@synthesize music = _music;
@synthesize dataPos = _dataPos;
@synthesize isPlaying = _isPlaying;


- (id)init
{
    self = [super init];
    if (self)
    {
        self.octive = 4;
        self.tempo = 120;
        self.length = 1;
        return self;
    }
    return nil;
}

- (void)play:(NSString *)music
{
    DLog(@"%@", music);
    self.music = [[music stringByReplacingOccurrencesOfString:@"+" withString:@"#"]
                  dataUsingEncoding:NSASCIIStringEncoding];
    self.dataPos = 0;
    self.isPlaying = YES;
    [self playNote];
}

- (void)stop
{
    self.isPlaying = NO;
}

- (void)playNote
{
    if (!self.isPlaying)
        return;

    if (self.dataPos > self.music.length || self.music.length == 0) {
        self.isPlaying = NO;
        return;
    }

    unsigned char *data = (unsigned char *)[self.music bytes];
    unsigned int code = (unsigned int)data[self.dataPos];
    self.dataPos++;

    switch (code) {
        case 65: // A
        case 66: // B
        case 67: // C
        case 68: // D
        case 69: // E
        case 70: // F
        case 71: // G
        {
            // Peek at the next char to look for a sharp or flat
            bool sharp = NO;
            bool flat = NO;
            if (self.dataPos < self.music.length) {
                unsigned int peak = (unsigned int)data[self.dataPos];
                if (peak == 35) // #
                {
                    self.dataPos++;
                    sharp = YES;
                }
                else if (peak == 45) // -
                {
                    self.dataPos++;
                    flat = YES;
                }
            }

            // Peek ahead for a length change
            bool look = YES;
            int count = 0;
            int newLength = 0;
            while (self.dataPos < self.music.length && look) {
                unsigned int peak = (unsigned int)data[self.dataPos];
                if (peak >= 48 && peak <= 57)
                {
                    peak -= 48;
                    int n = (count * 10);
                    if (n == 0) { n = 1; }
                    newLength += peak * n;
                    self.dataPos++;
                } else {
                    look = NO;
                }
            }

            // Pick the note length
            int length = self.length;
            if (newLength != 0)
            {
                DLog(@"InlineLength: %d", newLength);
                length = newLength;
            }

            // Create the note string
            NSString *note = [NSString stringWithFormat:@"%c", code];
            if (sharp)
                note = [note stringByAppendingFormat:@"#"];
            else if (flat)
                note = [note stringByAppendingFormat:@"-"];

            // Set the tone generator freq
            [self setFreq:[self getNoteNumber:note]];

            // Play the note
            [self.toneGen play:(self.tempo / length)];
        }
        break;

        case 76: // L (length)
        {
            bool look = YES;
            int newLength = 0;
            while (self.dataPos < self.music.length && look) {
                unsigned int peak = (unsigned int)data[self.dataPos];
                if (peak >= 48 && peak <= 57)
                {
                    peak -= 48;
                    newLength = newLength * 10 + peak;
                    self.dataPos++;
                } else {
                    look = NO;
                }
            }
            self.length = newLength;
            DLog(@"Length: %d", self.length);
            [self playNote];
        }
        break;

        case 79: // O (octive)
        {
            bool look = YES;
            int newOctive = 0;
            while (self.dataPos < self.music.length && look) {
                unsigned int peak = (unsigned int)data[self.dataPos];
                if (peak >= 48 && peak <= 57)
                {
                    peak -= 48;
                    newOctive = newOctive * 10 + peak;
                    self.dataPos++;
                } else {
                    look = NO;
                }
            }
            self.octive = newOctive;
            DLog(@"Octive: %d", self.octive);
            [self playNote];
        }
        break;

        case 84: // T (tempo)
        {
            bool look = YES;
            int newTempo = 0;
            while (self.dataPos < self.music.length && look) {
                unsigned int peak = (unsigned int)data[self.dataPos];
                if (peak >= 48 && peak <= 57)
                {
                    peak -= 48;
                    newTempo = newTempo * 10 + peak;
                    self.dataPos++;
                } else {
                    look = NO;
                }
            }
            self.tempo = newTempo;
            DLog(@"Tempo: %d", self.tempo);
            [self playNote];
        }
        break;

        default:
            [self playNote];
            break;
    }
}


- (int)getNoteNumber:(NSString *)note
{
    note = [note uppercaseString];
    DLog(@"%@", note);

    if ([note isEqualToString:@"A"])
        return 0;
    else if ([note isEqualToString:@"A#"] || [note isEqualToString:@"B-"])
        return 1;
    else if ([note isEqualToString:@"B"] || [note isEqualToString:@"C-"])
        return 2;
    else if ([note isEqualToString:@"C"] || [note isEqualToString:@"B#"])
        return 3;
    else if ([note isEqualToString:@"C#"] || [note isEqualToString:@"D-"])
        return 4;
    else if ([note isEqualToString:@"D"])
        return 5;
    else if ([note isEqualToString:@"D#"] || [note isEqualToString:@"E-"])
        return 6;
    else if ([note isEqualToString:@"E"] || [note isEqualToString:@"F-"])
        return 7;
    else if ([note isEqualToString:@"F"] || [note isEqualToString:@"E#"])
        return 8;
    else if ([note isEqualToString:@"F#"] || [note isEqualToString:@"G-"])
        return 9;
    else if ([note isEqualToString:@"G"])
        return 10;
    else if ([note isEqualToString:@"G#"])
        return 11;
}

- (void)setFreq:(int)note
{
    float a = powf(2, self.octive);
    float b = powf(1.059463, note);
    float freq = roundf((275.0 * a * b) / 10);
    self.toneGen.frequency = freq;
}

- (void)toneStop
{
    [self playNote];
}

@end

To play a little tune, create a Music object and play...

[self.music play:@"T180 DF#A L2 A L4 O4 AA P4 F#F# P4 O3 D DF#A L2 A L4 O4 AA P4 GG P4 O3 C#C#EB L2 B L4 O4 BB P4 GG P4 O3 C#C#EB L2 B L4 O4 BB P4 F+F+ P4 O3 DDF#A L2 O4 D L4 O5 DD P4O4 AA P4 O3 DDF#A L2 O4 D L4 O5 DD P4O4 BB P4 EEG L8 B P8 ML B1 L4 MN G#A ML L3 O5 F#1L4 MN D O4 F# ML L2 F# MN L4 E ML L2 B MN L4 AD P8 D8 D4"];

Any ideas on how to get rid of the chirp between the notes?

Best Answer

I think the point where you stop the audio output between notes is the culprit:

if (self.toneUnit)
{
    AudioOutputUnitStop(self.toneUnit);
    AudioUnitUninitialize(self.toneUnit);
    AudioComponentInstanceDispose(self.toneUnit);
    self.toneUnit = nil;
}

Just leave the tone unit active and you will hear much less chirping. You will need some other way of producing silence, perhaps by letting RenderTone keep running but output zero amplitude.
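
For example, here is a rough sketch of that idea (this is not code from the question; it assumes a hypothetical muted BOOL property on ToneGen that the render callback can read through inRefCon, and it never tears the unit down between notes):

// Sketch only: the tone unit is created and started once and left running.
- (void)stop
{
    [self.timer invalidate];
    self.timer = nil;

    // Leave the AudioUnit running; just ask the callback to render silence.
    self.muted = YES;

    if (self.delegate && [self.delegate respondsToSelector:@selector(toneStop)]) {
        [self.delegate performSelector:@selector(toneStop)];
    }
}

// ...and inside RenderTone, when filling the buffer:
//     double amp = toneGen.muted ? 0.0 : amplitude;
//     buffer[frame] = sin(theta) * amp;

Note that cutting the amplitude to zero mid-cycle can still produce a small click, which is what the fade described next avoids.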

I was able to get rid of the remaining slight chirp by fading the amplitude down to zero when the frequency changes, updating the frequency, and then fading back in. That is of course something the old PC speaker could not do (short of rapidly switching it off and on again), but if the fade is very fast you can still get the old-school effect without the chirp.

Here is my fading RenderTone function (which currently uses evil global variables):

double currentFrequency = 0;
double currentSampleRate = 0;
double currentAmplitude = 0;

OSStatus RenderTone(void *inRefCon,
                    AudioUnitRenderActionFlags *ioActionFlags,
                    const AudioTimeStamp *inTimeStamp,
                    UInt32 inBusNumber,
                    UInt32 inNumberFrames,
                    AudioBufferList *ioData)
{
    // Fixed target amplitude is good enough for our purposes
    const double amplitude = 0.5;

    // Get the tone parameters out of the ToneGen object passed as inRefCon
    ToneGen *toneGen = (__bridge ToneGen *)inRefCon;
    double theta = toneGen.theta;

    BOOL fadingOut = NO;
    if ((currentFrequency != toneGen.frequency) || (currentSampleRate != toneGen.sampleRate))
    {
        if (currentAmplitude > DBL_EPSILON)
        {
            fadingOut = YES;
        }
        else
        {
            currentFrequency = toneGen.frequency;
            currentSampleRate = toneGen.sampleRate;
        }
    }

    double theta_increment = 2.0 * M_PI * currentFrequency / currentSampleRate;

    // This is a mono tone generator so we only need the first buffer
    const int channel = 0;
    Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

    // Generate the samples
    for (UInt32 frame = 0; frame < inNumberFrames; frame++)
    {
        buffer[frame] = sin(theta) * currentAmplitude;
        //NSLog(@"amplitude = %f", currentAmplitude);

        theta += theta_increment;
        if (theta > 2.0 * M_PI)
        {
            theta -= 2.0 * M_PI;
        }

        if (fadingOut)
        {
            if (currentAmplitude > 0)
            {
                currentAmplitude -= 0.001;
                if (currentAmplitude < 0)
                    currentAmplitude = 0;
            }
        }
        else
        {
            if (currentAmplitude < amplitude)
            {
                currentAmplitude += 0.001;
                if (currentAmplitude > amplitude)
                    currentAmplitude = amplitude;
            }
        }
    }

    // Store the theta back in the ToneGen object
    toneGen.theta = theta;

    return noErr;
}
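
With this ramp the fade speed is set by the fixed 0.001 step per sample: at the 44100 Hz sample rate used above, moving between silence and the 0.5 target amplitude takes 0.5 / 0.001 = 500 samples, or roughly 11 ms in each direction, which is quick enough to still sound like an abrupt PC-speaker on/off but gradual enough to avoid the discontinuity that caused the chirp.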

The original question "objective-c - AudioUnit tone generator is giving me a chirp at the end of each tone generated" can be found on Stack Overflow: https://stackoverflow.com/questions/10051215/
