- iOS/Objective-C 元类和类别
- objective-c - -1001 错误,当 NSURLSession 通过 httpproxy 和/etc/hosts
- java - 使用网络类获取 url 地址
- ios - 推送通知中不播放声音
我正在为旧的 GW-BASIC PLAY
命令编写一个老式音乐模拟器。为此,我实现了一个音调发生器和一个音乐播放器。但在演奏的每个音符之间,我都会听到一声恼人的唧唧声(chirp)。以下是我的两个类:
ToneGen.h
#import <Foundation/Foundation.h>
/// Sine-wave tone generator driving the system default audio output unit.
@interface ToneGen : NSObject

/// Informal delegate; sent -toneStop when a tone finishes.
/// Declared `weak` to break the retain cycle with owners (e.g. Music)
/// that strongly hold the ToneGen while setting themselves as delegate —
/// a bare `id` property is implicitly strong under ARC.
@property (nonatomic, weak) id delegate;

/// Tone frequency in Hz (default 1440).
@property (nonatomic) double frequency;

/// Output sample rate in Hz (default 44100).
@property (nonatomic) double sampleRate;

/// Current oscillator phase in radians; read/written by the render callback.
@property (nonatomic) double theta;

/// Plays the tone for `ms` milliseconds, then stops and notifies the delegate.
- (void)play:(float)ms;
/// Starts playing indefinitely.
- (void)play;
/// Stops playback, tears down the audio unit, and notifies the delegate.
- (void)stop;
@end
ToneGen.m
#import <AudioUnit/AudioUnit.h>
#import "ToneGen.h"
// Render callback that fills the output buffer with sine samples
// (defined at the bottom of this file).
OSStatus RenderTone(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData);
// Audio-session interruption handler; stops the tone generator passed as
// the client data (also defined at the bottom of this file).
void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState);
// Private state: the Core Audio output unit and the note-duration timer.
@interface ToneGen()
@property (nonatomic) AudioComponentInstance toneUnit;
@property (nonatomic) NSTimer *timer;
// Builds the output unit and installs RenderTone as its render callback.
- (void)createToneUnit;
@end
@implementation ToneGen

// Ivars for all properties are auto-synthesized by the modern runtime
// (_toneUnit, _timer, _delegate, _frequency, _sampleRate, _theta); the
// explicit @synthesize directives were redundant and have been removed.

- (id)init
{
    self = [super init];
    if (self)
    {
        // Defaults: CD-quality sample rate, 1440 Hz tone.
        // Ivar access (not self.) inside init per convention.
        _sampleRate = 44100;
        _frequency = 1440.0f;
    }
    return self;
}

// Plays the tone for `ms` milliseconds, then stops via a one-shot timer.
- (void)play:(float)ms
{
    [self play];
    // NSTimer intervals are seconds, so convert from milliseconds.
    // (The original divided by 100, which matches neither seconds nor the
    // parameter's declared millisecond unit.)
    self.timer = [NSTimer timerWithTimeInterval:(ms / 1000.0)
                                         target:self
                                       selector:@selector(stop)
                                       userInfo:nil
                                        repeats:NO];
    // Register the timer exactly once, on the main run loop in the common
    // modes so UI tracking does not delay the stop. (The original used
    // scheduledTimerWithTimeInterval: AND a second addTimer:, registering
    // the timer twice when called from the main thread.)
    [[NSRunLoop mainRunLoop] addTimer:self.timer forMode:NSRunLoopCommonModes];
}

// Starts continuous playback, creating the output unit on demand.
- (void)play
{
    if (!self.toneUnit)
    {
        [self createToneUnit];

        // Stop changing parameters on the unit.
        // AudioUnit calls return OSStatus (32-bit); the original stored
        // them in a 16-bit OSErr, silently truncating error codes.
        OSStatus err = AudioUnitInitialize(self.toneUnit);
        if (err)
            DLog(@"Error initializing unit");

        // Start playback
        err = AudioOutputUnitStart(self.toneUnit);
        if (err)
            DLog(@"Error starting unit");
    }
}

// Stops playback, tears down the output unit, and notifies the delegate.
- (void)stop
{
    [self.timer invalidate];
    self.timer = nil;

    if (self.toneUnit)
    {
        AudioOutputUnitStop(self.toneUnit);
        AudioUnitUninitialize(self.toneUnit);
        AudioComponentInstanceDispose(self.toneUnit);
        self.toneUnit = nil;
    }

    // NOTE(review): tearing the unit down between notes is the likely
    // source of the inter-note chirp; keeping the unit running and
    // rendering silence instead would avoid it.
    if (self.delegate && [self.delegate respondsToSelector:@selector(toneStop)]) {
        [self.delegate performSelector:@selector(toneStop)];
    }
}

// Builds the default-output audio unit, wires RenderTone up as its input
// callback, and configures 32-bit float mono PCM at self.sampleRate.
- (void)createToneUnit
{
    // Describe the system default output unit. Zero-initialize so any
    // fields we do not set explicitly are deterministic.
    AudioComponentDescription defaultOutputDescription = {0};
    defaultOutputDescription.componentType = kAudioUnitType_Output;
    defaultOutputDescription.componentSubType = kAudioUnitSubType_DefaultOutput;
    defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    defaultOutputDescription.componentFlags = 0;
    defaultOutputDescription.componentFlagsMask = 0;

    // Get the default playback output unit
    AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
    if (!defaultOutput)
        DLog(@"Can't find default output");

    // Create a new unit based on this that we'll use for output
    OSStatus err = AudioComponentInstanceNew(defaultOutput, &_toneUnit);
    if (err)
        DLog(@"Error creating unit");

    // Set our tone rendering function on the unit
    AURenderCallbackStruct input;
    input.inputProc = RenderTone;
    input.inputProcRefCon = (__bridge void *)self;
    err = AudioUnitSetProperty(self.toneUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input,
                               0,
                               &input,
                               sizeof(input));
    if (err)
        DLog(@"Error setting callback");

    // Set the format to 32 bit, single channel, floating point, linear PCM.
    // Zero-initialized so reserved fields are not left as stack garbage.
    const int four_bytes_per_float = 4;
    const int eight_bits_per_byte = 8;
    AudioStreamBasicDescription streamFormat = {0};
    streamFormat.mSampleRate = self.sampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
        kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = four_bytes_per_float;
    streamFormat.mChannelsPerFrame = 1;
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
    err = AudioUnitSetProperty(self.toneUnit,
                               kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input,
                               0,
                               &streamFormat,
                               sizeof(AudioStreamBasicDescription));
    if (err)
        DLog(@"Error setting stream format");
}

@end
// Render callback: fills the first (mono) buffer with a fixed-amplitude
// sine wave and advances the generator's phase so successive callbacks
// join seamlessly.
OSStatus RenderTone(
    void *inRefCon,
    AudioUnitRenderActionFlags *ioActionFlags,
    const AudioTimeStamp *inTimeStamp,
    UInt32 inBusNumber,
    UInt32 inNumberFrames,
    AudioBufferList *ioData)
{
    // Fixed output level; good enough for our purposes.
    const double kAmplitude = 0.25;

    // Recover the generator carried through the callback's refCon.
    ToneGen *generator = (__bridge ToneGen *)inRefCon;
    double phase = generator.theta;
    const double phaseStep = 2.0 * M_PI * generator.frequency / generator.sampleRate;

    // Mono output: only buffer 0 is written.
    Float32 *samples = (Float32 *)ioData->mBuffers[0].mData;

    for (UInt32 i = 0; i < inNumberFrames; i++)
    {
        samples[i] = sin(phase) * kAmplitude;
        phase += phaseStep;
        // Keep the phase in [0, 2π) to avoid precision loss over time.
        if (phase > 2.0 * M_PI)
            phase -= 2.0 * M_PI;
    }

    // Persist the phase so the next callback continues where this one ended.
    generator.theta = phase;
    return noErr;
}
// Audio-session interruption handler: stops the tone generator that was
// registered as the client data.
void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState)
{
    [(__bridge ToneGen *)inClientData stop];
}
Music.h
#import <Foundation/Foundation.h>
// Plays GW-BASIC PLAY-style music strings (notes A-G with '#'/'+'/'-'
// modifiers, L=length, O=octave, T=tempo) through a tone generator.
@interface Music : NSObject
// Starts playing the given music string; notes are chained asynchronously.
- (void) play:(NSString *)music;
// Stops playback after the note currently sounding.
- (void) stop;
@end
Music.m
#import "Music.h"
#import "ToneGen.h"
@interface Music()
// Lazily-created tone generator; this object acts as its informal delegate.
@property (nonatomic, readonly) ToneGen *toneGen;
// Current octave, clamped to 0-6 by the custom setter. (Sic: "octive".)
@property (nonatomic, assign) int octive;
// Tempo, clamped to 30-255 by the custom setter.
@property (nonatomic, assign) int tempo;
// Default note length (1 = whole note), clamped to 1-64 by the custom setter.
@property (nonatomic, assign) int length;
// ASCII bytes of the music string currently being played.
@property (nonatomic, strong) NSData *music;
// Read cursor into `music`.
@property (nonatomic, assign) int dataPos;
// Cleared by -stop (or at end of data) to break the note chain.
@property (nonatomic, assign) BOOL isPlaying;
// Consumes and performs the next command in the music string.
- (void)playNote;
@end
@implementation Music

// A readonly property with a custom getter gets no auto-synthesized ivar,
// so _toneGen must be synthesized explicitly. The properties with custom
// setters keep explicit @synthesize for the same reason of clarity; the
// plain ones (music, dataPos, isPlaying) rely on auto-synthesis.
@synthesize toneGen = _toneGen;
@synthesize octive = _octive;
@synthesize tempo = _tempo;
@synthesize length = _length;

// Lazily creates the tone generator and registers for its toneStop callback.
- (ToneGen *)toneGen
{
    if (_toneGen == nil)
    {
        _toneGen = [[ToneGen alloc] init];
        _toneGen.delegate = self;
    }
    return _toneGen;
}

// Clamps the octave to 0-6.
- (void)setOctive:(int)octive
{
    // Sanity check
    if (octive < 0)
        octive = 0;
    if (octive > 6)
        octive = 6;
    _octive = octive;
}

// Clamps the tempo to 30-255.
- (void)setTempo:(int)tempo
{
    // Sanity check
    if (tempo < 30)
        tempo = 30;
    if (tempo > 255)
        tempo = 255;
    _tempo = tempo;
}

// Clamps the default note length to 1 (whole note) through 64.
- (void)setLength:(int)length
{
    // Sanity check
    if (length < 1)
        length = 1;
    if (length > 64)
        length = 64;
    _length = length;
}

- (id)init
{
    self = [super init];
    if (self)
    {
        // GW-BASIC PLAY defaults: octave 4, tempo 120, length 1.
        self.octive = 4;
        self.tempo = 120;
        self.length = 1;
    }
    return self;
}

// Starts playing a PLAY-style music string. '+' (alternate sharp notation)
// is normalized to '#' before parsing.
- (void) play:(NSString *)music
{
    DLog(@"%@", music);
    self.music = [[music stringByReplacingOccurrencesOfString:@"+" withString:@"#"]
                  dataUsingEncoding: NSASCIIStringEncoding];
    self.dataPos = 0;
    self.isPlaying = YES;
    [self playNote];
}

// Stops playback; playNote checks this flag before each note.
- (void)stop
{
    self.isPlaying = NO;
}

// Reads a run of ASCII digits at dataPos, advancing past them.
// Returns the decimal value, or -1 if no digit is present. Shared by the
// inline-note-length, L, O and T parsers, which previously duplicated
// this loop (and the inline-length copy summed digits — "12" parsed as 3
// — because its place-value counter was never incremented).
- (int)readNumber
{
    const unsigned char *data = (const unsigned char *)[self.music bytes];
    BOOL found = NO;
    int value = 0;
    while (self.dataPos < self.music.length) {
        unsigned int c = (unsigned int)data[self.dataPos];
        if (c < 48 || c > 57) // not '0'..'9'
            break;
        value = value * 10 + (int)(c - 48);
        self.dataPos++;
        found = YES;
    }
    return found ? value : -1;
}

// Consumes one command from the music string. Notes schedule the next call
// via the toneStop delegate callback; control codes recurse directly.
- (void)playNote
{
    if (!self.isPlaying)
        return;

    // >= (not >): once dataPos == length every byte has been consumed.
    // The original '>' permitted a one-byte out-of-bounds read below.
    if (self.dataPos >= self.music.length || self.music.length == 0) {
        self.isPlaying = NO;
        return;
    }

    const unsigned char *data = (const unsigned char *)[self.music bytes];
    unsigned int code = (unsigned int)data[self.dataPos];
    self.dataPos++;

    switch (code) {
        case 65: // A
        case 66: // B
        case 67: // C
        case 68: // D
        case 69: // E
        case 70: // F
        case 71: // G
        {
            // Peek at the next char for a sharp or flat modifier.
            bool sharp = NO;
            bool flat = NO;
            if (self.dataPos < self.music.length) {
                unsigned int next = (unsigned int)data[self.dataPos];
                if (next == 35) // #
                {
                    self.dataPos++;
                    sharp = YES;
                }
                else if (next == 45) // -
                {
                    self.dataPos++;
                    flat = YES;
                }
            }

            // Optional inline length, e.g. "A8" plays an eighth note.
            int newLength = [self readNumber];

            // Pick the note length (inline overrides the L default).
            int length = self.length;
            if (newLength > 0)
            {
                DLog(@"InlineLength: %d", newLength);
                length = newLength;
            }

            // Build the note name, e.g. "A#" or "B-".
            NSString *note = [NSString stringWithFormat:@"%c", code];
            if (sharp)
                note = [note stringByAppendingString:@"#"];
            else if (flat)
                note = [note stringByAppendingString:@"-"];

            // Set the tone generator freq, then play the note.
            [self setFreq:[self getNoteNumber:note]];
            // Float division: the original integer division truncated
            // (e.g. tempo 120 / length 16 -> 7, not 7.5).
            // NOTE(review): tempo/length is not actually milliseconds —
            // a GW-BASIC note lasts (240000 / tempo) / length ms; confirm
            // the intended unit against ToneGen's play: parameter.
            [self.toneGen play:((float)self.tempo / length)];
        }
        break;

        case 76: // L (default note length)
        {
            int value = [self readNumber];
            // Ignore a bare "L" with no digits (e.g. the tail of an
            // unsupported "ML" command); the original clamped the length
            // to 1 in that case, corrupting all subsequent notes.
            if (value >= 0)
            {
                self.length = value;
                DLog(@"Length: %d", self.length);
            }
            [self playNote];
        }
        break;

        case 79: // O (octave)
        {
            int value = [self readNumber];
            if (value >= 0)
            {
                self.octive = value;
                DLog(@"Octive: %d", self.octive);
            }
            [self playNote];
        }
        break;

        case 84: // T (tempo)
        {
            int value = [self readNumber];
            if (value >= 0)
            {
                self.tempo = value;
                DLog(@"Tempo: %d", self.tempo);
            }
            [self playNote];
        }
        break;

        default:
            // Unsupported commands (P pauses, M music modes, spaces, …)
            // are skipped. NOTE(review): "P" should render silence for the
            // given length rather than being ignored.
            [self playNote];
            break;
    }
}

// Maps a note name ("A".."G", optionally with '#' sharp or '-' flat) to a
// semitone index 0-11 relative to A.
- (int)getNoteNumber:(NSString*)note
{
    note = [note uppercaseString];
    DLog(@"%@", note);
    if ([note isEqualToString:@"A"])
        return 0;
    else if ([note isEqualToString:@"A#"] || [note isEqualToString:@"B-"])
        return 1;
    else if ([note isEqualToString:@"B"] || [note isEqualToString:@"C-"])
        return 2;
    else if ([note isEqualToString:@"C"] || [note isEqualToString:@"B#"])
        return 3;
    else if ([note isEqualToString:@"C#"] || [note isEqualToString:@"D-"])
        return 4;
    else if ([note isEqualToString:@"D"])
        return 5;
    else if ([note isEqualToString:@"D#"] || [note isEqualToString:@"E-"])
        return 6;
    else if ([note isEqualToString:@"E"] || [note isEqualToString:@"F-"])
        return 7;
    else if ([note isEqualToString:@"F"] || [note isEqualToString:@"E#"])
        return 8;
    else if ([note isEqualToString:@"F#"] || [note isEqualToString:@"G-"])
        return 9;
    else if ([note isEqualToString:@"G"])
        return 10;
    else if ([note isEqualToString:@"G#"])
        return 11;
    // Fallback for unrecognized input: the original fell off the end of a
    // value-returning function (undefined behavior in C); map to A instead.
    return 0;
}

// Sets the generator frequency for semitone `note` (0 = A) in the current
// octave: 27.5 Hz (A0) scaled by 2^octave, times 1.059463 ≈ 2^(1/12) per
// semitone. Written as (275.0 * …) / 10 to preserve the original rounding.
- (void)setFreq:(int)note
{
    float a = powf(2, self.octive);
    float b = powf(1.059463, note);
    float freq = roundf((275.0 * a * b) / 10);
    self.toneGen.frequency = freq;
}

// ToneGen delegate callback: the previous note finished; play the next.
- (void)toneStop
{
    [self playNote];
}

@end
要播放一段小曲,创建一个 Music
对象并调用 play: ……
[self.music play:@"T180 DF#A L2 A L4 O4 AA P4 F#F# P4 O3 D DF#A L2 A L4 O4 AA P4 GG P4 O3 C#C#EB L2 B L4 O4 BB P4 GG P4 O3 C#C#EB L2 B L4 O4 BB P4 F+F+ P4 O3 DDF#A L2 O4 D L4 O5 DD P4O4 AA P4 O3 DDF#A L2 O4 D L4 O5 DD P4O4 BB P4 EEG L8 B P8 ML B1 L4 MN G#A ML L3 O5 F#1L4 MN D O4 F# ML L2 F# MN L4 E ML L2 B MN L4 AD P8 D8 D4"];
关于如何消除音符之间的唧唧声有什么想法吗?
最佳答案
我认为你在音符之间停止音频输出的那一点是罪魁祸首:
if (self.toneUnit)
{
AudioOutputUnitStop(self.toneUnit);
AudioUnitUninitialize(self.toneUnit);
AudioComponentInstanceDispose(self.toneUnit);
self.toneUnit = nil;
}
只要让音调单元处于事件状态,您就会听到较少的鸣叫声。您将需要一些其他方式来产生静音,可能是让 RenderTone 继续运行但产生零振幅。
我能够消除残留的轻微啁啾声,方法是在频率变化时将振幅逐渐降低到零,更新频率,然后再次淡入。这当然是旧 PC 扬声器无法做到的(除了少数人会迅速再次打开它),但是如果衰减非常快,您可能会在没有唧唧声的情况下获得老式效果。
这是我的褪色 RenderTone
函数(目前使用邪恶的全局变量):
// Fade state shared between render callbacks. NOTE(review): these mutable
// globals are written on the render thread while toneGen.frequency changes
// on another thread with no synchronization — confirm this is acceptable
// before using beyond a demo.
double currentFrequency=0;
double currentSampleRate=0;
double currentAmplitude=0;
// Render callback with fade-out/fade-in: when the requested frequency (or
// sample rate) changes, the amplitude ramps down to zero, the new values
// are latched, and the amplitude ramps back up — removing the click that a
// hard frequency jump produces.
OSStatus RenderTone(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// Fixed amplitude is good enough for our purposes
const double amplitude = 0.5;
// Get the tone parameters out of the view controller
ToneGen *toneGen = (__bridge ToneGen *)inRefCon;
double theta = toneGen.theta;
// A parameter change triggers a fade-out; the new frequency/sample rate
// are only latched once the amplitude has already reached (near) zero.
BOOL fadingOut = NO;
if ((currentFrequency != toneGen.frequency) || (currentSampleRate != toneGen.sampleRate))
{
if (currentAmplitude > DBL_EPSILON)
{
fadingOut = YES;
}
else
{
currentFrequency = toneGen.frequency;
currentSampleRate = toneGen.sampleRate;
}
}
// Phase step uses the latched (not requested) frequency during a fade.
double theta_increment = 2.0 * M_PI * currentFrequency /currentSampleRate;
// This is a mono tone generator so we only need the first buffer
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
// Generate the samples
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
buffer[frame] = sin(theta) * currentAmplitude;
//NSLog(@"amplitude = %f", currentAmplitude);
theta += theta_increment;
if (theta > 2.0 * M_PI)
{
theta -= 2.0 * M_PI;
}
// Ramp 0.001 per sample: 0.5 / 0.001 = 500 samples (≈11 ms at 44.1 kHz)
// for a full fade, down toward zero while fading out, otherwise up
// toward the target amplitude.
if (fadingOut)
{
if (currentAmplitude > 0)
{
currentAmplitude -= 0.001;
if (currentAmplitude < 0)
currentAmplitude = 0;
}
}
else
{
if (currentAmplitude < amplitude)
{
currentAmplitude += 0.001;
if (currentAmplitude > amplitude)
currentAmplitude = amplitude;
}
}
}
// Store the theta back in the view controller
toneGen.theta = theta;
return noErr;
}
关于objective-c - AudioUnit 音调生成器在生成的每个音调结束时给我一个唧唧声,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/10051215/
SQLite、Content provider 和 Shared Preference 之间的所有已知区别。 但我想知道什么时候需要根据情况使用 SQLite 或 Content Provider 或
警告:我正在使用一个我无法完全控制的后端,所以我正在努力解决 Backbone 中的一些注意事项,这些注意事项可能在其他地方更好地解决......不幸的是,我别无选择,只能在这里处理它们! 所以,我的
我一整天都在挣扎。我的预输入搜索表达式与远程 json 数据完美配合。但是当我尝试使用相同的 json 数据作为预取数据时,建议为空。点击第一个标志后,我收到预定义消息“无法找到任何内容...”,结果
我正在制作一个模拟 NHL 选秀彩票的程序,其中屏幕右侧应该有一个 JTextField,并且在左侧绘制弹跳的选秀球。我创建了一个名为 Ball 的类,它实现了 Runnable,并在我的主 Draf
这个问题已经有答案了: How can I calculate a time span in Java and format the output? (18 个回答) 已关闭 9 年前。 这是我的代码
我有一个 ASP.NET Web API 应用程序在我的本地 IIS 实例上运行。 Web 应用程序配置有 CORS。我调用的 Web API 方法类似于: [POST("/API/{foo}/{ba
我将用户输入的时间和日期作为: DatePicker dp = (DatePicker) findViewById(R.id.datePicker); TimePicker tp = (TimePic
放宽“邻居”的标准是否足够,或者是否有其他标准行动可以采取? 最佳答案 如果所有相邻解决方案都是 Tabu,则听起来您的 Tabu 列表的大小太长或您的释放策略太严格。一个好的 Tabu 列表长度是
我正在阅读来自 cppreference 的代码示例: #include #include #include #include template void print_queue(T& q)
我快疯了,我试图理解工具提示的行为,但没有成功。 1. 第一个问题是当我尝试通过插件(按钮 1)在点击事件中使用它时 -> 如果您转到 Fiddle,您会在“内容”内看到该函数' 每次点击都会调用该属
我在功能组件中有以下代码: const [ folder, setFolder ] = useState([]); const folderData = useContext(FolderContex
我在使用预签名网址和 AFNetworking 3.0 从 S3 获取图像时遇到问题。我可以使用 NSMutableURLRequest 和 NSURLSession 获取图像,但是当我使用 AFHT
我正在使用 Oracle ojdbc 12 和 Java 8 处理 Oracle UCP 管理器的问题。当 UCP 池启动失败时,我希望关闭它创建的连接。 当池初始化期间遇到 ORA-02391:超过
关闭。此题需要details or clarity 。目前不接受答案。 想要改进这个问题吗?通过 editing this post 添加详细信息并澄清问题. 已关闭 9 年前。 Improve
引用这个plunker: https://plnkr.co/edit/GWsbdDWVvBYNMqyxzlLY?p=preview 我在 styles.css 文件和 src/app.ts 文件中指定
为什么我的条形这么细?我尝试将宽度设置为 1,它们变得非常厚。我不知道还能尝试什么。默认厚度为 0.8,这是应该的样子吗? import matplotlib.pyplot as plt import
当我编写时,查询按预期执行: SELECT id, day2.count - day1.count AS diff FROM day1 NATURAL JOIN day2; 但我真正想要的是右连接。当
我有以下时间数据: 0 08/01/16 13:07:46,335437 1 18/02/16 08:40:40,565575 2 14/01/16 22:2
一些背景知识 -我的 NodeJS 服务器在端口 3001 上运行,我的 React 应用程序在端口 3000 上运行。我在 React 应用程序 package.json 中设置了一个代理来代理对端
我面临着一个愚蠢的问题。我试图在我的 Angular 应用程序中延迟加载我的图像,我已经尝试过这个2: 但是他们都设置了 src attr 而不是 data-src,我在这里遗漏了什么吗?保留 d
我是一名优秀的程序员,十分优秀!