- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover 和 :active 上具有不同效果的 CSS 动画?
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我一直在寻找答案,但一直没能找到——显然;)
这是我所拥有的——我有一个单词列表。每个单词都会在 iPhone 上保存为 wav 文件。在我的应用程序中,用户将选择单词,我想将这些单词组合在一起组成一个句子。
我无法确定如何按顺序将多个 wav 文件组合在一起,以将整个句子创建为单个文件。
我已经通过示例弄清楚了如何将这些文件作为一个文件一起播放 - 但示例将它们混合在一起 - 我基本上需要将它们相互附加。我尝试将它们相互附加并从除第一个文件之外的所有文件中删除 header 信息,但此过程不起作用。文件长度正确,但只播放第一个文件的内容。
我认为正确的路径是使用 AudioFileReadPacketData 读取文件并使用 AudioFileWritePacketData 将信息写入新文件。事实证明这很困难...
有人有音频 API 的经验并且可以提供一些示例代码吗?
好的 - 对此事进行更多研究...看起来正确的功能是音频队列离线渲染。 Apple 提供了一些示例代码(AQOfflineRenderTest)。离线渲染的原因是因为您可以将输出缓冲区附加到渲染并将其保存到文件中。随着项目的进展,还会有更多内容......
好吧——三天了,没有任何实际进展......
我正在尝试将三个 .wav 文件合并到目标 .wav 文件中。现在,当您运行此代码时,第一个文件将保存到目标位置。
有什么想法吗?
此源代码使用 Apple 提供的 iPublicUtility 类——它们可以随多个示例项目一起下载,其中一个项目是 aurioTouch。
这是我的代码(将其放在 .cpp 文件中,并在普通 Objective-C 源文件中引用 CombineAudioFiles):
// standard includes
#include <AudioToolbox/AudioQueue.h>
#include <AudioToolbox/AudioFile.h>
#include <AudioToolbox/ExtendedAudioFile.h>
// helpers
#include "CAXException.h"
#include "CAStreamBasicDescription.h"
#define kNumberOfBuffers 3
#define kMaxNumberOfFiles 3
// the application specific info we keep track of
// Shared state passed (as the void* user data) between CombineAudioFiles and
// the AQTestBufferCallback read callback during offline rendering.
struct AQTestInfo
{
AudioFileID mAudioFile[kMaxNumberOfFiles]; // source files, opened by CombineAudioFiles
CAStreamBasicDescription mDataFormat[kMaxNumberOfFiles]; // per-file data format (only index 0 is filled in by CombineAudioFiles)
AudioQueueRef mQueue[kMaxNumberOfFiles]; // audio queue(s); only index 0 is ever created
AudioQueueBufferRef mBuffer[kNumberOfBuffers]; // read buffers handed to the queue; only index 0 is allocated
UInt32 mNumberOfAudioFiles; // intended count of source files -- NOTE(review): never set by CombineAudioFiles
UInt32 mCurrentAudioFile; // index of the file currently being read by the callback
UInt32 mbufferByteSize; // NOTE(review): unused; the local bufferByteSize in CombineAudioFiles is used instead
SInt64 mCurrentPacket; // next packet index to read from the current file
UInt32 mNumPacketsToRead; // packets per AudioFileReadPackets call (from CalculateBytesForTime)
AudioStreamPacketDescription *mPacketDescs; // packet descriptions for VBR formats, NULL for CBR (e.g. PCM wav)
bool mFlushed; // set by the callback after AudioQueueFlush; ends the render loop
bool mDone; // set by the callback at final EOF; makes further callbacks no-ops
};
#pragma mark- Helper Functions
// ***********************
// CalculateBytesForTime Utility Function
// we only use time here as a guideline
// we are really trying to get somewhere between 16K and 64K buffers, but not allocate too much if we don't need it
void CalculateBytesForTime (CAStreamBasicDescription &inDesc, UInt32 inMaxPacketSize, Float64 inSeconds, UInt32 *outBufferSize, UInt32 *outNumPackets)
{
    // Pick a read-buffer size of roughly inSeconds worth of audio, clamped to
    // the 16K..64K range, and report how many packets of inMaxPacketSize fit.
    static const int maxBufferSize = 0x10000; // 64K ceiling
    static const int minBufferSize = 0x4000;  // 16K floor

    UInt32 bytes;
    if (inDesc.mFramesPerPacket) {
        // Packet duration is predictable: size the buffer for the requested time span.
        Float64 packetsForDuration = inDesc.mSampleRate / inDesc.mFramesPerPacket * inSeconds;
        bytes = packetsForDuration * inMaxPacketSize;
    } else {
        // Frames-per-packet of zero means the codec gives us no packet<->time
        // relationship, so fall back to the default size -- but never smaller
        // than a single packet.
        bytes = (maxBufferSize > inMaxPacketSize) ? maxBufferSize : inMaxPacketSize;
    }

    // Clamp to the ceiling unless one packet alone is already bigger than it;
    // otherwise make sure we are not so small that we hit the disk for tiny chunks.
    if (bytes > maxBufferSize && bytes > inMaxPacketSize) {
        bytes = maxBufferSize;
    } else if (bytes < minBufferSize) {
        bytes = minBufferSize;
    }

    *outBufferSize = bytes;
    *outNumPackets = bytes / inMaxPacketSize;
}
#pragma mark- AQOutputCallback
// ***********************
// AudioQueueOutputCallback function used to push data into the audio queue
static void AQTestBufferCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inCompleteAQBuffer)
{
AQTestInfo * myInfo = (AQTestInfo *)inUserData;
if (myInfo->mDone) return;
UInt32 numBytes;
UInt32 nPackets = myInfo->mNumPacketsToRead;
OSStatus result = AudioFileReadPackets(myInfo->mAudioFile[myInfo->mCurrentAudioFile], // The audio file from which packets of audio data are to be read.
false, // Set to true to cache the data. Otherwise, set to false.
&numBytes, // On output, a pointer to the number of bytes actually returned.
myInfo->mPacketDescs, // A pointer to an array of packet descriptions that have been allocated.
myInfo->mCurrentPacket, // The packet index of the first packet you want to be returned.
&nPackets, // On input, a pointer to the number of packets to read. On output, the number of packets actually read.
inCompleteAQBuffer->mAudioData); // A pointer to user-allocated memory.
if (result) {
DebugMessageN1 ("Error reading from file: %d\n", (int)result);
exit(1);
}
// we have some data
if (nPackets > 0) {
inCompleteAQBuffer->mAudioDataByteSize = numBytes;
result = AudioQueueEnqueueBuffer(inAQ, // The audio queue that owns the audio queue buffer.
inCompleteAQBuffer, // The audio queue buffer to add to the buffer queue.
(myInfo->mPacketDescs ? nPackets : 0), // The number of packets of audio data in the inBuffer parameter. See Docs.
myInfo->mPacketDescs); // An array of packet descriptions. Or NULL. See Docs.
if (result) {
DebugMessageN1 ("Error enqueuing buffer: %d\n", (int)result);
exit(1);
}
myInfo->mCurrentPacket += nPackets;
} else {
// **** This ensures that we flush the queue when done -- ensures you get all the data out ****
if (!myInfo->mFlushed) {
result = AudioQueueFlush(myInfo->mQueue[myInfo->mCurrentAudioFile]);
if (result) {
DebugMessageN1("AudioQueueFlush failed: %d", (int)result);
exit(1);
}
myInfo->mFlushed = true;
}
result = AudioQueueStop(myInfo->mQueue[myInfo->mCurrentAudioFile], false);
if (result) {
DebugMessageN1("AudioQueueStop(false) failed: %d", (int)result);
exit(1);
}
// reading nPackets == 0 is our EOF condition
myInfo->mDone = true;
}
}
// ***********************
#pragma mark- Main Render Function
#if __cplusplus
extern "C" {
#endif
// Offline-renders sourceURL1..sourceURL3, in order, into one 16-bit PCM CAF
// file at destinationURL. All three inputs are assumed to share one data
// format: queue format, magic cookie and channel layout are taken from the
// FIRST file only.
// Errors are reported to stderr via CAXException; resources acquired before a
// thrown error may leak on that path (matches the Apple sample this follows).
void CombineAudioFiles(CFURLRef sourceURL1, CFURLRef sourceURL2, CFURLRef sourceURL3, CFURLRef destinationURL)
{
    // main audio queue code
    try {
        AQTestInfo myInfo;
        myInfo.mDone = false;
        myInfo.mFlushed = false;
        myInfo.mCurrentPacket = 0;
        myInfo.mCurrentAudioFile = 0;
        myInfo.mNumberOfAudioFiles = kMaxNumberOfFiles; // bug fix: was never initialized
        myInfo.mPacketDescs = NULL;                     // bug fix: defined state before the VBR check below

        // Open every source file up front; the callback walks through them in order.
        XThrowIfError(AudioFileOpenURL(sourceURL1, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile[0]), "AudioFileOpen failed");
        XThrowIfError(AudioFileOpenURL(sourceURL2, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile[1]), "AudioFileOpen failed");
        XThrowIfError(AudioFileOpenURL(sourceURL3, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile[2]), "AudioFileOpen failed");

        // All setup below is pinned to file/queue index 0. The callback
        // mutates mCurrentAudioFile as it advances through the files, so
        // indexing by mCurrentAudioFile here would be wrong mid-render.
        UInt32 size = sizeof(myInfo.mDataFormat[0]);
        XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile[0], kAudioFilePropertyDataFormat, &size, &myInfo.mDataFormat[0]), "couldn't get file's data format");
        printf ("File format: "); myInfo.mDataFormat[0].Print();

        // Create the single playback queue; it is reused for every source file.
        XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat[0], // data format to play (first file's; linear PCM must be interleaved)
                                          AQTestBufferCallback,   // refills buffers from the current source file
                                          &myInfo,                // user data handed to the callback
                                          CFRunLoopGetCurrent(),  // run loop on which the callback is invoked
                                          kCFRunLoopCommonModes,  // run loop mode for the callback
                                          0,                      // reserved, must be 0
                                          &myInfo.mQueue[0]),     // out: the newly created queue
                      "AudioQueueNew failed");
        AudioQueueRef queue = myInfo.mQueue[0]; // the only queue ever created

        UInt32 bufferByteSize;
        // we need to calculate how many packets we read at a time and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (myInfo.mDataFormat[0].mBytesPerPacket == 0 || myInfo.mDataFormat[0].mFramesPerPacket == 0);
            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile[0], kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
            // adjust buffer size to represent about a second of audio based on this format
            CalculateBytesForTime(myInfo.mDataFormat[0], maxPacketSize, 1.0/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
            if (isFormatVBR) {
                myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
            } else {
                myInfo.mPacketDescs = NULL; // no packet descriptions for constant bit rate formats (like linear PCM)
            }
            printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
        }

        // if the file has a magic cookie (codec configuration), set it on the queue
        size = sizeof(UInt32);
        OSStatus result = AudioFileGetPropertyInfo (myInfo.mAudioFile[0], kAudioFilePropertyMagicCookieData, &size, NULL);
        if (!result && size) {
            char* cookie = new char [size];
            XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile[0], kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError (AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete [] cookie;
        }

        // propagate the channel layout to the queue, if the file has one
        OSStatus err = AudioFileGetPropertyInfo(myInfo.mAudioFile[0], kAudioFilePropertyChannelLayout, &size, NULL);
        AudioChannelLayout *acl = NULL;
        if (err == noErr && size > 0) {
            acl = (AudioChannelLayout *)malloc(size);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile[0], kAudioFilePropertyChannelLayout, &size, acl), "get audio file's channel layout");
            XThrowIfError(AudioQueueSetProperty(queue, kAudioQueueProperty_ChannelLayout, acl, size), "set channel layout on queue");
        }

        // allocate the input read buffer the callback fills from the source files
        XThrowIfError(AudioQueueAllocateBuffer(queue, bufferByteSize, &myInfo.mBuffer[0]), "AudioQueueAllocateBuffer");

        // canonical interleaved format the offline render will produce
        CAStreamBasicDescription captureFormat;
        captureFormat.mSampleRate = myInfo.mDataFormat[0].mSampleRate;
        captureFormat.SetAUCanonical(myInfo.mDataFormat[0].mChannelsPerFrame, true); // interleaved
        XThrowIfError(AudioQueueSetOfflineRenderFormat(queue, &captureFormat, acl), "set offline render format");

        // destination: 16-bit signed packed PCM CAF, same rate/channels as the source
        ExtAudioFileRef captureFile;
        CAStreamBasicDescription dstFormat;
        dstFormat.mSampleRate = myInfo.mDataFormat[0].mSampleRate;
        dstFormat.mChannelsPerFrame = myInfo.mDataFormat[0].mChannelsPerFrame;
        dstFormat.mFormatID = kAudioFormatLinearPCM;
        dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        dstFormat.mBitsPerChannel = 16;
        dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
        dstFormat.mFramesPerPacket = 1;
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, acl, kAudioFileFlags_EraseFile, &captureFile), "ExtAudioFileCreateWithURL");
        // let ExtAudioFile convert from the queue's canonical format to dstFormat
        XThrowIfError(ExtAudioFileSetProperty(captureFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &captureFormat), "set ExtAudioFile client format");

        // capture buffer at half the enqueue-buffer size: the 2:1 ratio keeps
        // the offline render from pulling faster than the callback pushes
        const UInt32 captureBufferByteSize = bufferByteSize / 2;
        AudioQueueBufferRef captureBuffer;
        AudioBufferList captureABL;
        XThrowIfError(AudioQueueAllocateBuffer(queue, captureBufferByteSize, &captureBuffer), "AudioQueueAllocateBuffer");
        captureABL.mNumberBuffers = 1;
        captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
        captureABL.mBuffers[0].mNumberChannels = captureFormat.mChannelsPerFrame;

        // start playing now - AudioQueueStop is issued by the callback once the
        // last file hits EOF
        XThrowIfError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");

        AudioTimeStamp ts;
        ts.mFlags = kAudioTimeStampSampleTimeValid;
        ts.mSampleTime = 0;

        // priming call: offline render must first be asked for 0 frames
        XThrowIfError(AudioQueueOfflineRender(queue, &ts, captureBuffer, 0), "AudioQueueOfflineRender");

        // enqueue the first buffer by invoking the callback by hand (must be
        // after the queue has started)
        AQTestBufferCallback(&myInfo, queue, myInfo.mBuffer[0]);

        // pull rendered audio and append it to the capture file until the
        // callback signals the flush (all source data consumed)
        while (true) {
            UInt32 reqFrames = captureBufferByteSize / captureFormat.mBytesPerFrame;
            XThrowIfError(AudioQueueOfflineRender(queue, &ts, captureBuffer, reqFrames), "AudioQueueOfflineRender");
            captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
            captureABL.mBuffers[0].mDataByteSize = captureBuffer->mAudioDataByteSize;
            UInt32 writeFrames = captureABL.mBuffers[0].mDataByteSize / captureFormat.mBytesPerFrame;
            printf("t = %.f: AudioQueueOfflineRender: req %d fr/%d bytes, got %d fr/%d bytes\n", ts.mSampleTime, (int)reqFrames, (int)captureBufferByteSize, (int)writeFrames, (int)captureABL.mBuffers[0].mDataByteSize);
            XThrowIfError(ExtAudioFileWrite(captureFile, writeFrames, &captureABL), "ExtAudioFileWrite");
            if (myInfo.mFlushed) break;
            ts.mSampleTime += writeFrames;
        }

        // let any pending run-loop callbacks fire before tearing down
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

        XThrowIfError(AudioQueueDispose(queue, true), "AudioQueueDispose(true) failed");
        // close EVERY source file (the original closed only one and leaked the rest)
        for (UInt32 i = 0; i < kMaxNumberOfFiles; ++i) {
            XThrowIfError(AudioFileClose(myInfo.mAudioFile[i]), "AudioFileClose failed");
        }
        XThrowIfError(ExtAudioFileDispose(captureFile), "ExtAudioFileDispose failed");
        if (myInfo.mPacketDescs) delete [] myInfo.mPacketDescs;
        if (acl) free(acl);
    }
    catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    return;
}
#if __cplusplus
}
#endif
最佳答案
好的——找到了这个问题的答案——仅适用于 wav 文件...
我使用 NSData 将每个文件连接到主数据中。然后我根据 wav 文件规范重写了 header (前 44 个字节)。
这个过程运行良好...该过程中最复杂的部分是重写 header 信息...但是一旦弄清楚了,使用这个过程就可以很好地工作。
关于iPhone 合并音频文件,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/1905010/
今天我在一个 Java 应用程序中看到了几种不同的加载文件的方法。 文件:/ 文件:// 文件:/// 这三个 URL 开头有什么区别?使用它们的首选方式是什么? 非常感谢 斯特凡 最佳答案 file
就目前而言,这个问题不适合我们的问答形式。我们希望答案得到事实、引用或专业知识的支持,但这个问题可能会引起辩论、争论、投票或扩展讨论。如果您觉得这个问题可以改进并可能重新打开,visit the he
我有一个 javascript 文件,并且在该方法中有一个“测试”方法,我喜欢调用 C# 函数。 c# 函数与 javascript 文件不在同一文件中。 它位于 .cs 文件中。那么我该如何管理 j
需要检查我使用的文件/目录的权限 //filePath = path of file/directory access denied by user ( in windows ) File fil
我在一个目录中有很多 java 文件,我想在我的 Intellij 项目中使用它。但是我不想每次开始一个新项目时都将 java 文件复制到我的项目中。 我知道我可以在 Visual Studio 和
已关闭。此问题不符合Stack Overflow guidelines 。目前不接受答案。 这个问题似乎不是关于 a specific programming problem, a software
我有 3 个组件的 Twig 文件: 文件 1: {# content-here #} 文件 2: {{ title-here }} {# content-here #}
我得到了 mod_ldap.c 和 mod_authnz_ldap.c 文件。我需要使用 Linux 命令的 mod_ldap.so 和 mod_authnz_ldap.so 文件。 最佳答案 从 c
我想使用PIE在我的项目中使用 IE7。 但是我不明白的是,我只能在网络服务器上使用 .htc 文件吗? 我可以在没有网络服务器的情况下通过浏览器加载的本地页面中使用它吗? 我在 PIE 的文档中看到
我在 CI 管道中考虑这一点,我应该首先构建和测试我的应用程序,结果应该是一个 docker 镜像。 我想知道使用构建环境在构建服务器上构建然后运行测试是否更常见。也许为此使用构建脚本。最后只需将 j
using namespace std; struct WebSites { string siteName; int rank; string getSiteName() {
我是 Linux 新手,目前正在尝试使用 ginkgo USB-CAN 接口(interface) 的 API 编程功能。为了使用 C++ 对 API 进行编程,他们提供了库文件,其中包含三个带有 .
我刚学C语言,在实现一个程序时遇到了问题将 test.txt 文件作为程序的输入。 test.txt 文件的内容是: 1 30 30 40 50 60 2 40 30 50 60 60 3 30 20
如何连接两个tcpdump文件,使一个流量在文件中出现一个接一个?具体来说,我想“乘以”一个 tcpdump 文件,这样所有的 session 将一个接一个地按顺序重复几次。 最佳答案 mergeca
我有一个名为 input.MP4 的文件,它已损坏。它来自闭路电视摄像机。我什么都试过了,ffmpeg , VLC 转换,没有运气。但是,我使用了 mediainfo和 exiftool并提取以下信息
我想做什么? 我想提取 ISO 文件并编辑其中的文件,然后将其重新打包回 ISO 文件。 (正如你已经读过的) 我为什么要这样做? 我想开始修改 PSP ISO,为此我必须使用游戏资源、 Assets
给定一个 gzip 文件 Z,如果我将其解压缩为 Z',有什么办法可以重新压缩它以恢复完全相同的 gzip 文件 Z?在粗略阅读了 DEFLATE 格式后,我猜不会,因为任何给定的文件都可能在 DEF
我必须从数据库向我的邮件 ID 发送一封带有附件的邮件。 EXEC msdb.dbo.sp_send_dbmail @profile_name = 'Adventure Works Admin
我有一个大的 M4B 文件和一个 CUE 文件。我想将其拆分为多个 M4B 文件,或将其拆分为多个 MP3 文件(以前首选)。 我想在命令行中执行此操作(OS X,但如果需要可以使用 Linux),而
快速提问。我有一个没有实现文件的类的项目。 然后在 AppDelegate 我有: #import "AppDelegate.h" #import "SomeClass.h" @interface A
我是一名优秀的程序员,十分优秀!