I am reading the documentation, e.g. Capturing a Stream or Loopback Recording, but I cannot find a good reproducible example (with includes, build instructions, etc.) of how to record blocks of audio from a loopback device (sometimes called "what you hear" or "stereo mix") using the Windows WASAPI.
Do you have a simple reproducible example that shows how to record blocks of audio from a WASAPI loopback device in C++?
Here is a similar (working) example in Python:
import soundcard as sc  # installed with: pip install soundcard

lb = sc.all_microphones(include_loopback=True)[0]
with lb.recorder(samplerate=44100) as mic:
    while True:
        data = mic.record(numframes=None)
        print(data)  # chunks of audio data (448 samples x 2 channels as an array by default)
Best Answer
Here is an example of loopback-mode audio capture. It is based on the documentation's Capturing a Stream sample, with the two modifications pointed out in Loopback Recording:
// In the call to the IMMDeviceEnumerator::GetDefaultAudioEndpoint method, change the first parameter (dataFlow) from eCapture to eRender.
hr = pEnumerator->GetDefaultAudioEndpoint(
eRender, eConsole, &pDevice);
...
// In the call to the IAudioClient::Initialize method, change the value of the second parameter (StreamFlags) from 0 to AUDCLNT_STREAMFLAGS_LOOPBACK.
hr = pAudioClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_LOOPBACK,
hnsRequestedDuration,
0,
pwfx,
NULL);
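As an aside: if you want a render endpoint other than the default one (the closest analogue to the Python example's all_microphones(include_loopback=True) list), you can enumerate the active render endpoints and pick one by index. A minimal sketch, error handling omitted; index 0 is only an illustration:

// Enumerate active render endpoints instead of taking the default one.
IMMDeviceCollection* pCollection = NULL;
hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pCollection);
UINT count = 0;
hr = pCollection->GetCount(&count);
hr = pCollection->Item(0, &pDevice); // pick an index after inspecting the devices
pCollection->Release();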
The parts the documentation leaves out are CopyData() and the file-writing functions (WriteWaveHeader() and FinishWaveFile()). Example implementations of these functions are shown below; see the blog post Sample - WASAPI loopback capture (record what you hear) for more details.
HRESULT MyAudioSink::CopyData(BYTE* pData, UINT32 NumFrames, BOOL* pDone, WAVEFORMATEX* pwfx, HMMIO hFile)
{
    if (0 == NumFrames) {
        wprintf(L"IAudioCaptureClient::GetBuffer said to read 0 frames\n");
        return E_UNEXPECTED;
    }
    LONG lBytesToWrite = NumFrames * pwfx->nBlockAlign;
    // pData is NULL when the caller saw AUDCLNT_BUFFERFLAGS_SILENT;
    // write zeros in that case so the file stays contiguous.
    BYTE* pSilence = NULL;
    if (NULL == pData) {
        pSilence = (BYTE*)calloc(NumFrames, pwfx->nBlockAlign);
        if (NULL == pSilence) {
            return E_OUTOFMEMORY;
        }
        pData = pSilence;
    }
#pragma prefast(suppress: __WARNING_INCORRECT_ANNOTATION, "IAudioCaptureClient::GetBuffer SAL annotation implies a 1-byte buffer")
    LONG lBytesWritten = mmioWrite(hFile, reinterpret_cast<PCHAR>(pData), lBytesToWrite);
    free(pSilence);
    if (lBytesToWrite != lBytesWritten) {
        wprintf(L"mmioWrite wrote %ld bytes : expected %ld bytes", lBytesWritten, lBytesToWrite);
        return E_UNEXPECTED;
    }
    static int CallCount = 0;
    cout << "CallCount = " << CallCount++ << ", NumFrames: " << NumFrames << endl;
    if (clock() > 10 * CLOCKS_PER_SEC) // record ~10 seconds, measured from the clock() call at the top of main()
        *pDone = TRUE;
    return S_OK;
}
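For the typical shared-mode mix format (stereo 32-bit float, so nBlockAlign = 2 channels × 4 bytes = 8 bytes per frame), a 448-frame packet like the one in the Python example above therefore writes 448 × 8 = 3,584 bytes per call.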
HRESULT WriteWaveHeader(HMMIO hFile, LPCWAVEFORMATEX pwfx, MMCKINFO* pckRIFF, MMCKINFO* pckData) {
MMRESULT result;
// make a RIFF/WAVE chunk
pckRIFF->ckid = MAKEFOURCC('R', 'I', 'F', 'F');
pckRIFF->fccType = MAKEFOURCC('W', 'A', 'V', 'E');
result = mmioCreateChunk(hFile, pckRIFF, MMIO_CREATERIFF);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"RIFF/WAVE\") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// make a 'fmt ' chunk (within the RIFF/WAVE chunk)
MMCKINFO chunk;
chunk.ckid = MAKEFOURCC('f', 'm', 't', ' ');
result = mmioCreateChunk(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"fmt \") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// write the WAVEFORMATEX data to it
LONG lBytesInWfx = sizeof(WAVEFORMATEX) + pwfx->cbSize;
LONG lBytesWritten =
mmioWrite(
hFile,
reinterpret_cast<PCHAR>(const_cast<LPWAVEFORMATEX>(pwfx)),
lBytesInWfx
);
if (lBytesWritten != lBytesInWfx) {
wprintf(L"mmioWrite(fmt data) wrote %u bytes; expected %u bytes", lBytesWritten, lBytesInWfx);
return E_FAIL;
}
// ascend from the 'fmt ' chunk
result = mmioAscend(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"fmt \" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// make a 'fact' chunk whose data is (DWORD)0
chunk.ckid = MAKEFOURCC('f', 'a', 'c', 't');
result = mmioCreateChunk(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"fmt \") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// write (DWORD)0 to it
// this is cleaned up later
DWORD frames = 0;
lBytesWritten = mmioWrite(hFile, reinterpret_cast<PCHAR>(&frames), sizeof(frames));
if (lBytesWritten != sizeof(frames)) {
wprintf(L"mmioWrite(fact data) wrote %u bytes; expected %u bytes", lBytesWritten, (UINT32)sizeof(frames));
return E_FAIL;
}
// ascend from the 'fact' chunk
result = mmioAscend(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"fact\" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// make a 'data' chunk and leave the data pointer there
pckData->ckid = MAKEFOURCC('d', 'a', 't', 'a');
result = mmioCreateChunk(hFile, pckData, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"data\") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
return S_OK;
}
HRESULT FinishWaveFile(HMMIO hFile, MMCKINFO* pckRIFF, MMCKINFO* pckData) {
MMRESULT result;
result = mmioAscend(hFile, pckData, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"data\" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
result = mmioAscend(hFile, pckRIFF, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"RIFF/WAVE\" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
return S_OK;
}
Call WriteWaveHeader before pAudioClient->Start(), and call FinishWaveFile after pAudioClient->Stop().
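In outline, the order of operations (condensed from RecordAudioStream in Update #2 below) is:

WriteWaveHeader(hFile, pwfx, &ckRIFF, &ckData); // before capture starts
pAudioClient->Start();
// ... capture loop: GetNextPacketSize -> GetBuffer -> CopyData -> ReleaseBuffer ...
pAudioClient->Stop();
FinishWaveFile(hFile, &ckRIFF, &ckData);        // mmioAscend patches the chunk sizes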
With this, the program records roughly 10 seconds of whatever audio is playing on your Windows machine.
Update #1: the main() entry point (the globals and helper declarations it relies on are shown in the complete program under Update #2):
#include <Windows.h>
#include <mmsystem.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <time.h>
#include <iostream>
int main()
{
    clock(); // first clock() call; CopyData's 10-second limit is measured from here
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    // Create the output file. Some mmioOpen flags cause it to write back into
    // this MMIOINFO buffer, but none of the flags used here do.
    MMIOINFO mi = { 0 };
    hFile = mmioOpen(
        (LPWSTR)fileName,
        &mi,
        MMIO_WRITE | MMIO_CREATE
    );
    if (NULL == hFile) {
        wprintf(L"mmioOpen(\"%ls\", ...) failed. wErrorRet == %u", fileName, mi.wErrorRet);
        return E_FAIL;
    }
MyAudioSink AudioSink;
RecordAudioStream(&AudioSink);
mmioClose(hFile, 0);
CoUninitialize();
return 0;
}
Compile command:
cl -DUNICODE loopbackCapture.cpp /link winmm.lib user32.lib Kernel32.lib Ole32.lib
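Running the resulting loopbackCapture.exe from the same Developer Command Prompt then records about 10 seconds of whatever is playing and writes loopback-capture.wav to the current directory.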
Update #2: the complete program:
#include <Windows.h>
#include <mmsystem.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <time.h>
#include <stdlib.h> // calloc/free for the silence buffer in CopyData
#include <iostream>
using namespace std;
#pragma comment(lib, "Winmm.lib")
WCHAR fileName[] = L"loopback-capture.wav";
BOOL bDone = FALSE;
HMMIO hFile = NULL;
// REFERENCE_TIME time units per second and per millisecond
#define REFTIMES_PER_SEC 10000000
#define REFTIMES_PER_MILLISEC 10000
#define EXIT_ON_ERROR(hres) \
if (FAILED(hres)) { goto Exit; }
#define SAFE_RELEASE(punk) \
if ((punk) != NULL) \
{ (punk)->Release(); (punk) = NULL; }
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
class MyAudioSink
{
public:
HRESULT CopyData(BYTE* pData, UINT32 NumFrames, BOOL* pDone, WAVEFORMATEX* pwfx, HMMIO hFile);
};
HRESULT WriteWaveHeader(HMMIO hFile, LPCWAVEFORMATEX pwfx, MMCKINFO* pckRIFF, MMCKINFO* pckData);
HRESULT FinishWaveFile(HMMIO hFile, MMCKINFO* pckRIFF, MMCKINFO* pckData);
HRESULT RecordAudioStream(MyAudioSink* pMySink);
int main()
{
    clock(); // first clock() call; CopyData's 10-second limit is measured from here
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    // Create the output file. Some mmioOpen flags cause it to write back into
    // this MMIOINFO buffer, but none of the flags used here do.
    MMIOINFO mi = { 0 };
    hFile = mmioOpen(
        (LPWSTR)fileName,
        &mi,
        MMIO_WRITE | MMIO_CREATE
    );
    if (NULL == hFile) {
        wprintf(L"mmioOpen(\"%ls\", ...) failed. wErrorRet == %u", fileName, mi.wErrorRet);
        return E_FAIL;
    }
MyAudioSink AudioSink;
RecordAudioStream(&AudioSink);
mmioClose(hFile, 0);
CoUninitialize();
return 0;
}
HRESULT MyAudioSink::CopyData(BYTE* pData, UINT32 NumFrames, BOOL* pDone, WAVEFORMATEX* pwfx, HMMIO hFile)
{
    if (0 == NumFrames) {
        wprintf(L"IAudioCaptureClient::GetBuffer said to read 0 frames\n");
        return E_UNEXPECTED;
    }
    LONG lBytesToWrite = NumFrames * pwfx->nBlockAlign;
    // pData is NULL when the caller saw AUDCLNT_BUFFERFLAGS_SILENT;
    // write zeros in that case so the file stays contiguous.
    BYTE* pSilence = NULL;
    if (NULL == pData) {
        pSilence = (BYTE*)calloc(NumFrames, pwfx->nBlockAlign);
        if (NULL == pSilence) {
            return E_OUTOFMEMORY;
        }
        pData = pSilence;
    }
#pragma prefast(suppress: __WARNING_INCORRECT_ANNOTATION, "IAudioCaptureClient::GetBuffer SAL annotation implies a 1-byte buffer")
    LONG lBytesWritten = mmioWrite(hFile, reinterpret_cast<PCHAR>(pData), lBytesToWrite);
    free(pSilence);
    if (lBytesToWrite != lBytesWritten) {
        wprintf(L"mmioWrite wrote %ld bytes : expected %ld bytes", lBytesWritten, lBytesToWrite);
        return E_UNEXPECTED;
    }
    static int CallCount = 0;
    cout << "CallCount = " << CallCount++ << ", NumFrames: " << NumFrames << endl;
    if (clock() > 10 * CLOCKS_PER_SEC) // record ~10 seconds, measured from the clock() call at the top of main()
        *pDone = TRUE;
    return S_OK;
}
HRESULT RecordAudioStream(MyAudioSink* pMySink)
{
HRESULT hr;
REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;
REFERENCE_TIME hnsActualDuration;
UINT32 bufferFrameCount;
UINT32 numFramesAvailable;
IMMDeviceEnumerator* pEnumerator = NULL;
IMMDevice* pDevice = NULL;
IAudioClient* pAudioClient = NULL;
IAudioCaptureClient* pCaptureClient = NULL;
WAVEFORMATEX* pwfx = NULL;
UINT32 packetLength = 0;
BYTE* pData;
DWORD flags;
MMCKINFO ckRIFF = { 0 };
MMCKINFO ckData = { 0 };
hr = CoCreateInstance(
CLSID_MMDeviceEnumerator, NULL,
CLSCTX_ALL, IID_IMMDeviceEnumerator,
(void**)& pEnumerator);
EXIT_ON_ERROR(hr)
hr = pEnumerator->GetDefaultAudioEndpoint(
eRender, eConsole, &pDevice);
EXIT_ON_ERROR(hr)
hr = pDevice->Activate(
IID_IAudioClient, CLSCTX_ALL,
NULL, (void**)& pAudioClient);
EXIT_ON_ERROR(hr)
hr = pAudioClient->GetMixFormat(&pwfx);
EXIT_ON_ERROR(hr)
hr = pAudioClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_LOOPBACK,
hnsRequestedDuration,
0,
pwfx,
NULL);
EXIT_ON_ERROR(hr)
// Get the size of the allocated buffer.
hr = pAudioClient->GetBufferSize(&bufferFrameCount);
EXIT_ON_ERROR(hr)
hr = pAudioClient->GetService(
IID_IAudioCaptureClient,
(void**)& pCaptureClient);
EXIT_ON_ERROR(hr)
    hr = WriteWaveHeader((HMMIO)hFile, pwfx, &ckRIFF, &ckData);
    // WriteWaveHeader does its own logging; jump to cleanup rather than leaking the COM objects
    EXIT_ON_ERROR(hr)
// Calculate the actual duration of the allocated buffer.
hnsActualDuration = (double)REFTIMES_PER_SEC *
bufferFrameCount / pwfx->nSamplesPerSec;
hr = pAudioClient->Start(); // Start recording.
EXIT_ON_ERROR(hr)
// Each loop fills about half of the shared buffer.
while (bDone == FALSE)
{
// Sleep for half the buffer duration.
Sleep(hnsActualDuration / REFTIMES_PER_MILLISEC / 2);
hr = pCaptureClient->GetNextPacketSize(&packetLength);
EXIT_ON_ERROR(hr)
while (packetLength != 0)
{
// Get the available data in the shared buffer.
hr = pCaptureClient->GetBuffer(
&pData,
&numFramesAvailable,
&flags, NULL, NULL);
EXIT_ON_ERROR(hr)
if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
{
pData = NULL; // Tell CopyData to write silence.
}
// Copy the available capture data to the audio sink.
hr = pMySink->CopyData(
pData, numFramesAvailable, &bDone, pwfx, (HMMIO)hFile);
EXIT_ON_ERROR(hr)
hr = pCaptureClient->ReleaseBuffer(numFramesAvailable);
EXIT_ON_ERROR(hr)
hr = pCaptureClient->GetNextPacketSize(&packetLength);
EXIT_ON_ERROR(hr)
}
}
hr = pAudioClient->Stop(); // Stop recording.
EXIT_ON_ERROR(hr)
    // Note the argument order: FinishWaveFile takes the RIFF chunk before the data chunk.
    hr = FinishWaveFile((HMMIO)hFile, &ckRIFF, &ckData);
    // FinishWaveFile does its own logging
    EXIT_ON_ERROR(hr)
Exit:
CoTaskMemFree(pwfx);
SAFE_RELEASE(pEnumerator)
SAFE_RELEASE(pDevice)
SAFE_RELEASE(pAudioClient)
SAFE_RELEASE(pCaptureClient)
return hr;
}
HRESULT WriteWaveHeader(HMMIO hFile, LPCWAVEFORMATEX pwfx, MMCKINFO* pckRIFF, MMCKINFO* pckData) {
MMRESULT result;
// make a RIFF/WAVE chunk
pckRIFF->ckid = MAKEFOURCC('R', 'I', 'F', 'F');
pckRIFF->fccType = MAKEFOURCC('W', 'A', 'V', 'E');
result = mmioCreateChunk(hFile, pckRIFF, MMIO_CREATERIFF);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"RIFF/WAVE\") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// make a 'fmt ' chunk (within the RIFF/WAVE chunk)
MMCKINFO chunk;
chunk.ckid = MAKEFOURCC('f', 'm', 't', ' ');
result = mmioCreateChunk(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"fmt \") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// write the WAVEFORMATEX data to it
LONG lBytesInWfx = sizeof(WAVEFORMATEX) + pwfx->cbSize;
LONG lBytesWritten =
mmioWrite(
hFile,
reinterpret_cast<PCHAR>(const_cast<LPWAVEFORMATEX>(pwfx)),
lBytesInWfx
);
if (lBytesWritten != lBytesInWfx) {
wprintf(L"mmioWrite(fmt data) wrote %u bytes; expected %u bytes", lBytesWritten, lBytesInWfx);
return E_FAIL;
}
// ascend from the 'fmt ' chunk
result = mmioAscend(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"fmt \" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// make a 'fact' chunk whose data is (DWORD)0
chunk.ckid = MAKEFOURCC('f', 'a', 'c', 't');
result = mmioCreateChunk(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"fmt \") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// write (DWORD)0 to it
// this is cleaned up later
DWORD frames = 0;
lBytesWritten = mmioWrite(hFile, reinterpret_cast<PCHAR>(&frames), sizeof(frames));
if (lBytesWritten != sizeof(frames)) {
wprintf(L"mmioWrite(fact data) wrote %u bytes; expected %u bytes", lBytesWritten, (UINT32)sizeof(frames));
return E_FAIL;
}
// ascend from the 'fact' chunk
result = mmioAscend(hFile, &chunk, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"fact\" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
// make a 'data' chunk and leave the data pointer there
pckData->ckid = MAKEFOURCC('d', 'a', 't', 'a');
result = mmioCreateChunk(hFile, pckData, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioCreateChunk(\"data\") failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
return S_OK;
}
HRESULT FinishWaveFile(HMMIO hFile, MMCKINFO* pckRIFF, MMCKINFO* pckData) {
MMRESULT result;
result = mmioAscend(hFile, pckData, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"data\" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
result = mmioAscend(hFile, pckRIFF, 0);
if (MMSYSERR_NOERROR != result) {
wprintf(L"mmioAscend(\"RIFF/WAVE\" failed: MMRESULT = 0x%08x", result);
return E_FAIL;
}
return S_OK;
}
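One caveat on the output format: on most systems IAudioClient::GetMixFormat returns a 32-bit float format (often wrapped in WAVE_FORMAT_EXTENSIBLE), so the WAV produced above contains float samples. If you want 16-bit PCM instead, the blog sample linked above coerces the mix format in place before calling IAudioClient::Initialize. A sketch of that coercion; it needs <mmreg.h> and <ksmedia.h> for the subtype GUIDs, and assumes the audio engine accepts the adjusted format in shared mode, as it does in the blog sample:

// Coerce a float mix format to 16-bit PCM in place before Initialize.
switch (pwfx->wFormatTag) {
case WAVE_FORMAT_IEEE_FLOAT:
    pwfx->wFormatTag = WAVE_FORMAT_PCM;
    pwfx->wBitsPerSample = 16;
    pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
    pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
    break;
case WAVE_FORMAT_EXTENSIBLE:
{
    WAVEFORMATEXTENSIBLE* pEx = reinterpret_cast<WAVEFORMATEXTENSIBLE*>(pwfx);
    if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
        pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        pEx->Samples.wValidBitsPerSample = 16;
        pwfx->wBitsPerSample = 16;
        pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
        pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
    }
    break;
}
}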
On the topic of c++ - recording an audio stream with WASAPI, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/64318206/