I have a device with a microphone that is connected to my computer over Ethernet. Qt cannot see it as an audio device, so I read the packets it sends and put them into a QByteArray. I need to play those packets as a stream. Somewhere on the internet I found a solution to almost the same problem, but it uses the built-in microphone:
#include <QApplication>
#include <iostream>
#include <cassert>
#include <QCoreApplication>
#include <QAudioInput>
#include <QAudioOutput>
#include <QBuffer>
int main(int argc, char *argv[]) {
    QCoreApplication app(argc, argv);

    QBuffer rdBuff;
    QBuffer wrBuff;
    wrBuff.open(QBuffer::WriteOnly);
    rdBuff.open(QBuffer::ReadOnly);

    QObject::connect(&wrBuff, &QIODevice::bytesWritten, [&wrBuff, &rdBuff](qint64)
    {
        rdBuff.buffer().remove(0, rdBuff.pos());
        // set pointer to the beginning of the unread data
        const auto res = rdBuff.seek(0);
        assert(res);
        // write new data
        rdBuff.buffer().append(wrBuff.buffer());
        // remove all data that was already written
        wrBuff.buffer().clear();
        wrBuff.seek(0);
    });

    const auto decideAudioFormat = [](const QAudioDeviceInfo& devInfo)
    {
        QAudioFormat format;
        format.setSampleRate(8000);
        format.setChannelCount(1);
        format.setSampleSize(16);
        format.setCodec("audio/pcm");
        format.setByteOrder(QAudioFormat::LittleEndian);
        format.setSampleType(QAudioFormat::SignedInt);

        if (devInfo.isFormatSupported(format))
        {
            return format;
        }
        else
        {
            std::cerr << "Raw audio format not supported by backend, cannot play audio.\n";
            throw 0;
        }
    };

    QAudioInput audioInput(decideAudioFormat(QAudioDeviceInfo::defaultInputDevice()));
    QAudioOutput audioOutput(decideAudioFormat(QAudioDeviceInfo::defaultOutputDevice()));

    audioInput.start(&wrBuff);
    audioOutput.start(&rdBuff);

    return app.exec();
}
It works quite well, but I need to use a QByteArray as the source instead of the QAudioInput. Is there any possible solution?
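What I have in mind is roughly the following (a sketch only; onPacketReceived is a hypothetical handler name that would be called with each QByteArray read from the Ethernet device):

const auto onPacketReceived = [&wrBuff](const QByteArray &packet)
{
    // writing into wrBuff fires the bytesWritten lambda above,
    // which moves the bytes into rdBuff that audioOutput is reading from
    wrBuff.write(packet);
};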
Best answer
I'm not sure whether this answers your question directly, but one possible solution is to feed the output audio device manually (push mode) whenever new data arrives.
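Stripped down, push mode just means writing the PCM bytes yourself into the QIODevice returned by QAudioOutput::start(). A minimal sketch (assuming format is the PCM format from the question and pcmChunk is a QByteArray of freshly received samples):

QAudioOutput *audioOutput = new QAudioOutput(QAudioDeviceInfo::defaultOutputDevice(), format);
QIODevice *device = audioOutput->start(); // no QIODevice argument -> push mode
// later, whenever new samples arrive:
device->write(pcmChunk);

The AudioOutput class below builds on this idea, adding an intermediate QByteArray buffer, a minimum amount of buffered data before playback starts, and a timer that keeps the device fed.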
You can also record the sound with a custom class (inherited from QFile), feeding the file and the output audio device at the same time as the sound comes in.
Here is the complete example:
audiooutput.h:
#ifndef AUDIOOUTPUT_H
#define AUDIOOUTPUT_H
#include <QtCore>
#include <QtMultimedia>
#define MAX_BUFFERED_TIME 10*1000
static inline int timeToSize(int ms, const QAudioFormat &format)
{
    return ((format.channelCount() * (format.sampleSize() / 8) * format.sampleRate()) * ms / 1000);
}
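// For a concrete sense of scale (using the question's format: 1 channel,
// 16-bit samples, 8000 Hz): timeToSize(10 * 1000, format)
// = 1 * (16 / 8) * 8000 * 10000 / 1000 = 160000 bytes for 10 s of audio.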
class AudioOutput : public QObject
{
    Q_OBJECT
public:
    explicit AudioOutput(QObject *parent = nullptr);

public slots:
    bool start(const QAudioDeviceInfo &devinfo,
               const QAudioFormat &format,
               int time_to_buffer);
    void write(const QByteArray &data);

private slots:
    void verifyBuffer();
    void preplay();
    void play();

private:
    bool m_initialized;
    QAudioOutput *m_audio_output;
    QIODevice *m_device;
    QByteArray m_buffer;
    bool m_buffer_requested;
    bool m_play_called;
    int m_size_to_buffer;
    int m_time_to_buffer;
    int m_max_size_to_buffer;
    QAudioFormat m_format;
};

#endif // AUDIOOUTPUT_H
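Used from the outside, the class boils down to three calls (a sketch only; format and pcmChunk stand for the audio format and the incoming samples, and the actual wiring is shown in main.cpp further down):

AudioOutput output;
output.start(QAudioDeviceInfo::defaultOutputDevice(), format, 10 * 1000); // buffer 10 s before playing
output.write(pcmChunk); // call this for every new QByteArray of samples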
audiorecorder.h:
#ifndef AUDIORECORDER_H
#define AUDIORECORDER_H

#include <QtCore>
#include <QtMultimedia>

class AudioRecorder : public QFile
{
    Q_OBJECT
public:
    explicit AudioRecorder(const QString &name, const QAudioFormat &format, QObject *parent = nullptr);
    ~AudioRecorder();

    using QFile::open;

public slots:
    bool open();
    qint64 write(const QByteArray &data);
    void close();

private:
    void writeHeader();
    bool hasSupportedFormat();

    QAudioFormat format;
};

#endif // AUDIORECORDER_H
audiooutput.cpp:
#include "audiooutput.h"
AudioOutput::AudioOutput(QObject *parent) : QObject(parent)
{
m_initialized = false;
m_audio_output = nullptr;
m_device = nullptr;
m_buffer_requested = true;
m_play_called = false;
m_size_to_buffer = 0;
m_time_to_buffer = 0;
m_max_size_to_buffer = 0;
}
bool AudioOutput::start(const QAudioDeviceInfo &devinfo,
const QAudioFormat &format,
int time_to_buffer)
{
if (!devinfo.isFormatSupported(format))
{
qDebug() << "Format not supported by output device";
return m_initialized;
}
m_format = format;
int internal_buffer_size;
//Adjust internal buffer size
if (format.sampleRate() >= 44100)
internal_buffer_size = (1024 * 10) * format.channelCount();
else if (format.sampleRate() >= 24000)
internal_buffer_size = (1024 * 6) * format.channelCount();
else
internal_buffer_size = (1024 * 4) * format.channelCount();
//Initialize the audio output device
m_audio_output = new QAudioOutput(devinfo, format, this);
//Increase the buffer size to enable higher sample rates
m_audio_output->setBufferSize(internal_buffer_size);
m_time_to_buffer = time_to_buffer;
//Compute the size in bytes to be buffered based on the current format
m_size_to_buffer = timeToSize(m_time_to_buffer, m_format);
//Define a highest size that the buffer are allowed to have in the given time
//This value is used to discard too old buffered data
m_max_size_to_buffer = m_size_to_buffer + timeToSize(MAX_BUFFERED_TIME, m_format);
m_device = m_audio_output->start();
if (!m_device)
{
qDebug() << "Failed to open output audio device";
return m_initialized;
}
//Timer that helps to keep playing data while it's available on the internal buffer
QTimer *timer_play = new QTimer(this);
timer_play->setTimerType(Qt::PreciseTimer);
connect(timer_play, &QTimer::timeout, this, &AudioOutput::preplay);
timer_play->start(10);
//Timer that checks for too old data in the buffer
QTimer *timer_verifier = new QTimer(this);
connect(timer_verifier, &QTimer::timeout, this, &AudioOutput::verifyBuffer);
timer_verifier->start(qMax(m_time_to_buffer, 10));
m_initialized = true;
return m_initialized;
}
void AudioOutput::verifyBuffer()
{
if (m_buffer.size() >= m_max_size_to_buffer)
m_buffer.clear();
}
void AudioOutput::write(const QByteArray &data)
{
m_buffer.append(data);
preplay();
}
void AudioOutput::preplay()
{
if (!m_initialized)
return;
//Verify if exists a pending call to play function
//If not, call the play function async
if (!m_play_called)
{
m_play_called = true;
QMetaObject::invokeMethod(this, "play", Qt::QueuedConnection);
}
}
void AudioOutput::play()
{
//Set that last async call was triggered
m_play_called = false;
if (m_buffer.isEmpty())
{
//If data is empty set that nothing should be played
//until the buffer has at least the minimum buffered size already set
m_buffer_requested = true;
return;
}
else if (m_buffer.size() < m_size_to_buffer)
{
//If buffer doesn't contains enough data,
//check if exists a already flag telling that the buffer comes
//from a empty state and should not play anything until have the minimum data size
if (m_buffer_requested)
return;
}
else
{
//Buffer is ready and data can be played
m_buffer_requested = false;
}
int readlen = m_audio_output->periodSize();
int chunks = m_audio_output->bytesFree() / readlen;
//Play data while it's available in the output device
while (chunks)
{
//Get chunk from the buffer
QByteArray samples = m_buffer.mid(0, readlen);
int len = samples.size();
m_buffer.remove(0, len);
//Write data to the output device
if (len)
m_device->write(samples);
//If chunk is smaller than the output chunk size, exit loop
if (len != readlen)
break;
//Decrease the available number of chunks
chunks--;
}
}
audiorecorder.cpp:
#include "audiorecorder.h"
AudioRecorder::AudioRecorder(const QString &name, const QAudioFormat &format, QObject *parent) : QFile(name, parent), format(format)
{
}
AudioRecorder::~AudioRecorder()
{
if (!isOpen())
return;
close();
}
bool AudioRecorder::hasSupportedFormat()
{
return (format.sampleSize() == 8
&& format.sampleType() == QAudioFormat::UnSignedInt)
|| (format.sampleSize() > 8
&& format.sampleType() == QAudioFormat::SignedInt
&& format.byteOrder() == QAudioFormat::LittleEndian);
}
bool AudioRecorder::open()
{
if (!hasSupportedFormat())
{
setErrorString("Wav PCM supports only 8-bit unsigned samples "
"or 16-bit (or more) signed samples (in little endian)");
return false;
}
else
{
if (!QFile::open(ReadWrite | Truncate))
return false;
writeHeader();
return true;
}
}
qint64 AudioRecorder::write(const QByteArray &data)
{
return QFile::write(data);
}
void AudioRecorder::writeHeader()
{
QDataStream out(this);
out.setByteOrder(QDataStream::LittleEndian);
// RIFF chunk
out.writeRawData("RIFF", 4);
out << quint32(0); // Placeholder for the RIFF chunk size (filled by close())
out.writeRawData("WAVE", 4);
// Format description chunk
out.writeRawData("fmt ", 4);
out << quint32(16); // "fmt " chunk size (always 16 for PCM)
out << quint16(1); // data format (1 => PCM)
out << quint16(format.channelCount());
out << quint32(format.sampleRate());
out << quint32(format.sampleRate() * format.channelCount()
* format.sampleSize() / 8 ); // bytes per second
out << quint16(format.channelCount() * format.sampleSize() / 8); // Block align
out << quint16(format.sampleSize()); // Significant Bits Per Sample
// Data chunk
out.writeRawData("data", 4);
out << quint32(0); // Placeholder for the data chunk size (filled by close())
Q_ASSERT(pos() == 44); // Must be 44 for WAV PCM
}
void AudioRecorder::close()
{
// Fill the header size placeholders
quint32 fileSize = size();
QDataStream out(this);
// Set the same ByteOrder like in writeHeader()
out.setByteOrder(QDataStream::LittleEndian);
// RIFF chunk size
seek(4);
out << quint32(fileSize - 8);
// data chunk size
seek(40);
out << quint32(fileSize - 44);
QFile::close();
}
main.cpp:
#include <QtCore>
#include "audiooutput.h"
#include "audiorecorder.h"
#include <signal.h>

QByteArray tone_generator()
{
    //Tone generator from http://www.cplusplus.com/forum/general/129827/
    const unsigned int samplerate = 8000;
    const unsigned short channels = 1;

    const double pi = M_PI;

    const qint16 amplitude = std::numeric_limits<qint16>::max() * 0.5;

    const unsigned short n_frequencies = 8;
    const unsigned short n_seconds_each = 1;

    float frequencies[n_frequencies] = {55.0, 110.0, 220.0, 440.0, 880.0, 1760.0, 3520.0, 7040.0};

    const int n_samples = channels * samplerate * n_frequencies * n_seconds_each;

    QVector<qint16> data;
    data.resize(n_samples);

    int index = n_samples / n_frequencies;

    for (unsigned short i = 0; i < n_frequencies; ++i)
    {
        float freq = frequencies[i];
        double d = (samplerate / freq);
        int c = 0;

        for (int j = index * i; j < index * (i + 1); j += 2)
        {
            double deg = 360.0 / d;
            data[j] = data[j + (channels - 1)] = qSin((c++ * deg) * pi / 180.0) * amplitude;
        }
    }

    return QByteArray((char*)data.data(), data.size() * sizeof(qint16));
}

void signalHandler(int signum)
{
    qDebug().nospace() << "Interrupt signal (" << signum << ") received.";
    qApp->exit();
}

int main(int argc, char *argv[])
{
    //Handle console close to ensure destructors are called
#ifdef Q_OS_WIN
    signal(SIGBREAK, signalHandler);
#else
    signal(SIGHUP, signalHandler);
#endif
    signal(SIGINT, signalHandler);

    QCoreApplication a(argc, argv);

    QAudioFormat format;
    format.setSampleRate(8000);
    format.setChannelCount(1);
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    AudioOutput output;
    AudioRecorder file("tone.wav", format);

    if (!output.start(QAudioDeviceInfo::defaultOutputDevice(), format, 10 * 1000)) //10 seconds of buffer
        return a.exec();

    if (!file.open())
    {
        qDebug() << qPrintable(file.errorString());
        return a.exec();
    }

    qDebug() << "Started!";

    QByteArray audio_data = tone_generator();

    QTimer timer;
    QObject::connect(&timer, &QTimer::timeout, [&]{
        qDebug() << "Writing" << audio_data.size() << "bytes";
        output.write(audio_data);
        file.write(audio_data);
    });

    qDebug() << "Writing" << audio_data.size() << "bytes";
    output.write(audio_data);
    file.write(audio_data);

    timer.start(8000); //8 seconds because we generated 8 seconds of sound

    return a.exec();
}
Regarding c++ - QAudioInput from an array, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/50076208/