
c# - Sending NAudio/Opus encoded audio from a device as RTP


First, my apologies. A long time ago I used to tinker with VB5, and I have been away from programming for years — I'm still relearning the basics and only recently started learning C#/.NET. I'm also new to this site, so please be patient with me. Enough backstory.

Using this wrapper for Opus (I added the wrapper project to my own solution) together with NAudio, I believe I have things set up to actively capture audio from my device (sound card) and, using the sample encoder code, put the encoded audio into _playBuffer.

My next task is to take the encoded data from there and send it over RTP, so it reaches a client application on another machine, where it will be decoded and played on that machine's sound device.

Am I correct in understanding that the data in _playBuffer is ready-made encoded data? Or does it need to be split up differently for RTP packets? (I saw a uLAW example here, but I'm not sure whether I can adapt it to my needs, since the downloaded source appears to be commented in German — and I barely speak and write English as a first language — so even those comments weren't terribly helpful.)

(Am I even using the right terminology?) As it stands, the stock code you see feeds the _playBuffer data back out through WaveOut, just like his example — I've left that in here to illustrate my (possible lack of) understanding. (If it's "playable", it should be "sendable".)

A further question: my intention is to multicast the stream peer-to-peer over the internet — although I'm not sure multicast is what I actually want.

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using NAudio;
using NAudio.CoreAudioApi;
using NAudio.Wave;
using FragLabs.Audio.Codecs;

namespace VUmeterappStereo
{
    public partial class Form1 : Form
    {
        private void Form1_Load(object sender, EventArgs e)
        {
            // Populate the capture/playback device lists.
            for (int i = 0; i < WaveIn.DeviceCount; i++)
            {
                comboBox1.Items.Add(WaveIn.GetCapabilities(i).ProductName);
            }
            if (WaveIn.DeviceCount > 0)
                comboBox1.SelectedIndex = 0;
            for (int i = 0; i < WaveOut.DeviceCount; i++)
            {
                comboBox2.Items.Add(WaveOut.GetCapabilities(i).ProductName);
            }
            if (WaveOut.DeviceCount > 0)
                comboBox2.SelectedIndex = 0;
        }

        private void button1_Click(object sender, EventArgs e)
        {
            button2.Enabled = true;
            button1.Enabled = false;
            StartEncoding();
        }

        private void button2_Click(object sender, EventArgs e)
        {
            button1.Enabled = true;
            button2.Enabled = false;
            StopEncoding();
        }

        WaveIn _waveIn;
        WaveOut _waveOut;
        BufferedWaveProvider _playBuffer;
        OpusEncoder _encoder;
        OpusDecoder _decoder;
        int _segmentFrames;
        int _bytesPerSegment;
        ulong _bytesSent;
        DateTime _startTime;
        Timer _timer = null;

        void StartEncoding()
        {
            _startTime = DateTime.Now;
            _bytesSent = 0;
            _segmentFrames = 960; // 960 samples = 20 ms per frame at 48 kHz
            _encoder = OpusEncoder.Create(48000, 1, FragLabs.Audio.Codecs.Opus.Application.Voip);
            _encoder.Bitrate = 8192;
            _decoder = OpusDecoder.Create(48000, 1);
            _bytesPerSegment = _encoder.FrameByteCount(_segmentFrames);

            _waveIn = new WaveIn(WaveCallbackInfo.FunctionCallback());
            _waveIn.BufferMilliseconds = 50;
            _waveIn.DeviceNumber = comboBox1.SelectedIndex;
            _waveIn.DataAvailable += _waveIn_DataAvailable;
            _waveIn.WaveFormat = new WaveFormat(48000, 16, 1);

            _playBuffer = new BufferedWaveProvider(new WaveFormat(48000, 16, 1));

            _waveOut = new WaveOut(WaveCallbackInfo.FunctionCallback());
            _waveOut.DeviceNumber = comboBox2.SelectedIndex;
            _waveOut.Init(_playBuffer);

            _waveOut.Play();
            _waveIn.StartRecording();

            if (_timer == null)
            {
                _timer = new Timer();
                _timer.Interval = 1000;
                _timer.Tick += _timer_Tick;
            }
            _timer.Start();
        }

        void _timer_Tick(object sender, EventArgs e)
        {
            var timeDiff = DateTime.Now - _startTime;
            var bytesPerSecond = _bytesSent / timeDiff.TotalSeconds;
            Console.WriteLine("{0} Bps", bytesPerSecond);
        }

        byte[] _notEncodedBuffer = new byte[0];

        void _waveIn_DataAvailable(object sender, WaveInEventArgs e)
        {
            // Prepend any bytes left over from the previous callback.
            byte[] soundBuffer = new byte[e.BytesRecorded + _notEncodedBuffer.Length];
            for (int i = 0; i < _notEncodedBuffer.Length; i++)
                soundBuffer[i] = _notEncodedBuffer[i];
            for (int i = 0; i < e.BytesRecorded; i++)
                soundBuffer[i + _notEncodedBuffer.Length] = e.Buffer[i];

            // Carve the buffer into whole segments; carry the remainder over.
            int byteCap = _bytesPerSegment;
            int segmentCount = (int)Math.Floor((decimal)soundBuffer.Length / byteCap);
            int segmentsEnd = segmentCount * byteCap;
            int notEncodedCount = soundBuffer.Length - segmentsEnd;
            _notEncodedBuffer = new byte[notEncodedCount];
            for (int i = 0; i < notEncodedCount; i++)
            {
                _notEncodedBuffer[i] = soundBuffer[segmentsEnd + i];
            }

            // Encode each segment, then immediately decode it again for local playback.
            for (int i = 0; i < segmentCount; i++)
            {
                byte[] segment = new byte[byteCap];
                for (int j = 0; j < segment.Length; j++)
                    segment[j] = soundBuffer[(i * byteCap) + j];
                int len;
                byte[] buff = _encoder.Encode(segment, segment.Length, out len);
                _bytesSent += (ulong)len;
                buff = _decoder.Decode(buff, len, out len);
                _playBuffer.AddSamples(buff, 0, len);
            }
        }

        void StopEncoding()
        {
            _timer.Stop();
            _waveIn.StopRecording();
            _waveIn.Dispose();
            _waveIn = null;
            _waveOut.Stop();
            _waveOut.Dispose();
            _waveOut = null;
            _playBuffer = null;
            _encoder.Dispose();
            _encoder = null;
            _decoder.Dispose();
            _decoder = null;
        }

        private void timer1_Tick(object sender, EventArgs e)
        {
            // VU meter: show the default render device's left/right peak levels.
            MMDeviceEnumerator de = new MMDeviceEnumerator();
            MMDevice device = de.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
            //float volume = (float)device.AudioMeterInformation.MasterPeakValue * 100;
            float volLeft = (float)device.AudioMeterInformation.PeakValues[0] * 100;
            float volRight = (float)device.AudioMeterInformation.PeakValues[1] * 100;
            progressBar1.Value = (int)volLeft;
            progressBar2.Value = (int)volRight;
        }

        private void timer2_Tick(object sender, EventArgs e)
        {
        }
    }
}

Thanks for anything you can contribute toward helping me understand how to get this data sent out over an RTP stream.

Oh yes, this all started with me recreating a VU meter from a tutorial example — hence the namespace name and the extra code, which does work.

Best Answer

The code sample encodes the audio and then immediately decodes it again for local playback. What you need to do is send the bytes contained in buff over the network.
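Concretely — as a hedged sketch, where _rtpSender is a hypothetical transport object whose possible shape is sketched later in this answer — the end of the segment loop in _waveIn_DataAvailable would stop decoding locally and hand each encoded frame to the network instead:

int len;
byte[] buff = _encoder.Encode(segment, segment.Length, out len);
_bytesSent += (ulong)len;

// Instead of decoding buff locally, wrap it in an RTP packet and send
// it to the remote peer (_rtpSender is hypothetical; see the sketch below):
_rtpSender.Send(buff, len, _segmentFrames);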

This piece of code in the example above receives the audio from the sound card:

byte[] _notEncodedBuffer = new byte[0];

void _waveIn_DataAvailable(object sender, WaveInEventArgs e)
{
    byte[] soundBuffer = new byte[e.BytesRecorded + _notEncodedBuffer.Length];
    for (int i = 0; i < _notEncodedBuffer.Length; i++)
        soundBuffer[i] = _notEncodedBuffer[i];
    for (int i = 0; i < e.BytesRecorded; i++)
        soundBuffer[i + _notEncodedBuffer.Length] = e.Buffer[i];

    int byteCap = _bytesPerSegment;
    int segmentCount = (int)Math.Floor((decimal)soundBuffer.Length / byteCap);
    int segmentsEnd = segmentCount * byteCap;
    int notEncodedCount = soundBuffer.Length - segmentsEnd;
    _notEncodedBuffer = new byte[notEncodedCount];
    for (int i = 0; i < notEncodedCount; i++)
    {
        _notEncodedBuffer[i] = soundBuffer[segmentsEnd + i];
    }

    for (int i = 0; i < segmentCount; i++)
    {
        byte[] segment = new byte[byteCap];
        for (int j = 0; j < segment.Length; j++)
            segment[j] = soundBuffer[(i * byteCap) + j];
        int len;
        byte[] buff = _encoder.Encode(segment, segment.Length, out len);
        _bytesSent += (ulong)len;
        buff = _decoder.Decode(buff, len, out len);
        _playBuffer.AddSamples(buff, 0, len);
    }
}

At this line:

byte[] buff = _encoder.Encode(segment, segment.Length, out len);

is the point at which you create the RTP packet:

https://www.rfc-editor.org/rfc/rfc3550

and then send it over the network in C#, typically via UDP:

Sending UDP Packet in C#
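Since this is where most people get stuck, here is a minimal, hedged sketch of both steps combined: building the 12-byte RTP header from RFC 3550 around each encoded Opus frame and sending it with UdpClient. Everything here is illustrative rather than canonical — the RtpSender class name is made up, payload type 111 is merely a dynamic payload type commonly used for Opus, and the SSRC is a random constant identifying this stream:

using System;
using System.Net;
using System.Net.Sockets;

// Minimal RTP-over-UDP sender sketch (hypothetical class, not part of
// NAudio or the Opus wrapper). Assumes payload type 111, a random SSRC,
// and one encoded Opus frame per packet.
class RtpSender : IDisposable
{
    readonly UdpClient _udp = new UdpClient();
    readonly IPEndPoint _remote;
    readonly uint _ssrc = (uint)new Random().Next();
    ushort _sequence;
    uint _timestamp;

    public RtpSender(string host, int port)
    {
        _remote = new IPEndPoint(IPAddress.Parse(host), port);
    }

    // Wraps one encoded frame in a 12-byte RTP header (RFC 3550) and
    // sends the result as a single UDP datagram.
    public void Send(byte[] opusFrame, int length, int samplesPerFrame)
    {
        byte[] packet = new byte[12 + length];
        packet[0] = 0x80;                      // version 2, no padding/extension/CSRC
        packet[1] = 111;                       // marker 0, dynamic payload type (assumed)
        packet[2] = (byte)(_sequence >> 8);    // sequence number, big-endian
        packet[3] = (byte)_sequence;
        packet[4] = (byte)(_timestamp >> 24);  // timestamp, big-endian
        packet[5] = (byte)(_timestamp >> 16);
        packet[6] = (byte)(_timestamp >> 8);
        packet[7] = (byte)_timestamp;
        packet[8] = (byte)(_ssrc >> 24);       // SSRC: identifies this stream
        packet[9] = (byte)(_ssrc >> 16);
        packet[10] = (byte)(_ssrc >> 8);
        packet[11] = (byte)_ssrc;
        Buffer.BlockCopy(opusFrame, 0, packet, 12, length);

        _udp.Send(packet, packet.Length, _remote);

        _sequence++;                           // wraps at 65535, as RFC 3550 expects
        _timestamp += (uint)samplesPerFrame;   // 960 samples = 20 ms at 48 kHz
    }

    public void Dispose()
    {
        _udp.Close();
    }
}

The timestamp advances by the number of samples per frame (960 here, i.e. 20 ms at 48 kHz), and the sequence number increments by one per packet — that is what lets the receiver detect loss and reordering.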

Once buff has been extracted from the RTP packet on the other side, the rest of the code belongs in the receiving application:

buff = _decoder.Decode(buff, len, out len);
_playBuffer.AddSamples(buff, 0, len);
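On the receiving side, the flow is: receive a UDP datagram, strip the RTP header, decode, and queue the PCM for playback. Below is a minimal, hedged sketch under the same assumptions as the sender above (fixed 12-byte header, one Opus frame per packet, 48 kHz mono); the RtpReceiver name and Run method are illustrative. A real receiver would also use the RTP sequence numbers and timestamps to handle loss, reordering, and jitter, which this sketch deliberately ignores:

using System;
using System.Net;
using System.Net.Sockets;
using NAudio.Wave;
using FragLabs.Audio.Codecs;

// Minimal receive-and-play loop sketch (hypothetical class).
class RtpReceiver
{
    public void Run(int port)
    {
        var decoder = OpusDecoder.Create(48000, 1);
        var playBuffer = new BufferedWaveProvider(new WaveFormat(48000, 16, 1));
        playBuffer.DiscardOnBufferOverflow = true;  // don't throw if playback stalls
        var waveOut = new WaveOut(WaveCallbackInfo.FunctionCallback());
        waveOut.Init(playBuffer);
        waveOut.Play();

        using (var udp = new UdpClient(port))
        {
            var remote = new IPEndPoint(IPAddress.Any, 0);
            while (true)
            {
                byte[] packet = udp.Receive(ref remote);  // blocks for one datagram
                if (packet.Length <= 12)
                    continue;                             // too short to carry a payload

                // Strip the 12-byte RTP header to recover the Opus frame.
                byte[] opusFrame = new byte[packet.Length - 12];
                Buffer.BlockCopy(packet, 12, opusFrame, 0, opusFrame.Length);

                int len;
                byte[] pcm = decoder.Decode(opusFrame, opusFrame.Length, out len);
                playBuffer.AddSamples(pcm, 0, len);       // queue decoded PCM for playback
            }
        }
    }
}

As for the multicast question: UdpClient does support joining a multicast group via JoinMulticastGroup, but multicast generally does not traverse the open internet, so for internet peer-to-peer streaming plain unicast UDP to each peer (or a relay server) is usually what's practical.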

Regarding c# - Sending NAudio/Opus encoded audio from a device as RTP, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/34506330/
