python - PyAudio -- How to capture microphone and system sound in a single stream?


I'm trying to build an application with pyaudio that records sound from both the speakers and the microphone, but I just can't figure out how to record them at the same time. I tried enabling Stereo Mix, but that didn't work because I could only hear the speaker output. This is the code I use, which records audio from the default microphone in a background thread with pyaudio:

import pyaudio
import wave
import threading
import time
import subprocess

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "tmp/tmp.wav"

class recorder:
    def __init__(self):
        self.going = False
        self.process = None
        self.filename = "ScreenCapture.mpg"

    def record(self, filename):
        try:
            if self.process.is_alive():
                self.going = False
        except AttributeError:
            print("test")
        self.process = threading.Thread(target=self._record)
        self.process.start()
        self.filename = filename

    def _record(self):
        p = pyaudio.PyAudio()
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)

        print("* recording")

        frames = []

        self.going = True

        while self.going:
            data = stream.read(CHUNK)
            frames.append(data)

        print("* done recording")

        stream.stop_stream()
        stream.close()
        p.terminate()

        wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
        wf.close()

    def stop_recording(self):
        self.going = False

I'm on Windows, and I can use other libraries besides PyAudio. I just need to get this working.

Edit: I found this code that records the speaker output, but I can't get it working inside my application:

import pyaudio
import wave

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()

SPEAKERS = p.get_default_output_device_info()["hostApi"]
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK,
                input_host_api_specific_stream_info=SPEAKERS,
                as_loopback=True)

print("* recording")

frames = []

for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)

print("* done recording")

stream.stop_stream()
stream.close()
p.terminate()

wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()

Update: I can record my speakers and my microphone with this:

import pyaudio
import wave
import numpy as np


CHUNK = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
RECORD_SECONDS = 2
WAVE_OUTPUT_FILENAME = "tmp.wav"


p = pyaudio.PyAudio()


for i in range(0, p.get_device_count()):
    print(i, p.get_device_info_by_index(i)['name'])


#stream using as_loopback to get sound from OS
stream = p.open(
    format=FORMAT,
    channels=2,
    rate=RATE,
    input=True,
    frames_per_buffer=CHUNK,
    input_device_index=2,
    as_loopback=True)

##stream using my Microphone's input device
stream2 = p.open(
    format=FORMAT,
    channels=1,
    rate=RATE,
    input=True,
    frames_per_buffer=CHUNK,
    input_device_index=1)
    #as_loopback=False)


frames = []
frames2 = []


for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    data2 = stream2.read(CHUNK)
    frames.append(data)
    frames2.append(data2)


#frames = as_loopback sound data (Speakers)
frames= b''.join(frames);

#frames2 = sound data of Microphone
frames2= b''.join(frames2);

#decoding Speaker data
Sdecoded = np.frombuffer(frames, 'int16')

#decoding the microphone data
Mdecoded = np.frombuffer(frames2, 'int16')

#converting Speaker data into a Numpy vector (making life easier when picking up audio channels)
Sdecoded= np.array(Sdecoded, dtype='int16')

#getting the data on the right side
direito=Sdecoded[1::2]

#getting the data on the left side
esquerdo=Sdecoded[::2]

#mixing everything to mono = add right side + left side + Microphone decoded data that is already mono
mix=(direito+esquerdo+Mdecoded)

#ensuring no value goes beyond the limits of short int
signal=np.clip(mix, -32767, 32766)

#encode the data again
encodecoded = wave.struct.pack("%dh"%(len(signal)), *list(signal))


#stop all streams and terminate pyaudio
stream.stop_stream()
stream.close()
stream2.stop_stream()
stream2.close()
p.terminate()


#recording mixed audio in mono
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes((encodecoded))
wf.close()
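
A note on the mixing step above: NumPy adds int16 arrays with wrap-around arithmetic, so direito + esquerdo + Mdecoded can already overflow before np.clip is applied. A minimal sketch of a safer variant, reusing the same variable names and casting to a wider type before the sum:

# cast to int32 so the sum cannot wrap around the 16-bit range
mix = direito.astype(np.int32) + esquerdo.astype(np.int32) + Mdecoded.astype(np.int32)

# clip back into the int16 range, convert, and serialize for the wave module
signal = np.clip(mix, -32768, 32767).astype(np.int16)
encodecoded = signal.tobytes()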

But when I try to merge it into my code, it doesn't work. What is going on? I think I'm close to solving it:

import numpy as np
import pyaudio
import wave
import threading
import time
import subprocess

CHUNK = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "tmp/tmp.wav"

p = pyaudio.PyAudio()

for i in range(0, p.get_device_count()):
    print(i, p.get_device_info_by_index(i)['name'])


class recorder:
    def __init__(self):
        self.going = False
        self.process = None
        self.filename = "ScreenCapture.mpg"

    def record(self, filename):
        try:
            if self.process.is_alive():
                self.going = False
        except AttributeError:
            print("test")
        self.process = threading.Thread(target=self._record)
        self.process.start()
        self.filename = filename

    def _record(self):
        p = pyaudio.PyAudio()
        #stream using as_loopback to get sound from OS
        stream = p.open(
            format=FORMAT,
            channels=2,
            rate=RATE,
            input=True,
            frames_per_buffer=CHUNK,
            input_device_index=2,
            as_loopback=True)
        ##stream using my Microphone's input device
        stream2 = p.open(
            format=FORMAT,
            channels=1,
            rate=RATE,
            input=True,
            frames_per_buffer=CHUNK,
            input_device_index=1)
            # as_loopback=False)
        #print("* recording")

        frames = []
        frames2 = []

        self.going = True

        while self.going:
            data = stream.read(CHUNK)
            data2 = stream2.read(CHUNK)
            frames.append(data)
            frames2.append(data2)

        # frames = as_loopback sound data (Speakers)
        frames = b''.join(frames)

        # frames2 = sound data of Microphone
        frames2 = b''.join(frames2)

        # decoding Speaker data
        Sdecoded = np.frombuffer(frames, 'int16')

        # decoding the microphone data
        Mdecoded = np.frombuffer(frames2, 'int16')

        # converting Speaker data into a Numpy vector (making life easier when picking up audio channels)
        Sdecoded = np.array(Sdecoded, dtype='int16')

        # getting the data on the right side
        direito = Sdecoded[1::2]

        # getting the data on the left side
        esquerdo = Sdecoded[::2]

        # mixing everything to mono = add right side + left side + Microphone decoded data that is already mono
        mix = (direito + esquerdo + Mdecoded)

        # ensuring no value goes beyond the limits of short int
        signal = np.clip(mix, -32767, 32766)

        # encode the data again
        encodecoded = wave.struct.pack("%dh" % (len(signal)), *list(signal))

        # print("* done recording")

        stream.stop_stream()
        stream.close()
        stream2.stop_stream()
        stream2.close()
        p.terminate()

        wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        wf.setnchannels(1)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(encodecoded)
        wf.close()

    def stop_recording(self):
        self.going = False

I kept the code very clean and commented every part so you can follow what is going on. At the start I added a for loop over PyAudio's devices to list the interfaces on my OS:

0 Mapeador de som da Microsoft - Input
1 Microfone (Realtek(R) Audio)
2 Mixagem estéreo (Realtek(R) Aud
3 Mapeador de som da Microsoft - Output
4 Alto-falantes (Realtek(R) Audio
5 Alto-falantes (Realtek(R) Audio)
6 Microfone (Realtek(R) Audio)
7 Mixagem estéreo (Realtek(R) Audio)
8 Speakers 1 (Realtek HD Audio output with SST)
9 Speakers 2 (Realtek HD Audio output with SST)
10 Alto-falante (Realtek HD Audio output with SST)
11 Microfone (Realtek HD Audio Mic input)
12 Mixagem estéreo (Realtek HD Audio Stereo input)

Best Answer

You can record the 2 different devices (by providing separate device indexes) into separate WAV files using 2 separate threads.

Then mix the two files using the pydub library:

from pydub import AudioSegment

speakersound = AudioSegment.from_file("/path/speaker.wav")
micsound = AudioSegment.from_file("/path/mic.wav")

mixsound = speakersound.overlay(micsound)

mixsound.export("/path/mixsound.wav", format='wav')
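
The answer only shows the mixing step. Below is a minimal sketch of the recording step it describes, assuming a hypothetical record_device helper and example device indexes taken from a list like the one printed earlier (1 for the microphone, 2 for the Stereo Mix / loopback device); with a stock PyAudio build the Stereo Mix device is opened as a regular input, while a loopback-enabled fork would use as_loopback instead:

import threading
import wave

import pyaudio

CHUNK = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
RECORD_SECONDS = 5

def record_device(device_index, channels, filename):
    # each thread owns its own PyAudio instance and stream
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=channels,
                    rate=RATE,
                    input=True,
                    input_device_index=device_index,
                    frames_per_buffer=CHUNK)

    frames = []
    for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
        frames.append(stream.read(CHUNK))

    stream.stop_stream()
    stream.close()

    # write the captured frames to a WAV file
    wf = wave.open(filename, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    p.terminate()

# device indexes are examples; pick yours from the printed device list
mic_thread = threading.Thread(target=record_device, args=(1, 1, "mic.wav"))
spk_thread = threading.Thread(target=record_device, args=(2, 2, "speaker.wav"))
mic_thread.start()
spk_thread.start()
mic_thread.join()
spk_thread.join()

The resulting mic.wav and speaker.wav are then the two files overlaid by the pydub snippet above.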

Regarding python - PyAudio -- how to capture microphone and system sound in a single stream?, a similar question was found on Stack Overflow: https://stackoverflow.com/questions/59665469/
