
node.js - Where do I retrieve the audio file? — Arduino-Photon project


I'm just getting started with electronics and built a project with the Arduino-based Spark Photon. The project page is here: http://hackster.io/middleca/sending-sound-over-the-internet

I uploaded the following two files (.ino and .js) to the Photon, which should then capture and transmit the sound (or so I assumed). I expected a test.wav file to be created. But where can I find that file, so I can check whether everything worked?

The main.ino file:

#define MICROPHONE_PIN A5
#define AUDIO_BUFFER_MAX 8192

int audioStartIdx = 0, audioEndIdx = 0;
uint16_t audioBuffer[AUDIO_BUFFER_MAX];
uint16_t txBuffer[AUDIO_BUFFER_MAX];

// version without timers
unsigned long lastRead = micros();
char myIpAddress[24];

TCPClient audioClient;
TCPClient checkClient;
TCPServer audioServer = TCPServer(3443);

void setup() {
  Serial.begin(115200);
  pinMode(MICROPHONE_PIN, INPUT);

  // so we know where to connect, try:
  // particle get MY_DEVICE_NAME ipAddress
  Spark.variable("ipAddress", myIpAddress, STRING);
  IPAddress myIp = WiFi.localIP();
  sprintf(myIpAddress, "%d.%d.%d.%d", myIp[0], myIp[1], myIp[2], myIp[3]);

  // 1/8000th of a second is 125 microseconds
  audioServer.begin();

  lastRead = micros();
}

void loop() {
  checkClient = audioServer.available();
  if (checkClient.connected()) {
    audioClient = checkClient;
  }

  //listen for 100ms, taking a sample every 125us,
  //and then send that chunk over the network.
  listenAndSend(100);
}

void listenAndSend(int delay) {
  unsigned long startedListening = millis();

  while ((millis() - startedListening) < delay) {
    unsigned long time = micros();

    if (lastRead > time) {
      // time wrapped?
      // let's just skip a beat for now, whatever.
      lastRead = time;
    }

    //125 microseconds is 1/8000th of a second
    if ((time - lastRead) > 125) {
      lastRead = time;
      readMic();
    }
  }
  sendAudio();
}


// Callback for Timer 1
void readMic(void) {
  uint16_t value = analogRead(MICROPHONE_PIN);
  if (audioEndIdx >= AUDIO_BUFFER_MAX) {
    audioEndIdx = 0;
  }
  audioBuffer[audioEndIdx++] = value;
}

void copyAudio(uint16_t *bufferPtr) {
  //if end is after start, read from start->end
  //if end is before start, then we wrapped; read from start->max, then 0->end

  int endSnapshotIdx = audioEndIdx;
  bool wrapped = endSnapshotIdx < audioStartIdx;
  int endIdx = (wrapped) ? AUDIO_BUFFER_MAX : endSnapshotIdx;
  int c = 0;

  for (int i = audioStartIdx; i < endIdx; i++) {
    // copy the unwrapped part of the ring buffer
    bufferPtr[c++] = audioBuffer[i];
  }

  if (wrapped) {
    // we wrapped, so copy the remainder from the start of the ring buffer
    for (int i = 0; i < endSnapshotIdx; i++) {
      bufferPtr[c++] = audioBuffer[i];
    }
  }

  //and we're done.
  audioStartIdx = audioEndIdx;

  if (c < AUDIO_BUFFER_MAX) {
    bufferPtr[c] = -1;   // 0xFFFF marks the end of this chunk
  }
}

// Callback for Timer 1
void sendAudio(void) {
  copyAudio(txBuffer);

  int i = 0;
  uint16_t val = 0;

  if (audioClient.connected()) {
    write_socket(audioClient, txBuffer);
  }
  else {
    // no client connected: dump the samples to the serial port instead
    while ((val = txBuffer[i++]) < 65535) {
      Serial.print(val);
      Serial.print(',');
    }
    Serial.println("DONE");
  }
}


// an audio sample is 16bit, we need to convert it to bytes for sending over the network
void write_socket(TCPClient socket, uint16_t *buffer) {
  int i = 0;
  uint16_t val = 0;

  int tcpIdx = 0;
  uint8_t tcpBuffer[1024];

  while ((val = buffer[i++]) < 65535) {
    if ((tcpIdx + 1) >= 1024) {
      socket.write(tcpBuffer, tcpIdx);
      tcpIdx = 0;
    }

    tcpBuffer[tcpIdx] = val & 0xff;
    tcpBuffer[tcpIdx + 1] = (val >> 8);
    tcpIdx += 2;
  }

  // any leftovers?
  if (tcpIdx > 0) {
    socket.write(tcpBuffer, tcpIdx);
  }
}
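For reference (this helper is not part of the project): write_socket() above sends each 16-bit sample as two bytes, low byte first, so on the receiving end a data chunk can be turned back into sample values with Buffer.readUInt16LE. A minimal sketch, with a made-up helper name:

// decodeSamples.js - illustrative only; the function name is invented for this sketch
function decodeSamples(chunk) {
  var samples = [];
  for (var i = 0; i + 1 < chunk.length; i += 2) {
    samples.push(chunk.readUInt16LE(i));   // raw analogRead() value, low byte first
  }
  return samples;
}

// e.g. decodeSamples(new Buffer([0x34, 0x12])) returns [0x1234]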

And the waveRecorder.js file:
// make sure you have Node.js Installed!
// Get the IP address of your photon, and put it here:

// CLI command to get your photon's IP address
//
// particle get MY_DEVICE_NAME ipAddress

// Put your IP here!
var settings = {
  ip: "192.168.0.54",
  port: 3443
};

/**
 * Created by middleca on 7/18/15.
 */

//based on a sample from here
// http://stackoverflow.com/questions/19548755/nodejs-write-binary-data-into-writablestream-with-buffer

var fs = require("fs");

var samplesLength = 1000;
var sampleRate = 8000;

var outStream = fs.createWriteStream("test.wav");

var writeHeader = function() {
  var b = new Buffer(1024);
  b.write('RIFF', 0);

  /* file length */
  b.writeUInt32LE(32 + samplesLength * 2, 4);
  //b.writeUint32LE(0, 4);

  b.write('WAVE', 8);

  /* format chunk identifier */
  b.write('fmt ', 12);

  /* format chunk length */
  b.writeUInt32LE(16, 16);

  /* sample format (raw) */
  b.writeUInt16LE(1, 20);

  /* channel count */
  b.writeUInt16LE(1, 22);

  /* sample rate */
  b.writeUInt32LE(sampleRate, 24);

  /* byte rate (sample rate * block align) */
  b.writeUInt32LE(sampleRate * 2, 28);

  /* block align (channel count * bytes per sample) */
  b.writeUInt16LE(2, 32);

  /* bits per sample */
  b.writeUInt16LE(16, 34);

  /* data chunk identifier */
  b.write('data', 36);

  /* data chunk length */
  //b.writeUInt32LE(40, samplesLength * 2);
  b.writeUInt32LE(0, 40);

  outStream.write(b.slice(0, 50));
};





writeHeader(outStream);





var net = require('net');
console.log("connecting...");
var client = net.connect(settings.port, settings.ip, function () {
  client.setNoDelay(true);

  client.on("data", function (data) {
    try {
      console.log("GOT DATA");
      outStream.write(data);
      //outStream.flush();
      console.log("got chunk of " + data.toString('hex'));
    }
    catch (ex) {
      console.error("Er!" + ex);
    }
  });
});




setTimeout(function() {
  console.log('recorded for 10 seconds');
  client.end();
  outStream.end();
  process.exit(0);
}, 10 * 1000);
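A side note on the header (not part of the original question): writeHeader() leaves the data-chunk length at offset 40 set to 0 and writes 50 bytes (b.slice(0, 50)) rather than the usual 44, so some players may refuse or mis-read the resulting test.wav. A minimal sketch, assuming the field offsets used above, that patches the two length fields once recording has finished:

// patchHeader.js - a sketch only; assumes test.wav is in the current folder and uses
// the RIFF field offsets written by writeHeader() (overall size at 4, data size at 40)
var fs = require("fs");

var size = fs.statSync("test.wav").size;
var fd = fs.openSync("test.wav", "r+");

var riffSize = new Buffer(4);
riffSize.writeUInt32LE(size - 8, 0);      // everything after the 'RIFF' id and this field
fs.writeSync(fd, riffSize, 0, 4, 4);      // overwrite offset 4

var dataSize = new Buffer(4);
dataSize.writeUInt32LE(size - 44, 0);     // everything after a standard 44-byte header
fs.writeSync(fd, dataSize, 0, 4, 40);     // overwrite offset 40

fs.closeSync(fd);

Because the original code writes 50 header bytes, the first six "data" bytes are just the uninitialized tail of the header buffer; the patch simply counts them as audio, which at worst adds a tiny click at the start.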

Best answer

Darn! Such a beginner's question... hardly worth asking!

Anyway, here is the answer.

First of all, you misunderstood: the .ino file goes onto the Photon, while the waveRecorder.js file stays on your computer (or a server) and is run whenever you want to retrieve the audio. As you can read in the code, the .ino sketch checks on every pass of loop() whether something wants to connect; once a connection is made, it streams the sound, which gets written to a test.wav file next to waveRecorder.js (more precisely, in the directory you start node from). That "something wants to connect" happens when you start waveRecorder.js. Make sure Node.js is installed.
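Since waveRecorder.js opens the file with a relative path (fs.createWriteStream("test.wav")), you can print the exact location it will be written to with a one-liner; a minimal sketch, assuming nothing beyond the listing above:

// whereIsTestWav.js - prints where fs.createWriteStream("test.wav") will write
var path = require("path");
console.log(path.resolve("test.wav"));   // resolved against the current working directory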

So, to sum up:

  • Download the two files (main.ino and waveRecorder.js) into a folder on your computer, say ../xx/folderName
  • Then configure the IP address in both files with your Photon's IP address
  • Flash main.ino to the Photon (in a terminal: particle flash abcdefgh123456578 "xx/../folderName/main.ino")
  • Then run waveRecorder.js by typing node "xx/../folderName/waveRecorder.js" in a terminal.

  • That should do it. It even worked for me :) (A quick sanity check is sketched right after this list.)
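A tiny sanity check, assuming you run it from the folder where node was started (which is where test.wav is written) and keep the file name hard-coded in waveRecorder.js:

// checkWav.js - a small check, not part of the original project
var fs = require("fs");

var stats = fs.statSync("test.wav");
// writeHeader() emits 50 bytes up front, so anything larger means samples arrived
console.log("test.wav is " + stats.size + " bytes");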

Regarding node.js - Where do I retrieve the audio file? — Arduino-Photon project, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/32883364/
