
java - Creating a spectrogram from a .wav using FFT in Java


After research and a lot of trial and error, I have reached a point where I can build a spectrogram that I think has both right and wrong elements.

1. First, I read the .wav file into a byte array and extract only the data part.

2. I convert the byte array into a double array, taking the average of the left and right channels. I also noted that 1 sample of 1 channel consists of 2 bytes, so 4 bytes become 1 double.

3. For a specific window size that is a power of 2, I apply the FFT from here and obtain the amplitudes in the frequency domain. This is one vertical strip of the spectrogram image.

4. I repeat this with the same window size, overlapping across the whole data, and obtain the spectrogram.

Below is the code that reads the .wav file into a double array:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;

public class readWAV2Array {

    private byte[] entireFileData;

    //SR = sampling rate
    public double getSR(){
        ByteBuffer wrapped = ByteBuffer.wrap(Arrays.copyOfRange(entireFileData, 24, 28)); // big-endian by default
        double SR = wrapped.order(java.nio.ByteOrder.LITTLE_ENDIAN).getInt();
        return SR;
    }

    public readWAV2Array(String filepath, boolean print_info) throws IOException{
        Path path = Paths.get(filepath);
        this.entireFileData = Files.readAllBytes(path);

        if (print_info){

            //extract format
            String format = new String(Arrays.copyOfRange(entireFileData, 8, 12), "UTF-8");

            //extract number of channels
            int noOfChannels = entireFileData[22];
            String noOfChannels_str;
            if (noOfChannels == 2)
                noOfChannels_str = "2 (stereo)";
            else if (noOfChannels == 1)
                noOfChannels_str = "1 (mono)";
            else
                noOfChannels_str = noOfChannels + " (more than 2 channels)";

            //extract sampling rate (SR)
            int SR = (int) this.getSR();

            //extract bit depth (BPS = bits per sample)
            int BPS = entireFileData[34];

            System.out.println("---------------------------------------------------");
            System.out.println("File path: " + filepath);
            System.out.println("File format: " + format);
            System.out.println("Number of channels: " + noOfChannels_str);
            System.out.println("Sampling rate: " + SR);
            System.out.println("Bit depth: " + BPS);
            System.out.println("---------------------------------------------------");

        }
    }

    public double[] getByteArray (){
        byte[] data_raw = Arrays.copyOfRange(entireFileData, 44, entireFileData.length);
        int totalLength = data_raw.length;

        //declare double array for mono
        int new_length = totalLength/4;
        double[] data_mono = new double[new_length];

        double left, right;
        for (int i = 0; i < new_length; i++){
            left = ((data_raw[i] & 0xff) << 8) | (data_raw[i+1] & 0xff);
            right = ((data_raw[i+2] & 0xff) << 8) | (data_raw[i+3] & 0xff);
            data_mono[i] = (left+right)/2.0;
        }
        return data_mono;
    }
}

Below is the main program to run:

import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;

import javax.imageio.ImageIO;

public class App {

    public static Color getColor(double power) {
        double H = power * 0.4; // Hue (note 0.4 = Green, see huge chart below)
        double S = 1.0; // Saturation
        double B = 1.0; // Brightness

        return Color.getHSBColor((float)H, (float)S, (float)B);
    }

    public static void main(String[] args) {
        // TODO Auto-generated method stub
        String filepath = "audio_work/Sine_Sweep_Full_Spectrum_20_Hz_20_kHz_audiocheck.wav";
        try {

            //get raw double array containing .WAV data
            readWAV2Array audioTest = new readWAV2Array(filepath, true);
            double[] rawData = audioTest.getByteArray();
            int length = rawData.length;

            //initialize parameters for FFT
            int WS = 2048; //WS = window size
            int OF = 8;    //OF = overlap factor
            int windowStep = WS/OF;

            //calculate FFT parameters
            double SR = audioTest.getSR();
            double time_resolution = WS/SR;
            double frequency_resolution = SR/WS;
            double highest_detectable_frequency = SR/2.0;
            double lowest_detectable_frequency = 5.0*SR/WS;

            System.out.println("time_resolution: " + time_resolution*1000 + " ms");
            System.out.println("frequency_resolution: " + frequency_resolution + " Hz");
            System.out.println("highest_detectable_frequency: " + highest_detectable_frequency + " Hz");
            System.out.println("lowest_detectable_frequency: " + lowest_detectable_frequency + " Hz");

            //initialize plotData array
            int nX = (length-WS)/windowStep;
            int nY = WS;
            double[][] plotData = new double[nX][nY];

            //apply FFT and find MAX and MIN amplitudes

            double maxAmp = Double.MIN_VALUE;
            double minAmp = Double.MAX_VALUE;

            double amp_square;

            double[] inputImag = new double[length];

            for (int i = 0; i < nX; i++){
                Arrays.fill(inputImag, 0.0);
                double[] WS_array = FFT.fft(Arrays.copyOfRange(rawData, i*windowStep, i*windowStep+WS), inputImag, true);
                for (int j = 0; j < nY; j++){
                    amp_square = (WS_array[2*j]*WS_array[2*j]) + (WS_array[2*j+1]*WS_array[2*j+1]);
                    if (amp_square == 0.0){
                        plotData[i][j] = amp_square;
                    }
                    else{
                        plotData[i][j] = 10 * Math.log10(amp_square);
                    }

                    //find MAX and MIN amplitude
                    if (plotData[i][j] > maxAmp)
                        maxAmp = plotData[i][j];
                    else if (plotData[i][j] < minAmp)
                        minAmp = plotData[i][j];

                }
            }

            System.out.println("---------------------------------------------------");
            System.out.println("Maximum amplitude: " + maxAmp);
            System.out.println("Minimum amplitude: " + minAmp);
            System.out.println("---------------------------------------------------");

            //Normalization
            double diff = maxAmp - minAmp;
            for (int i = 0; i < nX; i++){
                for (int j = 0; j < nY; j++){
                    plotData[i][j] = (plotData[i][j]-minAmp)/diff;
                }
            }

            //plot image
            BufferedImage theImage = new BufferedImage(nX, nY, BufferedImage.TYPE_INT_RGB);
            double ratio;
            for(int x = 0; x<nX; x++){
                for(int y = 0; y<nY; y++){
                    ratio = plotData[x][y];

                    //theImage.setRGB(x, y, new Color(red, green, 0).getRGB());
                    Color newColor = getColor(1.0-ratio);
                    theImage.setRGB(x, y, newColor.getRGB());
                }
            }
            File outputfile = new File("saved.png");
            ImageIO.write(theImage, "png", outputfile);

        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

}
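The FFT class used in the loop above comes from the implementation linked in step 3 and is not included in this post. A minimal sketch of what it is assumed to look like, with the same signature the main program relies on (a real input array, an imaginary input array of which only the first WS entries are used, and a forward/inverse flag), returning the spectrum interleaved as out[2*k] = Re(X[k]) and out[2*k+1] = Im(X[k]), would be:

public class FFT {

    // Minimal sketch: iterative radix-2 Cooley-Tukey FFT (not the linked implementation).
    // inputReal must have a power-of-two length n; only the first n entries of inputImag
    // are used. forward = true computes the forward transform, false the inverse (scaled
    // by 1/n). The result is interleaved: out[2*k] = real part, out[2*k+1] = imaginary part.
    public static double[] fft(final double[] inputReal, double[] inputImag, boolean forward) {
        int n = inputReal.length;
        if (Integer.bitCount(n) != 1)
            throw new IllegalArgumentException("length must be a power of 2");

        double[] re = new double[n];
        double[] im = new double[n];

        // bit-reversal permutation of the input
        for (int i = 0; i < n; i++) {
            int j = Integer.reverse(i) >>> (32 - Integer.numberOfTrailingZeros(n));
            re[j] = inputReal[i];
            im[j] = inputImag[i];
        }

        // butterfly stages
        double sign = forward ? -1.0 : 1.0;
        for (int size = 2; size <= n; size <<= 1) {
            double angle = sign * 2.0 * Math.PI / size;
            double wRe = Math.cos(angle), wIm = Math.sin(angle);
            for (int start = 0; start < n; start += size) {
                double curRe = 1.0, curIm = 0.0;
                for (int k = 0; k < size / 2; k++) {
                    int even = start + k, odd = start + k + size / 2;
                    double tRe = curRe * re[odd] - curIm * im[odd];
                    double tIm = curRe * im[odd] + curIm * re[odd];
                    re[odd] = re[even] - tRe;
                    im[odd] = im[even] - tIm;
                    re[even] += tRe;
                    im[even] += tIm;
                    double nextRe = curRe * wRe - curIm * wIm;
                    curIm = curRe * wIm + curIm * wRe;
                    curRe = nextRe;
                }
            }
        }

        // interleave real and imaginary parts; scale the inverse transform by 1/n
        double scale = forward ? 1.0 : 1.0 / n;
        double[] out = new double[2 * n];
        for (int k = 0; k < n; k++) {
            out[2 * k] = re[k] * scale;
            out[2 * k + 1] = im[k] * scale;
        }
        return out;
    }
}

Any FFT that returns its result in that interleaved layout can be dropped in the same way.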

However, the image I get from a .wav playing a 20 Hz - 20 kHz sweep sound looks like this:

The color indicates sound intensity: red (high) --> green (low).

[image: spectrogram produced by the code above]

The correct result should look like the image below:

[image: reference spectrogram of the 20 Hz - 20 kHz sweep]

I would appreciate any corrections/improvements/suggestions for my project. Thank you in advance for any comments on my problem.

Best answer

Fortunately, you seem to have more right than wrong.

The first big problem, responsible for the extra red lines, is how you decode the data in readWAV2Array.getByteArray. Since the samples span 4 bytes, you have to index them in multiples of 4 (e.g. bytes 0, 1, 2, 3 for sample 0, bytes 4, 5, 6, 7 for sample 1), otherwise you are reading overlapping blocks of 4 bytes (e.g. bytes 0, 1, 2, 3 for sample 0, bytes 1, 2, 3, 4 for sample 1). The other thing with this conversion is that you must explicitly cast the result to the signed short type before assigning it to left and right (which are of type double) in order to get a signed 16-bit result out of the unsigned bytes. This should give you a conversion loop that looks like:

for (int i = 0; 4*i+3 < totalLength; i++){
    left = (short)((data_raw[4*i+1] & 0xff) << 8) | (data_raw[4*i] & 0xff);
    right = (short)((data_raw[4*i+3] & 0xff) << 8) | (data_raw[4*i+2] & 0xff);
    data_mono[i] = (left+right)/2.0;
}
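To see why the explicit cast matters, here is a small worked example with a hypothetical pair of bytes (0x00 then 0x80, i.e. a little-endian encoding of the most negative 16-bit sample), not taken from the original data:

byte lo = (byte) 0x00, hi = (byte) 0x80;
int unsignedResult = ((hi & 0xff) << 8) | (lo & 0xff);            // 32768: the sign is lost
short signedSample = (short) (((hi & 0xff) << 8) | (lo & 0xff));  // -32768: correct signed 16-bit value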

At this point, you should start getting a plot with a strong line representing the 20 Hz - 20 kHz chirp:

[image: fixed decoding]

But you should notice that you are actually getting 2 lines. That is because for real-valued signals the spectrum has Hermitian symmetry, so the spectrum amplitudes above the Nyquist frequency (half the sampling rate, 44100 Hz / 2 in this case) are a redundant reflection of the spectrum below the Nyquist frequency. You can plot only the non-redundant part below the Nyquist frequency by changing the definition of nY in main to:

int nY = WS/2 + 1;

which will give you:

[image: only the non-redundant spectrum]

This is almost what we are looking for, but the sweep of increasing frequency produces a plot with a descending line. That is because your indexing places the 0 Hz frequency at index 0, at the top of the plot, and the 22050 Hz frequency at index nY-1, at the bottom. To flip the graph around and get the more usual 0 Hz at the bottom and 22050 Hz at the top, you can change the indexing to use:

plotData[i][nY-j-1] = 10 * Math.log10(amp_square);

You should now have a plot that looks like what you expected (though with a different color map):

[image: 0 Hz at the bottom]

One final note: while I understand your intention to avoid taking the logarithm of 0 when converting to decibels, leaving those outputs as linear-scale amplitudes in that special case could produce unexpected results. I would instead select a cutoff threshold amplitude for the protection:

// select threshold based on the expected spectrum amplitudes
// e.g. 80dB below your signal's spectrum peak amplitude
double threshold = 1.0;
// limit values and convert to dB
plotData[i][nY-j-1] = 10 * Math.log10(Math.max(amp_square,threshold));
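One way to derive such a threshold (a sketch only, under the assumption that the raw squared magnitudes from the FFT pass are kept in a hypothetical ampSquared array, which the posted code does not currently do) would be:

// Hypothetical helper: pick a clamp threshold a fixed number of dB below the
// peak squared magnitude of the spectrogram. ampSquared[i][j] is assumed to
// hold the raw |X[k]|^2 values computed in the FFT loop.
static double thresholdBelowPeak(double[][] ampSquared, double dbBelowPeak) {
    double peak = 0.0;
    for (double[] column : ampSquared)
        for (double value : column)
            peak = Math.max(peak, value);
    return peak * Math.pow(10.0, -dbBelowPeak / 10.0);
}

Calling thresholdBelowPeak(ampSquared, 80.0) and passing the result in as threshold would then clamp everything more than 80 dB below the spectrum peak.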

Regarding java - Creating a spectrogram from a .wav using FFT in Java: a similar question was found on Stack Overflow: https://stackoverflow.com/questions/39295589/
