
Java: drawing 2 (multiple) waveforms with AudioWaveformCreator

Reposted. Author: 行者123. Updated: 2023-11-30 12:08:28

This question is about the code used to answer this. I am using the code posted by Nicholas DiPiazza and, later on, Andrew Thompson's variation of it. I added a second AudioWaveformCreator to this code, and both AWCs produce the same result; I don't know why. What I want to do is display two different waveforms (from different files) in a single JOptionPane.

import java.awt.BasicStroke;
import java.awt.Color;
import java.awt.Font;
import java.awt.Graphics2D;
import java.awt.font.FontRenderContext;
import java.awt.font.LineBreakMeasurer;
import java.awt.font.TextAttribute;
import java.awt.font.TextLayout;
import java.awt.geom.Line2D;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.text.AttributedCharacterIterator;
import java.text.AttributedString;
import java.util.Vector;

import javax.imageio.ImageIO;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;
import javax.sound.sampled.UnsupportedAudioFileException;
import javax.swing.ImageIcon;
import javax.swing.JLabel;
import javax.swing.JOptionPane;


public class AudioWaveformCreator2 {
AudioInputStream audioInputStream;
Vector<Line2D.Double> lines = new Vector<Line2D.Double>();
String errStr;
Capture capture = new Capture();
double duration, seconds;
//File file;
String fileName = "out.png";
SamplingGraph samplingGraph;
String waveformFilename;
Color imageBackgroundColor = new Color(20,20,20);
Object result = null;

public AudioWaveformCreator2(File url, String waveformFilename) throws Exception {
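// Note: the waveformFilename constructor parameter is never stored or used anywhere,
// so every instance keeps writing to the fixed fileName = "out.png" declared above.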
if (url != null) {
try {
errStr = null;
audioInputStream = AudioSystem.getAudioInputStream(url);
long milliseconds = (long)((audioInputStream.getFrameLength() * 1000) / audioInputStream.getFormat().getFrameRate());
duration = milliseconds / 1000.0;
samplingGraph = new SamplingGraph();
samplingGraph.createWaveForm(null);
} catch (Exception ex) {
reportStatus(ex.toString());
throw ex;
}
} else {
reportStatus("Audio file required.");
}
}
/**
* Render a WaveForm.
*/
class SamplingGraph implements Runnable {

private Thread thread;
private Font font10 = new Font("serif", Font.PLAIN, 10);
private Font font12 = new Font("serif", Font.PLAIN, 12);
Color jfcBlue = new Color(000, 000, 255);
Color pink = new Color(255, 175, 175);


public SamplingGraph() {
}


public void createWaveForm(byte[] audioBytes) {

lines.removeAllElements(); // clear the old vector

AudioFormat format = audioInputStream.getFormat();
if (audioBytes == null) {
try {
audioBytes = new byte[
(int) (audioInputStream.getFrameLength()
* format.getFrameSize())];
audioInputStream.read(audioBytes);
} catch (Exception ex) {
reportStatus(ex.getMessage());
return;
}
}
int w = 500;
int h = 200;
int[] audioData = null;
if (format.getSampleSizeInBits() == 16) {
int nlengthInSamples = audioBytes.length / 2;
audioData = new int[nlengthInSamples];
if (format.isBigEndian()) {
for (int i = 0; i < nlengthInSamples; i++) {
/* First byte is MSB (high order) */
int MSB = (int) audioBytes[2*i];
/* Second byte is LSB (low order) */
int LSB = (int) audioBytes[2*i+1];
audioData[i] = MSB << 8 | (255 & LSB);
}
} else {
for (int i = 0; i < nlengthInSamples; i++) {
/* First byte is LSB (low order) */
int LSB = (int) audioBytes[2*i];
/* Second byte is MSB (high order) */
int MSB = (int) audioBytes[2*i+1];
audioData[i] = MSB << 8 | (255 & LSB);
}
}
} else if (format.getSampleSizeInBits() == 8) {
int nlengthInSamples = audioBytes.length;
audioData = new int[nlengthInSamples];
if (format.getEncoding().toString().startsWith("PCM_SIGN")) {
for (int i = 0; i < audioBytes.length; i++) {
audioData[i] = audioBytes[i];
}
} else {
for (int i = 0; i < audioBytes.length; i++) {
audioData[i] = audioBytes[i] - 128;
}
}
}

int frames_per_pixel = audioBytes.length / format.getFrameSize()/w;
byte my_byte = 0;
double y_last = 0;
int numChannels = format.getChannels();
for (double x = 0; x < w && audioData != null; x++) {
int idx = (int) (frames_per_pixel * numChannels * x);
if (format.getSampleSizeInBits() == 8) {
my_byte = (byte) audioData[idx];
} else {
my_byte = (byte) (128 * audioData[idx] / 32768 );
}
double y_new = (double) (h * (128 - my_byte) / 256);
lines.add(new Line2D.Double(x, y_last, x, y_new));
y_last = y_new;
}
saveToFile();
}


public void saveToFile() {
int w = 500;
int h = 200;
int INFOPAD = 0;

BufferedImage bufferedImage = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
Graphics2D g2 = bufferedImage.createGraphics();

createSampleOnGraphicsContext(w, h, INFOPAD, g2);
g2.dispose();
// Write generated image to a file
try {
// Save as PNG
File file = new File(fileName);
System.out.println(file.getAbsolutePath());
ImageIO.write(bufferedImage, "png", file);
result = new ImageIcon(fileName);
} catch (IOException e) {
e.printStackTrace();
}
}


private void createSampleOnGraphicsContext(int w, int h, int INFOPAD, Graphics2D g2) {
g2.setBackground(imageBackgroundColor);
g2.clearRect(0, 0, w, h);
g2.setColor(Color.white);
g2.fillRect(0, h-INFOPAD, w, INFOPAD);

if (errStr != null) {
g2.setColor(jfcBlue);
g2.setFont(new Font("serif", Font.BOLD, 18));
g2.drawString("ERROR", 5, 20);
AttributedString as = new AttributedString(errStr);
as.addAttribute(TextAttribute.FONT, font12, 0, errStr.length());
AttributedCharacterIterator aci = as.getIterator();
FontRenderContext frc = g2.getFontRenderContext();
LineBreakMeasurer lbm = new LineBreakMeasurer(aci, frc);
float x = 5, y = 25;
lbm.setPosition(0);
while (lbm.getPosition() < errStr.length()) {
TextLayout tl = lbm.nextLayout(w-x-5);
if (!tl.isLeftToRight()) {
x = w - tl.getAdvance();
}
tl.draw(g2, x, y += tl.getAscent());
y += tl.getDescent() + tl.getLeading();
}
} else if (capture.thread != null) {
g2.setColor(Color.black);
g2.setFont(font12);
//g2.drawString("Length: " + String.valueOf(seconds), 3, h-4);
} else {
g2.setColor(Color.black);
g2.setFont(font12);
//g2.drawString("File: " + fileName + " Length: " + String.valueOf(duration) + " Position: " + String.valueOf(seconds), 3, h-4);

if (audioInputStream != null) {
// .. render sampling graph ..
g2.setColor(jfcBlue);
for (int i = 1; i < lines.size(); i++) {
g2.draw((Line2D) lines.get(i));
}

// .. draw current position ..
if (seconds != 0) {
double loc = seconds/duration*w;
g2.setColor(pink);
g2.setStroke(new BasicStroke(3));
g2.draw(new Line2D.Double(loc, 0, loc, h-INFOPAD-2));
}
}
}
}

public void start() {
thread = new Thread(this);
thread.setName("SamplingGraph");
thread.start();
seconds = 0;
}

public void stop() {
if (thread != null) {
thread.interrupt();
}
thread = null;
}

public void run() {
seconds = 0;
while (thread != null) {
if ( (capture.line != null) && (capture.line.isActive()) ) {
long milliseconds = (long)(capture.line.getMicrosecondPosition() / 1000);
seconds = milliseconds / 1000.0;
}
try { Thread.sleep(100); } catch (Exception e) { break; }
while ((capture.line != null && !capture.line.isActive()))
{
try { Thread.sleep(10); } catch (Exception e) { break; }
}
}
seconds = 0;
}
} // End class SamplingGraph

/**
* Reads data from the input channel and writes to the output stream
*/
class Capture implements Runnable {

TargetDataLine line;
Thread thread;

public void start() {
errStr = null;
thread = new Thread(this);
thread.setName("Capture");
thread.start();
}

public void stop() {
thread = null;
}

private void shutDown(String message) {
if ((errStr = message) != null && thread != null) {
thread = null;
samplingGraph.stop();
System.err.println(errStr);
}
}

public void run() {

duration = 0;
audioInputStream = null;
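// Note: audioInputStream was just set to null, so the getFormat() call below would throw a
// NullPointerException if Capture.start() were ever invoked; main() never starts Capture in this program.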

// define the required attributes for our line,
// and make sure a compatible line is supported.

AudioFormat format = audioInputStream.getFormat();
DataLine.Info info = new DataLine.Info(TargetDataLine.class,
format);

if (!AudioSystem.isLineSupported(info)) {
shutDown("Line matching " + info + " not supported.");
return;
}

// get and open the target data line for capture.

try {
line = (TargetDataLine) AudioSystem.getLine(info);
line.open(format, line.getBufferSize());
} catch (LineUnavailableException ex) {
shutDown("Unable to open the line: " + ex);
return;
} catch (SecurityException ex) {
shutDown(ex.toString());
//JavaSound.showInfoDialog();
return;
} catch (Exception ex) {
shutDown(ex.toString());
return;
}

// play back the captured audio data
ByteArrayOutputStream out = new ByteArrayOutputStream();
int frameSizeInBytes = format.getFrameSize();
int bufferLengthInFrames = line.getBufferSize() / 8;
int bufferLengthInBytes = bufferLengthInFrames * frameSizeInBytes;
byte[] data = new byte[bufferLengthInBytes];
int numBytesRead;

line.start();

while (thread != null) {
if((numBytesRead = line.read(data, 0, bufferLengthInBytes)) == -1) {
break;
}
out.write(data, 0, numBytesRead);
}

// we reached the end of the stream. stop and close the line.
line.stop();
line.close();
line = null;

// stop and close the output stream
try {
out.flush();
out.close();
} catch (IOException ex) {
ex.printStackTrace();
}

// load bytes into the audio input stream for playback

byte audioBytes[] = out.toByteArray();
ByteArrayInputStream bais = new ByteArrayInputStream(audioBytes);
audioInputStream = new AudioInputStream(bais, format, audioBytes.length / frameSizeInBytes);

long milliseconds = (long)((audioInputStream.getFrameLength() * 1000) / format.getFrameRate());
duration = milliseconds / 1000.0;

try {
audioInputStream.reset();
} catch (Exception ex) {
ex.printStackTrace();
return;
}

samplingGraph.createWaveForm(audioBytes);
}
} // End class Capture

public static void main(String [] args) throws Exception {

AudioWaveformCreator2 awc = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/cars062.wav"), "cars062.png");
AudioWaveformCreator2 awc2 = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/plain wav.wav"), "plain wav.png");
Object[] fields = {
"Plain", awc.result
,"Stego", awc2.result
};
JOptionPane.showConfirmDialog(null, fields, "Wave Form", JOptionPane.PLAIN_MESSAGE);
}

private void reportStatus(String msg) {
if ((errStr = msg) != null) {
System.out.println(errStr);
}
}

private static void printUsage() {
System.out.println("AudioWaveformCreator usage: java AudioWaveformCreator.class [path to audio file for generating the image] [path to save waveform image to]");
}
}

These are the two waveforms I get:

[Image: the JOptionPane dialog, in which both waveforms look identical]

Best Answer

When an AudioWaveformCreator2 instance is created, the SamplingGraph#saveToFile method is executed afterwards. This method stores the previously generated waveform in the file fileName, where fileName is an AudioWaveformCreator2 field with the fixed name out.png. So when several AudioWaveformCreator2 instances are created, both instances store their data in the same file out.png, and the second one overwrites the first. After an AudioWaveformCreator2 instance has stored the file, a new ImageIcon is created with the ImageIcon(String filename) constructor. The source code of ImageIcon (see e.g. http://hg.openjdk.java.net/jdk10/jdk10/jdk/file/777356696811/src/java.desktop/share/classes/javax/swing/ImageIcon.java) shows that this constructor eventually calls the Toolkit.getDefaultToolkit().getImage(filename) method. The description of that method indicates that there is a caching mechanism which returns the same image for requests with the same file name (see e.g. https://docs.oracle.com/javase/10/docs/api/java/awt/Toolkit.html#getImage(java.lang.String)):

Returns an image which gets pixel data from the specified file, whose format can be either GIF, JPEG or PNG. The underlying toolkit attempts to resolve multiple requests with the same filename to the same returned Image. Since the mechanism required to facilitate this sharing of Image objects may continue to hold onto images that are no longer in use for an indefinite period of time, developers are encouraged to implement their own caching of images by using the createImage variant wherever available. If the image data contained in the specified file changes, the Image object returned from this method may still contain stale information which was loaded from the file after a prior call. Previously loaded image data can be manually discarded by calling the flush method on the returned Image.

This caching of the fixed name out.png is the cause of the observed behavior: although the second AudioWaveformCreator2 instance overwrites the out.png file, the caching mechanism still delivers the first image, so the first image is displayed twice:

[Image: the dialog showing the first waveform twice]
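For illustration, here is a minimal, standalone sketch of this caching behavior (the file name out.png is only an example; the identity checks reflect the documented behavior quoted above):

import java.awt.Image;
import java.awt.Toolkit;

public class ToolkitCacheDemo {
    public static void main(String[] args) {
        Toolkit toolkit = Toolkit.getDefaultToolkit();
        // Both requests use the same file name, so the toolkit may hand back the very same
        // Image object, even if the file on disk has been rewritten in between.
        Image first = toolkit.getImage("out.png");
        Image second = toolkit.getImage("out.png");
        System.out.println("getImage cached:    " + (first == second)); // typically true

        // createImage bypasses the cache and always returns a new Image object.
        Image fresh = toolkit.createImage("out.png");
        System.out.println("createImage shared: " + (first == fresh)); // false
    }
}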

One possible solution is to pass the file name as an additional parameter and initialize it in the AudioWaveformCreator2 constructor:

public AudioWaveformCreator2(File url, String waveformFilename, String fileName) throws Exception {
if (url != null) {
try {
this.fileName = fileName;
...

AudioWaveformCreator2 awc = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/cars062.wav"), "cars062.png", "out.png");
AudioWaveformCreator2 awc2 = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/plain wav.wav"), "plain wav.png", "out2.png");
...

The first AudioWaveformCreator2 instance then stores its image in the file out.png and the second one in out2.png, so the caching mechanism can distinguish the two images:

[Image: the dialog now showing two different waveforms]
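Spelled out against the questioner's constructor, the change amounts to roughly the following (a sketch; only the extra fileName parameter and its assignment are new):

public AudioWaveformCreator2(File url, String waveformFilename, String fileName) throws Exception {
    if (url != null) {
        try {
            this.fileName = fileName; // per-instance output file instead of the shared "out.png"
            errStr = null;
            audioInputStream = AudioSystem.getAudioInputStream(url);
            long milliseconds = (long) ((audioInputStream.getFrameLength() * 1000)
                    / audioInputStream.getFormat().getFrameRate());
            duration = milliseconds / 1000.0;
            samplingGraph = new SamplingGraph();
            samplingGraph.createWaveForm(null);
        } catch (Exception ex) {
            reportStatus(ex.toString());
            throw ex;
        }
    } else {
        reportStatus("Audio file required.");
    }
}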

There are also solutions that keep storing to the same file out.png (i.e. no modification of the AudioWaveformCreator2 constructor is needed), for example using the Toolkit.getDefaultToolkit().createImage(filename) method, which is described as follows (see e.g. https://docs.oracle.com/javase/10/docs/api/java/awt/Toolkit.html#createImage(java.lang.String)):

Returns an image which gets pixel data from the specified URL. The returned Image is a new object which will not be shared with any other caller of this method or its getImage variant.

No caching mechanism is involved here, so the fix is simply to replace

result = new ImageIcon(fileName);

with

result = new ImageIcon(Toolkit.getDefaultToolkit().createImage(fileName));
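(Note that java.awt.Toolkit is not on the class's import list, so this variant additionally needs import java.awt.Toolkit;.)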

A third solution, which likewise avoids the caching mechanism, is to replace

result = new ImageIcon(fileName);

with

result = new ImageIcon(bufferedImage);

This works because the ImageIcon(Image image) constructor does not go through the Toolkit.getDefaultToolkit().getImage(filename) method but uses the data contained in bufferedImage directly.
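Applied to the questioner's saveToFile(), this third variant looks roughly as follows (a sketch based on the original method; the PNG is still written to disk, but the icon is built from the in-memory image):

public void saveToFile() {
    int w = 500;
    int h = 200;
    int INFOPAD = 0;

    BufferedImage bufferedImage = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
    Graphics2D g2 = bufferedImage.createGraphics();
    createSampleOnGraphicsContext(w, h, INFOPAD, g2);
    g2.dispose();
    try {
        ImageIO.write(bufferedImage, "png", new File(fileName));
    } catch (IOException e) {
        e.printStackTrace();
    }
    result = new ImageIcon(bufferedImage); // built from the in-memory image, no Toolkit cache involved
}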

Regarding drawing 2 (multiple) waveforms in Java with AudioWaveformCreator, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/54349054/
