This question is about the code used in the answer to this question. I am using the code posted by Nicholas DiPiazza and, later, Andrew Thompson's variant of it. I added a second AudioWaveformCreator to this code, but both AWCs produce the same result, and I don't know why. What I want to do is display two different waveforms (from different files) in a single JOptionPane.
import java.awt.BasicStroke;
import java.awt.Color;
import java.awt.Font;
import java.awt.Graphics2D;
import java.awt.font.FontRenderContext;
import java.awt.font.LineBreakMeasurer;
import java.awt.font.TextAttribute;
import java.awt.font.TextLayout;
import java.awt.geom.Line2D;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.text.AttributedCharacterIterator;
import java.text.AttributedString;
import java.util.Vector;
import javax.imageio.ImageIO;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;
import javax.sound.sampled.UnsupportedAudioFileException;
import javax.swing.ImageIcon;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
public class AudioWaveformCreator2 {
    AudioInputStream audioInputStream;
    Vector<Line2D.Double> lines = new Vector<Line2D.Double>();
    String errStr;
    Capture capture = new Capture();
    double duration, seconds;
    //File file;
    String fileName = "out.png";
    SamplingGraph samplingGraph;
    String waveformFilename;
    Color imageBackgroundColor = new Color(20, 20, 20);
    Object result = null;

    public AudioWaveformCreator2(File url, String waveformFilename) throws Exception {
        if (url != null) {
            try {
                errStr = null;
                audioInputStream = AudioSystem.getAudioInputStream(url);
                long milliseconds = (long) ((audioInputStream.getFrameLength() * 1000) / audioInputStream.getFormat().getFrameRate());
                duration = milliseconds / 1000.0;
                samplingGraph = new SamplingGraph();
                samplingGraph.createWaveForm(null);
            } catch (Exception ex) {
                reportStatus(ex.toString());
                throw ex;
            }
        } else {
            reportStatus("Audio file required.");
        }
    }
    /**
     * Render a WaveForm.
     */
    class SamplingGraph implements Runnable {
        private Thread thread;
        private Font font10 = new Font("serif", Font.PLAIN, 10);
        private Font font12 = new Font("serif", Font.PLAIN, 12);
        Color jfcBlue = new Color(000, 000, 255);
        Color pink = new Color(255, 175, 175);

        public SamplingGraph() {
        }

        public void createWaveForm(byte[] audioBytes) {
            lines.removeAllElements(); // clear the old vector
            AudioFormat format = audioInputStream.getFormat();
            if (audioBytes == null) {
                try {
                    audioBytes = new byte[
                            (int) (audioInputStream.getFrameLength()
                                    * format.getFrameSize())];
                    audioInputStream.read(audioBytes);
                } catch (Exception ex) {
                    reportStatus(ex.getMessage());
                    return;
                }
            }
            int w = 500;
            int h = 200;
            int[] audioData = null;
            if (format.getSampleSizeInBits() == 16) {
                int nlengthInSamples = audioBytes.length / 2;
                audioData = new int[nlengthInSamples];
                if (format.isBigEndian()) {
                    for (int i = 0; i < nlengthInSamples; i++) {
                        /* First byte is MSB (high order) */
                        int MSB = (int) audioBytes[2 * i];
                        /* Second byte is LSB (low order) */
                        int LSB = (int) audioBytes[2 * i + 1];
                        audioData[i] = MSB << 8 | (255 & LSB);
                    }
                } else {
                    for (int i = 0; i < nlengthInSamples; i++) {
                        /* First byte is LSB (low order) */
                        int LSB = (int) audioBytes[2 * i];
                        /* Second byte is MSB (high order) */
                        int MSB = (int) audioBytes[2 * i + 1];
                        audioData[i] = MSB << 8 | (255 & LSB);
                    }
                }
            } else if (format.getSampleSizeInBits() == 8) {
                int nlengthInSamples = audioBytes.length;
                audioData = new int[nlengthInSamples];
                if (format.getEncoding().toString().startsWith("PCM_SIGN")) {
                    for (int i = 0; i < audioBytes.length; i++) {
                        audioData[i] = audioBytes[i];
                    }
                } else {
                    for (int i = 0; i < audioBytes.length; i++) {
                        audioData[i] = audioBytes[i] - 128;
                    }
                }
            }
            int frames_per_pixel = audioBytes.length / format.getFrameSize() / w;
            byte my_byte = 0;
            double y_last = 0;
            int numChannels = format.getChannels();
            for (double x = 0; x < w && audioData != null; x++) {
                int idx = (int) (frames_per_pixel * numChannels * x);
                if (format.getSampleSizeInBits() == 8) {
                    my_byte = (byte) audioData[idx];
                } else {
                    my_byte = (byte) (128 * audioData[idx] / 32768);
                }
                double y_new = (double) (h * (128 - my_byte) / 256);
                lines.add(new Line2D.Double(x, y_last, x, y_new));
                y_last = y_new;
            }
            saveToFile();
        }

        public void saveToFile() {
            int w = 500;
            int h = 200;
            int INFOPAD = 0;
            BufferedImage bufferedImage = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
            Graphics2D g2 = bufferedImage.createGraphics();
            createSampleOnGraphicsContext(w, h, INFOPAD, g2);
            g2.dispose();
            // Write generated image to a file
            try {
                // Save as PNG
                File file = new File(fileName);
                System.out.println(file.getAbsolutePath());
                ImageIO.write(bufferedImage, "png", file);
                result = new ImageIcon(fileName);
            } catch (IOException e) {
            }
        }

        private void createSampleOnGraphicsContext(int w, int h, int INFOPAD, Graphics2D g2) {
            g2.setBackground(imageBackgroundColor);
            g2.clearRect(0, 0, w, h);
            g2.setColor(Color.white);
            g2.fillRect(0, h - INFOPAD, w, INFOPAD);
            if (errStr != null) {
                g2.setColor(jfcBlue);
                g2.setFont(new Font("serif", Font.BOLD, 18));
                g2.drawString("ERROR", 5, 20);
                AttributedString as = new AttributedString(errStr);
                as.addAttribute(TextAttribute.FONT, font12, 0, errStr.length());
                AttributedCharacterIterator aci = as.getIterator();
                FontRenderContext frc = g2.getFontRenderContext();
                LineBreakMeasurer lbm = new LineBreakMeasurer(aci, frc);
                float x = 5, y = 25;
                lbm.setPosition(0);
                while (lbm.getPosition() < errStr.length()) {
                    TextLayout tl = lbm.nextLayout(w - x - 5);
                    if (!tl.isLeftToRight()) {
                        x = w - tl.getAdvance();
                    }
                    tl.draw(g2, x, y += tl.getAscent());
                    y += tl.getDescent() + tl.getLeading();
                }
            } else if (capture.thread != null) {
                g2.setColor(Color.black);
                g2.setFont(font12);
                //g2.drawString("Length: " + String.valueOf(seconds), 3, h-4);
            } else {
                g2.setColor(Color.black);
                g2.setFont(font12);
                //g2.drawString("File: " + fileName + " Length: " + String.valueOf(duration) + " Position: " + String.valueOf(seconds), 3, h-4);
                if (audioInputStream != null) {
                    // .. render sampling graph ..
                    g2.setColor(jfcBlue);
                    for (int i = 1; i < lines.size(); i++) {
                        g2.draw((Line2D) lines.get(i));
                    }
                    // .. draw current position ..
                    if (seconds != 0) {
                        double loc = seconds / duration * w;
                        g2.setColor(pink);
                        g2.setStroke(new BasicStroke(3));
                        g2.draw(new Line2D.Double(loc, 0, loc, h - INFOPAD - 2));
                    }
                }
            }
        }

        public void start() {
            thread = new Thread(this);
            thread.setName("SamplingGraph");
            thread.start();
            seconds = 0;
        }

        public void stop() {
            if (thread != null) {
                thread.interrupt();
            }
            thread = null;
        }

        public void run() {
            seconds = 0;
            while (thread != null) {
                if ((capture.line != null) && (capture.line.isActive())) {
                    long milliseconds = (long) (capture.line.getMicrosecondPosition() / 1000);
                    seconds = milliseconds / 1000.0;
                }
                try { thread.sleep(100); } catch (Exception e) { break; }
                while ((capture.line != null && !capture.line.isActive())) {
                    try { thread.sleep(10); } catch (Exception e) { break; }
                }
            }
            seconds = 0;
        }
    } // End class SamplingGraph
    /**
     * Reads data from the input channel and writes to the output stream
     */
    class Capture implements Runnable {
        TargetDataLine line;
        Thread thread;

        public void start() {
            errStr = null;
            thread = new Thread(this);
            thread.setName("Capture");
            thread.start();
        }

        public void stop() {
            thread = null;
        }

        private void shutDown(String message) {
            if ((errStr = message) != null && thread != null) {
                thread = null;
                samplingGraph.stop();
                System.err.println(errStr);
            }
        }

        public void run() {
            duration = 0;
            audioInputStream = null;
            // define the required attributes for our line,
            // and make sure a compatible line is supported.
            AudioFormat format = audioInputStream.getFormat();
            DataLine.Info info = new DataLine.Info(TargetDataLine.class,
                    format);
            if (!AudioSystem.isLineSupported(info)) {
                shutDown("Line matching " + info + " not supported.");
                return;
            }
            // get and open the target data line for capture.
            try {
                line = (TargetDataLine) AudioSystem.getLine(info);
                line.open(format, line.getBufferSize());
            } catch (LineUnavailableException ex) {
                shutDown("Unable to open the line: " + ex);
                return;
            } catch (SecurityException ex) {
                shutDown(ex.toString());
                //JavaSound.showInfoDialog();
                return;
            } catch (Exception ex) {
                shutDown(ex.toString());
                return;
            }
            // play back the captured audio data
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            int frameSizeInBytes = format.getFrameSize();
            int bufferLengthInFrames = line.getBufferSize() / 8;
            int bufferLengthInBytes = bufferLengthInFrames * frameSizeInBytes;
            byte[] data = new byte[bufferLengthInBytes];
            int numBytesRead;
            line.start();
            while (thread != null) {
                if ((numBytesRead = line.read(data, 0, bufferLengthInBytes)) == -1) {
                    break;
                }
                out.write(data, 0, numBytesRead);
            }
            // we reached the end of the stream. stop and close the line.
            line.stop();
            line.close();
            line = null;
            // stop and close the output stream
            try {
                out.flush();
                out.close();
            } catch (IOException ex) {
                ex.printStackTrace();
            }
            // load bytes into the audio input stream for playback
            byte audioBytes[] = out.toByteArray();
            ByteArrayInputStream bais = new ByteArrayInputStream(audioBytes);
            audioInputStream = new AudioInputStream(bais, format, audioBytes.length / frameSizeInBytes);
            long milliseconds = (long) ((audioInputStream.getFrameLength() * 1000) / format.getFrameRate());
            duration = milliseconds / 1000.0;
            try {
                audioInputStream.reset();
            } catch (Exception ex) {
                ex.printStackTrace();
                return;
            }
            samplingGraph.createWaveForm(audioBytes);
        }
    } // End class Capture
    public static void main(String[] args) throws Exception {
        AudioWaveformCreator2 awc = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/cars062.wav"), "cars062.png");
        AudioWaveformCreator2 awc2 = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/plain wav.wav"), "plain wav.png");
        Object[] fields = {
            "Plain", awc.result,
            "Stego", awc2.result
        };
        JOptionPane.showConfirmDialog(null, fields, "Wave Form", JOptionPane.PLAIN_MESSAGE);
    }

    private void reportStatus(String msg) {
        if ((errStr = msg) != null) {
            System.out.println(errStr);
        }
    }

    private static void printUsage() {
        System.out.println("AudioWaveformCreator usage: java AudioWaveformCreator.class [path to audio file for generating the image] [path to save waveform image to]");
    }
}
These are the two waveforms I get:
Best Answer
When an AudioWaveformCreator2 instance is created, the SamplingGraph#saveToFile method is executed as part of that. This method stores the waveform generated just before in the file fileName, where fileName is an AudioWaveformCreator2 field with the fixed value out.png. So when several AudioWaveformCreator2 instances are created, all of them store their data in the same file out.png, and the second one overwrites the first. After an AudioWaveformCreator2 instance has stored the file, it creates a new ImageIcon with the ImageIcon(String filename) constructor. The source code of ImageIcon (see e.g. http://hg.openjdk.java.net/jdk10/jdk10/jdk/file/777356696811/src/java.desktop/share/classes/javax/swing/ImageIcon.java) shows that the ImageIcon(String filename) constructor eventually calls the Toolkit.getDefaultToolkit().getImage(filename) method. The description of that method indicates that there is a caching mechanism which returns the same image for requests with the same file name (see e.g. https://docs.oracle.com/javase/10/docs/api/java/awt/Toolkit.html#getImage(java.lang.String) ):
Returns an image which gets pixel data from the specified file, whose format can be either GIF, JPEG or PNG. The underlying toolkit attempts to resolve multiple requests with the same filename to the same returned Image. Since the mechanism required to facilitate this sharing of Image objects may continue to hold onto images that are no longer in use for an indefinite period of time, developers are encouraged to implement their own caching of images by using the createImage variant wherever available. If the image data contained in the specified file changes, the Image object returned from this method may still contain stale information which was loaded from the file after a prior call. Previously loaded image data can be manually discarded by calling the flush method on the returned Image.
This caching, combined with the fixed name out.png, is the cause of the observed behavior: although the second AudioWaveformCreator2 instance overwrites the out.png file, the caching mechanism serves the first image, so the first image is shown twice.
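The caching is easy to reproduce in isolation. Below is a minimal sketch (a standalone demo, not part of the question's code; the class name ToolkitCacheDemo and the file name cache-demo.png are just illustrative) that writes a PNG, loads it with the ImageIcon(String) constructor, overwrites the PNG, and loads it again under the same file name. Because ImageIcon(String) goes through Toolkit.getImage, the two icons typically end up backed by the same cached Image, which is exactly what happens with out.png above:

import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;
import javax.swing.ImageIcon;

public class ToolkitCacheDemo {
    public static void main(String[] args) throws Exception {
        File file = new File("cache-demo.png");           // illustrative file name

        writeSolidPng(file, Color.RED);
        ImageIcon first = new ImageIcon(file.getPath());  // loads the red image

        writeSolidPng(file, Color.BLUE);                  // overwrite the file with a blue image
        ImageIcon second = new ImageIcon(file.getPath()); // same file name -> Toolkit may reuse the cached Image

        // Toolkit.getImage tries to resolve identical file names to the same Image,
        // so this typically prints "true" and both icons show the red image.
        System.out.println(first.getImage() == second.getImage());
    }

    private static void writeSolidPng(File file, Color color) throws Exception {
        BufferedImage img = new BufferedImage(10, 10, BufferedImage.TYPE_INT_RGB);
        Graphics2D g = img.createGraphics();
        g.setColor(color);
        g.fillRect(0, 0, 10, 10);
        g.dispose();
        ImageIO.write(img, "png", file);
    }
}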
One possible solution is to pass the file name into the AudioWaveformCreator2 constructor and initialize it there:
public AudioWaveformCreator2(File url, String waveformFilename, String fileName) throws Exception {
    if (url != null) {
        try {
            this.fileName = fileName;
            ...
and
AudioWaveformCreator2 awc = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/cars062.wav"), "cars062.png", "out.png");
AudioWaveformCreator2 awc2 = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/plain wav.wav"), "plain wav.png", "out2.png");
...
The first AudioWaveformCreator2 instance now stores its image in the file out.png and the second AudioWaveformCreator2 instance in the file out2.png, so the caching mechanism can distinguish the two images.
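For reference, here is how the two elided fragments above fit together; apart from the extra fileName parameter, the constructor body and main are copied from the question's code, so nothing else needs to change:

public AudioWaveformCreator2(File url, String waveformFilename, String fileName) throws Exception {
    if (url != null) {
        try {
            this.fileName = fileName;   // replaces the fixed "out.png"
            errStr = null;
            audioInputStream = AudioSystem.getAudioInputStream(url);
            long milliseconds = (long) ((audioInputStream.getFrameLength() * 1000) / audioInputStream.getFormat().getFrameRate());
            duration = milliseconds / 1000.0;
            samplingGraph = new SamplingGraph();
            samplingGraph.createWaveForm(null);
        } catch (Exception ex) {
            reportStatus(ex.toString());
            throw ex;
        }
    } else {
        reportStatus("Audio file required.");
    }
}

public static void main(String[] args) throws Exception {
    // Each instance now writes its waveform to its own PNG, so each ImageIcon is
    // created from a distinct file name and the Toolkit cache keeps them apart.
    AudioWaveformCreator2 awc = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/cars062.wav"), "cars062.png", "out.png");
    AudioWaveformCreator2 awc2 = new AudioWaveformCreator2(new File("E:/PRODI ILKOM/Semester VIII/TA/wave/plain wav.wav"), "plain wav.png", "out2.png");
    Object[] fields = {
        "Plain", awc.result,
        "Stego", awc2.result
    };
    JOptionPane.showConfirmDialog(null, fields, "Wave Form", JOptionPane.PLAIN_MESSAGE);
}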
There are other solutions that keep storing to the same file out.png (i.e. they do not require changing the AudioWaveformCreator2 constructor), for example using the Toolkit.getDefaultToolkit().createImage(filename) method, which is described as follows (see e.g. https://docs.oracle.com/javase/10/docs/api/java/awt/Toolkit.html#createImage(java.lang.String) ):
Returns an image which gets pixel data from the specified URL. The returned Image is a new object which will not be shared with any other caller of this method or its getImage variant.
Since no caching mechanism is involved here, the fix is simply to replace
result = new ImageIcon(fileName);
with
result = new ImageIcon(Toolkit.getDefaultToolkit().createImage(fileName));
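Note that this variant still writes out.png and then loads the pixel data back from that file; per the quoted documentation it only opts out of the sharing of Image objects, because createImage returns a new, unshared Image on every call.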
A third solution, which also avoids the caching mechanism, is to replace
result = new ImageIcon(fileName);
with
result = new ImageIcon(bufferedImage);
because the ImageIcon(Image image) constructor does not use the Toolkit.getDefaultToolkit().getImage(filename) method but works directly on the data contained in bufferedImage.
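Applied to the question's saveToFile method (inside SamplingGraph, so the existing imports cover everything used here), the third variant would look roughly like this; only the line that builds result changes, and the PNG can still be written out for inspection:

public void saveToFile() {
    int w = 500;
    int h = 200;
    int INFOPAD = 0;
    BufferedImage bufferedImage = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
    Graphics2D g2 = bufferedImage.createGraphics();
    createSampleOnGraphicsContext(w, h, INFOPAD, g2);
    g2.dispose();
    try {
        // The PNG is still saved to disk, but the icon is built from the in-memory
        // image, so Toolkit's file-name based cache is never involved.
        File file = new File(fileName);
        ImageIO.write(bufferedImage, "png", file);
        result = new ImageIcon(bufferedImage);
    } catch (IOException e) {
        e.printStackTrace(); // the original swallowed this exception silently
    }
}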
A related question about drawing two (multiple) waveforms in Java with AudioWaveformCreator can be found on Stack Overflow: https://stackoverflow.com/questions/54349054/