Analyzing sound in Java: an example of drawing an audio waveform

The listing below plays a WAV file through a SourceDataLine and, at the same time, feeds its samples into a small Swing frame that redraws the most recent 500 samples every 100 ms.

```
package _tmp;

import java.awt.Color;
import java.awt.Graphics;
import java.awt.Image;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.Deque;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Timer;
import java.util.TimerTask;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.SourceDataLine;
import javax.swing.JFrame;
import javax.swing.SwingUtilities;

public class SoundTest {

    public static class WaveformGraph extends JFrame {

        // Most recent samples, oldest first; guarded by synchronized (deque).
        private Deque<Short> deque = new LinkedList<Short>();
        private Timer timer;
        private Image buffered;

        public WaveformGraph(int width, int height) {
            setSize(width, height);
            timer = new Timer();
            buffered = new BufferedImage(width, height, BufferedImage.TYPE_4BYTE_ABGR);
            // Redraw the off-screen image every 100 ms on the timer thread,
            // then ask the EDT to repaint the frame.
            timer.schedule(new TimerTask() {
                @Override public void run() {
                    Graphics g = buffered.getGraphics();
                    g.setColor(Color.WHITE);
                    g.fillRect(0, 0, getWidth(), getHeight());
                    g.setColor(Color.BLACK);
                    g.translate(10, getHeight() / 2);
                    synchronized (deque) {
                        // With 16-bit samples a smaller factor (e.g. getHeight() / 65536f)
                        // may be needed to keep the curve on screen; 1 suits 8-bit samples.
                        float heightRate = 1;
                        if (deque.size() > 1) {
                            Iterator<Short> iter = deque.iterator();
                            Short p1 = iter.next();
                            Short p2 = iter.next();
                            int x1 = 0, x2 = 0;
                            while (iter.hasNext()) {
                                g.drawLine(x1, (int) (p1 * heightRate), x2, (int) (p2 * heightRate));
                                p1 = p2;
                                p2 = iter.next();
                                x1 = x2;
                                x2 += 1;
                            }
                        }
                    }
                    g.dispose();
                    SwingUtilities.invokeLater(new Runnable() {
                        @Override public void run() {
                            repaint();
                        }
                    });
                }
            }, 100, 100);
        }

        @Override
        public void paint(Graphics g) {
            super.paint(g);
            if (buffered != null) {
                g.drawImage(buffered, 0, 0, null);
            }
        }

        public void put(short v) {
            synchronized (deque) {
                deque.add(v);
                // Keep only the latest 500 samples.
                if (deque.size() > 500) {
                    deque.removeFirst();
                }
            }
        }

        public void clear() {
            synchronized (deque) {
                deque.clear();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        // record();
        WaveformGraph waveformGraph = new WaveformGraph(500, 300);
        waveformGraph.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        waveformGraph.setVisible(true);

        AudioInputStream ais = AudioSystem.getAudioInputStream(new File("C:\\Documents and Settings\\wml\\My Documents\\My Music\\苏仨 - 失眠症.wav"));
        printFormat(ais.getFormat());

        SourceDataLine player = AudioSystem.getSourceDataLine(ais.getFormat());
        player.open();
        player.start();

        byte[] buf = new byte[4];
        int len;
        while ((len = ais.read(buf)) != -1) {
            if (ais.getFormat().getChannels() == 2) {
                if (ais.getFormat().getSampleSizeInBits() == 16) {
                    // 16-bit stereo: one frame per buffer, little-endian.
                    waveformGraph.put((short) ((buf[1] << 8) | (buf[0] & 0xFF))); // left channel
                    // waveformGraph.put((short) ((buf[3] << 8) | (buf[2] & 0xFF))); // right channel
                } else {
                    // 8-bit stereo: two frames per buffer, interleaved L R L R.
                    waveformGraph.put(buf[0]); // left channel
                    waveformGraph.put(buf[2]); // left channel
                    // waveformGraph.put(buf[1]); // right channel
                    // waveformGraph.put(buf[3]); // right channel
                }
            } else {
                if (ais.getFormat().getSampleSizeInBits() == 16) {
                    // 16-bit mono: two samples per buffer.
                    waveformGraph.put((short) ((buf[1] << 8) | (buf[0] & 0xFF)));
                    waveformGraph.put((short) ((buf[3] << 8) | (buf[2] & 0xFF)));
                } else {
                    // 8-bit mono: four samples per buffer.
                    waveformGraph.put(buf[0]);
                    waveformGraph.put(buf[1]);
                    waveformGraph.put(buf[2]);
                    waveformGraph.put(buf[3]);
                }
            }
            player.write(buf, 0, len);
        }
        player.close();
        ais.close();
    }

    public static void printFormat(AudioFormat format) {
        System.out.println(format.getEncoding() + " => "
                + format.getSampleRate() + " hz, "
                + format.getSampleSizeInBits() + " bit, "
                + format.getChannels() + " channel, "
                + format.getFrameRate() + " frames/second, "
                + format.getFrameSize() + " bytes/frame");
    }

    // public static void record() throws LineUnavailableException,
    //         InterruptedException {
    //     AudioFormat audioFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, 48000F, 16, 1, 2, 48000F, false);
    //     Info recordDevInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
    //
    //     final TargetDataLine recordLine = (TargetDataLine) AudioSystem.getLine(recordDevInfo);
    //     final SourceDataLine playLine = AudioSystem.getSourceDataLine(audioFormat);
    //
    //     recordLine.open(audioFormat, recordLine.getBufferSize());
    //     playLine.open(audioFormat, recordLine.getBufferSize());
    //
    //     Thread recorder = new Thread() {
    //         public void run() {
    //             recordLine.start();
    //             playLine.start();
    //
    //             FloatControl fc = (FloatControl) playLine.getControl(FloatControl.Type.MASTER_GAIN);
    //             double value = 2;
    //             float dB = (float) (Math.log(value == 0.0 ? 0.0001 : value) / Math.log(10.0) * 20.0);
    //             fc.setValue(dB);
    //
    //             try {
    //                 AudioInputStream in = new AudioInputStream(recordLine);
    //                 byte[] buf = new byte[recordLine.getBufferSize()];
    //                 int len;
    //                 while ((len = in.read(buf)) != -1) {
    //                     playLine.write(buf, 0, len);
    //                 }
    //             } catch (IOException e) {
    //                 e.printStackTrace();
    //             } finally {
    //                 recordLine.stop();
    //                 playLine.stop();
    //             }
    //         };
    //     };
    //     recorder.start();
    //     recorder.join();
    // }
}
```
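One detail in the listing that is easy to get wrong is assembling a signed 16-bit little-endian sample from two buffer bytes. A minimal standalone sketch (the class and method names are mine, added only for illustration) showing why the low byte must be masked before combining:

```
public class PcmSample {
    // Combine two bytes of little-endian signed 16-bit PCM into one sample.
    // Masking the low byte with 0xFF matters: bytes are signed in Java, and
    // without the mask a negative low byte sign-extends and corrupts the high bits.
    static short littleEndian16(byte lo, byte hi) {
        return (short) ((hi << 8) | (lo & 0xFF));
    }

    public static void main(String[] args) {
        byte lo = (byte) 0xF0;  // low byte with its sign bit set
        byte hi = (byte) 0x01;
        System.out.println(littleEndian16(lo, hi));    // 496 (0x01F0), correct
        System.out.println((short) ((hi << 8) | lo));  // -16, wrong due to sign extension
    }
}
```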

A second, related example follows (the original note says it compiles under JDK 16); the listing breaks off inside play():

```
import java.awt.Graphics;
import java.awt.GridLayout;
import java.io.File;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
import javax.swing.GroupLayout;
import javax.swing.JFrame;

/**
 * @author Administrator
 */
public class Musicline extends JFrame implements Runnable {

    private byte[] audioData = null;
    private int intBytes = 0;
    private byte[] ml = new byte[1];
    private int[] drawl = null;

    /** Creates new form Musicline */
    public Musicline() {
        initComponents(); // form initialization (not included in the original listing)
        Graphics g;
        g = this.getGraphics();
    }

    public void paint(Graphics g) {
        g.clearRect(0, 0, 900, 900);
        // System.out.print(drawl.length);
        if (audioData != null) {
            drawl = new int[audioData.length];
            for (int i = 0; i < audioData.length; i++) {
                ml[0] = audioData[i];
                // String s = new String(ml);
                drawl[i] = Math.abs((int) ml[0]);
            }
            System.out.println(drawl[0]);
            for (int i = 0; i < drawl.length - 1; i++) {
                g.drawLine(i * this.getWidth() / 256, drawl[i] + 100,
                        (i + 1) * this.getWidth() / 256, drawl[i + 1] + 100);
            }
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see java.lang.Runnable#run()
     */
    public void run() {
        while (intBytes != -1) {
            try {
                synchronized (this) {
                    this.wait(10);
                }
            } catch (InterruptedException ex) {
                ex.printStackTrace();
            }
            repaint();
        }
    }

    public void play() {
        try {
            AudioInputStream ais = AudioSystem.getAudioInputStream(new File(
                    "F:/perl/key2.wav")); // obtain the audio input stream
            ais = AudioSystem.getAudioInputStream(
                    AudioFormat.Encoding.PCM_SIGNED, ais);
            AudioFormat baseFormat = ais.getFormat(); // describes how the data in the stream is arranged
            System.out.println("baseFormat=" + baseFormat);
            DataLine.Info info = new DataLine.Info(SourceDataLine.class,
                    baseFormat);
            System.out.println("info=" + info);
```
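The play() listing above stops right after building the DataLine.Info. A self-contained sketch of how this Java Sound pattern typically continues (the PlaySketch class, method name, and argument handling are mine, not the author's original code): obtain a SourceDataLine matching the Info, open it with the decoded format, and stream the bytes through it.

```
import java.io.File;
import javax.sound.sampled.*;

public class PlaySketch {
    // Decode an audio file to signed PCM and stream it to the default output line.
    static void play(File audioFile) throws Exception {
        AudioInputStream ais = AudioSystem.getAudioInputStream(audioFile);
        ais = AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, ais);
        AudioFormat format = ais.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);

        SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);
        line.open(format);
        line.start();

        byte[] buf = new byte[4096];
        int n;
        while ((n = ais.read(buf, 0, buf.length)) != -1) {
            line.write(buf, 0, n);  // blocks until the line has room
        }
        line.drain();               // let buffered audio finish playing
        line.close();
        ais.close();
    }

    public static void main(String[] args) throws Exception {
        play(new File(args[0]));
    }
}
```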
What counts as a useful answer here depends on your input data. If you have an audio file, you first need to read it into Java and then process it to decide whether it contains voice or just noise. Below is a simple example that uses the Java Sound API to read an audio file and draw its waveform:

```
import javax.sound.sampled.*;
import javax.swing.*;
import java.awt.*;
import java.io.*;

public class WaveformExample extends JPanel {

    private byte[] audioData;

    public WaveformExample(File audioFile) {
        try {
            AudioInputStream audioStream = AudioSystem.getAudioInputStream(audioFile);
            audioData = new byte[audioStream.available()];
            audioStream.read(audioData);
        } catch (UnsupportedAudioFileException | IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        if (audioData == null) {
            return; // nothing to draw if loading failed
        }
        Graphics2D g2d = (Graphics2D) g;
        int width = getWidth();
        int height = getHeight();
        int halfHeight = height / 2;
        int maxAudioValue = 127;
        g2d.setColor(Color.BLACK);
        // Stop at length - 1 because each segment also reads the following sample.
        for (int i = 0; i < audioData.length - 1; i++) {
            int audioValue = audioData[i];
            int x1 = i * width / audioData.length;
            int y1 = halfHeight - (audioValue * halfHeight / maxAudioValue);
            int x2 = (i + 1) * width / audioData.length;
            int y2 = halfHeight - (audioData[i + 1] * halfHeight / maxAudioValue);
            g2d.drawLine(x1, y1, x2, y2);
        }
    }

    public static void main(String[] args) {
        JFrame frame = new JFrame("Waveform Example");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        frame.setSize(800, 600);

        WaveformExample waveformExample = new WaveformExample(new File("audio.wav"));
        frame.add(waveformExample);
        frame.setVisible(true);
    }
}
```

This code reads a file named "audio.wav" and displays its waveform in a window. It uses a very simple approach: each raw byte is treated as one sample and drawn as a line segment. If the amplitude stays above some threshold, a section can be treated as containing voice; otherwise it is just noise.
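The threshold idea in that last sentence is left as prose. A minimal sketch of it, assuming 8-bit signed samples; the class name, threshold, and window size are arbitrary illustration values, not tuned constants:

```
public class VoiceThreshold {
    static final int THRESHOLD = 20;  // mean absolute amplitude cutoff (illustrative)
    static final int WINDOW = 1024;   // samples per analysis window (illustrative)

    // True if the mean absolute amplitude of the window exceeds the threshold.
    static boolean looksLikeVoice(byte[] samples, int offset) {
        long sum = 0;
        int end = Math.min(offset + WINDOW, samples.length);
        for (int i = offset; i < end; i++) {
            sum += Math.abs(samples[i]);
        }
        return end > offset && (sum / (end - offset)) > THRESHOLD;
    }

    public static void main(String[] args) {
        byte[] quiet = new byte[2048];           // all zeros: silence / noise floor
        byte[] loud = new byte[2048];
        java.util.Arrays.fill(loud, (byte) 60);  // sustained high amplitude
        System.out.println(looksLikeVoice(quiet, 0)); // false
        System.out.println(looksLikeVoice(loud, 0));  // true
    }
}
```

Note that an amplitude threshold only separates loud sections from quiet ones; real voice-activity detection needs more than this.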