Java sound方面知识 求助 我现在从声卡捕获到了一个音频流AudioInputStream, 我想将这个流先通过一个buffer数组缓存一下,然后想写入一个.wav的文件,但是这个文件不能播放?问题好像在文件的音频格式没有处理,我想请问能否在写入.wav文件前对AudioInputStream对象流做一个音频文件格式处理。如果能的话,请问怎么处理?谢谢! 解决方案 » 免费领取超大流量手机卡,每月29元包185G流量+100分钟通话, 中国电信官方发货 lz可以看看下面的文章,写的很详细:http://ouroom.bokee.com/viewdiary.11252708.html public class SpeechStore { private static byte[] RIFF = "RIFF".getBytes(); private static byte[] RIFF_SIZE = new byte[4]; private static byte[] RIFF_TYPE = "WAVE".getBytes(); private static byte[] FORMAT = "fmt ".getBytes(); private static byte[] FORMAT_SIZE = new byte[4]; private static byte[] FORMATTAG = new byte[2]; private static byte[] CHANNELS = new byte[2]; private static byte[] SamplesPerSec = new byte[4]; private static byte[] AvgBytesPerSec = new byte[4]; private static byte[] BlockAlign = new byte[2]; private static byte[] BitsPerSample = new byte[2]; private static byte[] DataChunkID = "data".getBytes(); private static byte[] DataSize = new byte[4]; public static boolean isrecording = false; static byte[] b = new byte[10000]; public void writeToWave() { } public static void init() { // 这里主要就是设置参数,要注意revers函数在这里的作用 FORMAT_SIZE = new byte[] { (byte) 16, (byte) 0, (byte) 0, (byte) 0 }; byte[] tmp = revers(intToBytes(1)); FORMATTAG = new byte[] { tmp[0], tmp[1] }; CHANNELS = new byte[] { tmp[0], tmp[1] }; SamplesPerSec = revers(intToBytes(16000)); AvgBytesPerSec = revers(intToBytes(32000)); tmp = revers(intToBytes(2)); BlockAlign = new byte[] { tmp[0], tmp[1] }; tmp = revers(intToBytes(16)); BitsPerSample = new byte[] { tmp[0], tmp[1] }; } public static byte[] revers(byte[] tmp) { byte[] reversed = new byte[tmp.length]; for (int i = 0; i < tmp.length; i++) { reversed[i] = tmp[tmp.length - i - 1]; } return reversed; } public static byte[] intToBytes(int num) { byte[] bytes = new byte[4]; bytes[0] = (byte) (num >> 24); bytes[1] = (byte) ((num >> 16) & 0x000000FF); bytes[2] = (byte) ((num >> 8) & 0x000000FF); bytes[3] = (byte) (num & 0x000000FF); return bytes; } 
public static void main(String[] args) throws IOException { InputStream input = capAudio(); int toaldatasize = 0; int audiolen; byte[] audiochunk = new byte[1024]; // 因为文件需要顺序读写,并且只能在最后才能确定riffsize和datasize参数,所以对前面的data要缓存 ByteArrayOutputStream bytebuff = new ByteArrayOutputStream(9600000); Timer tm = new Timer(1000); tm.start(); try { while (isrecording) { audiolen = input.read(audiochunk); toaldatasize += audiolen; bytebuff.write(audiochunk, 0, audiolen); } } catch (IOException e1) { // TODO Auto-generated catch block e1.printStackTrace(); } DataSize = revers(intToBytes(toaldatasize)); RIFF_SIZE = revers(intToBytes(toaldatasize + 36 - 8)); File wavfile = new File("E:\\file.wav"); FileOutputStream file = null; try { file = new FileOutputStream(wavfile); BufferedOutputStream fw = new BufferedOutputStream(file); init(); fw.write(RIFF); fw.write(RIFF_SIZE); fw.write(RIFF_TYPE); fw.write(FORMAT); fw.write(FORMAT_SIZE); fw.write(FORMATTAG); fw.write(CHANNELS); fw.write(SamplesPerSec); fw.write(AvgBytesPerSec); fw.write(BlockAlign); fw.write(BitsPerSample); fw.write(DataChunkID); fw.write(DataSize); fw.write(bytebuff.toByteArray()); fw.flush(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } // 这是音频采集的部分。 public static InputStream capAudio() throws IOException { float fFrameRate = 44100.0F; TargetDataLine target_line = null; AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, fFrameRate, 16, 2, 4, fFrameRate, false); DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class, format, 65536); try { target_line = (TargetDataLine) AudioSystem.getLine(lineInfo); target_line.open(format, 655360); DataLine.Info out_dataLine_info = new DataLine.Info(SourceDataLine.class, format); SourceDataLine sourceDataLine = (SourceDataLine) AudioSystem.getLine(out_dataLine_info); } catch (LineUnavailableException e) { System.err .println("ERROR: LineUnavailableException at AudioSender()"); e.printStackTrace(); } AudioInputStream 
audio_input = new AudioInputStream(target_line); target_line.start(); isrecording = true; return audio_input; } public void stopRecord() { }}// Timer 是一个定时线程,到指定时间后将isrecording设置为false从而停止采集音频。class Timer extends Thread { private int len; public Timer(int len_) { this.len = len_; } public void run() { try { Thread.sleep(len); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } SpeechStore.isrecording = false; }网上看到的.找了很久..希望对你有帮助! 你对这方面熟悉不 /熟悉的话,我现在也有个问题挺烦恼的.就是如何输出这个音频的PCM序列? return;?? Java初学者,请各位帮帮忙 java中常用的数据结构封装在哪里了? 帮我看一下是什么错误! 求助 一个有趣的问题 帮我看看这个小类错哪了 效果是:“当鼠标移动到那十字加就画到那!”我那里写错了? 关于Bean Builder的安装 TOMCAT问题,请进 j2se开发蓝牙程序 关于接口的使用方法
// WAV (RIFF) header fields. All multi-byte values in a WAV file are
// little-endian, hence the revers(intToBytes(...)) pattern used below.
private static byte[] RIFF_SIZE = new byte[4];
private static byte[] RIFF_TYPE = "WAVE".getBytes();
private static byte[] FORMAT = "fmt ".getBytes();
private static byte[] FORMAT_SIZE = new byte[4];
private static byte[] FORMATTAG = new byte[2];
private static byte[] CHANNELS = new byte[2];
private static byte[] SamplesPerSec = new byte[4];
private static byte[] AvgBytesPerSec = new byte[4];
private static byte[] BlockAlign = new byte[2];
private static byte[] BitsPerSample = new byte[2];
private static byte[] DataChunkID = "data".getBytes();
private static byte[] DataSize = new byte[4];

// Polled by the capture loop in main(); cleared by the Timer thread to stop recording.
public static boolean isrecording = false;

static byte[] b = new byte[10000];

public void writeToWave() {
}

/**
 * Fills in the fmt-chunk fields of the WAV header.
 *
 * These values MUST match the capture format opened in capAudio()
 * (PCM, 44100 Hz, 16-bit, 2 channels). The original code wrote a
 * 16000 Hz / mono / blockAlign=2 header over 44100 Hz stereo data,
 * which is why the resulting .wav would not play.
 */
public static void init() {
    // fmt chunk payload size: always 16 bytes for plain PCM.
    FORMAT_SIZE = new byte[] { (byte) 16, (byte) 0, (byte) 0, (byte) 0 };
    byte[] tmp = revers(intToBytes(1)); // format tag 1 = uncompressed PCM
    FORMATTAG = new byte[] { tmp[0], tmp[1] };
    tmp = revers(intToBytes(2)); // 2 channels (stereo), matches capAudio()
    CHANNELS = new byte[] { tmp[0], tmp[1] };
    SamplesPerSec = revers(intToBytes(44100));
    // average bytes per second = sampleRate * blockAlign = 44100 * 4
    AvgBytesPerSec = revers(intToBytes(176400));
    tmp = revers(intToBytes(4)); // blockAlign = channels * bitsPerSample / 8
    BlockAlign = new byte[] { tmp[0], tmp[1] };
    tmp = revers(intToBytes(16)); // 16 bits per sample
    BitsPerSample = new byte[] { tmp[0], tmp[1] };
}

/**
 * Returns a copy of {@code tmp} with the byte order reversed.
 * Used to turn the big-endian output of intToBytes() into the
 * little-endian layout the WAV header requires.
 */
public static byte[] revers(byte[] tmp) {
    byte[] reversed = new byte[tmp.length];
    for (int i = 0; i < tmp.length; i++) {
        reversed[i] = tmp[tmp.length - i - 1];
    }
    return reversed;
}

/**
 * Serializes {@code num} as 4 bytes, most significant byte first
 * (big-endian).
 */
public static byte[] intToBytes(int num) {
    byte[] bytes = new byte[4];
    bytes[0] = (byte) (num >> 24);
    bytes[1] = (byte) ((num >> 16) & 0x000000FF);
    bytes[2] = (byte) ((num >> 8) & 0x000000FF);
    bytes[3] = (byte) (num & 0x000000FF);
    return bytes;
}

/**
 * Captures audio until the Timer stops it, then writes the data to
 * E:\file.wav behind a valid 44-byte RIFF/WAVE header.
 */
public static void main(String[] args) throws IOException {
    InputStream input = capAudio();
    int totaldatasize = 0;
    int audiolen;
    byte[] audiochunk = new byte[1024];
    // The RIFF and data chunk sizes are only known once capture ends,
    // so all PCM data is buffered in memory first.
    ByteArrayOutputStream bytebuff = new ByteArrayOutputStream(9600000);
    Timer tm = new Timer(1000); // stop capturing after 1000 ms
    tm.start();
    try {
        while (isrecording) {
            audiolen = input.read(audiochunk);
            if (audiolen < 0) {
                break; // end of stream; without this check -1 corrupted the size
            }
            totaldatasize += audiolen;
            bytebuff.write(audiochunk, 0, audiolen);
        }
    } catch (IOException e1) {
        e1.printStackTrace();
    }
    DataSize = revers(intToBytes(totaldatasize));
    // RIFF chunk size = file size - 8 = 36 remaining header bytes + data.
    // (The original "+ 36 - 8" understated it by 8 bytes.)
    RIFF_SIZE = revers(intToBytes(totaldatasize + 36));
    File wavfile = new File("E:\\file.wav");
    BufferedOutputStream fw = null;
    try {
        fw = new BufferedOutputStream(new FileOutputStream(wavfile));
        init();
        fw.write(RIFF);
        fw.write(RIFF_SIZE);
        fw.write(RIFF_TYPE);
        fw.write(FORMAT);
        fw.write(FORMAT_SIZE);
        fw.write(FORMATTAG);
        fw.write(CHANNELS);
        fw.write(SamplesPerSec);
        fw.write(AvgBytesPerSec);
        fw.write(BlockAlign);
        fw.write(BitsPerSample);
        fw.write(DataChunkID);
        fw.write(DataSize);
        fw.write(bytebuff.toByteArray());
        fw.flush();
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        // The original never closed either stream.
        if (fw != null) {
            try {
                fw.close();
            } catch (IOException ignored) {
                // best effort; data was already flushed above
            }
        }
        input.close();
    }
}
/**
 * Opens the default capture (microphone) line -- PCM signed, 44100 Hz,
 * 16-bit, stereo, frame size 4, little-endian -- starts it, and returns
 * its data as a stream. Sets {@code isrecording} to true as a side effect.
 *
 * @return the started capture line wrapped in an AudioInputStream
 * @throws IOException if no matching capture line can be opened
 */
public static InputStream capAudio() throws IOException {
    float fFrameRate = 44100.0F;
    AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
            fFrameRate, 16, 2, 4, fFrameRate, false);
    DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class,
            format, 65536);
    TargetDataLine target_line;
    try {
        target_line = (TargetDataLine) AudioSystem.getLine(lineInfo);
        target_line.open(format, 655360);
        // NOTE: the original also obtained an unused SourceDataLine
        // (playback line) here; it was never opened or used, so it
        // has been removed.
    } catch (LineUnavailableException e) {
        // Propagate as the already-declared IOException instead of
        // falling through with target_line == null, which made the
        // original throw a NullPointerException below.
        throw new IOException("capture line unavailable: " + e.getMessage());
    }
    AudioInputStream audio_input = new AudioInputStream(target_line);
    target_line.start();
    isrecording = true;
    return audio_input;
}

public void stopRecord() {
}
}
// Timer: a one-shot timer thread. After the given delay (milliseconds)
// it clears SpeechStore.isrecording so the capture loop in main() exits.
// NOTE(review): the class's closing brace is missing from this excerpt
// (truncated in the forum post).
class Timer extends Thread {
    // delay before stopping the recording, in milliseconds
    private int len;

    public Timer(int len_) {
        this.len = len_;
    }

    public void run() {
        try {
            Thread.sleep(len);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        // Signal the recorder to stop capturing.
        SpeechStore.isrecording = false;
    }
网上看到的，找了很久，希望对你有帮助！