目前,我按照 WAV 文件结构新写入生成了一个 WAV 格式的文件,但现在播放不出声音。想请教:WAV 文件中的数据是如何存放的?具体需要哪些参数?我分析问题应该出在数据部分——数据如何存放、如何取出?是否在写入之前需要做一些处理,把若干参数组合成一个具体的值,比如:需要参数 A、B、C,A+B+C 之后等于 D,然后把这个 D 写入到文件存放数据的位置,播放器播放时就会自动解析出声音。望各位高手指点,有代码更好,谢谢了。
调试欢乐多
import java.io.File;
import java.io.IOException;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.FloatControl;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.SourceDataLine;
import javax.sound.sampled.UnsupportedAudioFileException;
/**
 * Plays a WAV file on a background thread via the Java Sound API,
 * optionally panning the output fully left or right.
 */
public class AePlayWave extends Thread {
    /** Path of the WAV file to play. */
    private String filename;
    /** Requested stereo pan position. */
    private Position curPosition;
    /** Read/playback chunk size in bytes (512 KiB). */
    private final int EXTERNAL_BUFFER_SIZE = 524288;

    enum Position {
        LEFT, RIGHT, NORMAL
    };

    /** Plays {@code wavfile} with no panning. */
    public AePlayWave(String wavfile) {
        filename = wavfile;
        curPosition = Position.NORMAL;
    }

    /** Plays {@code wavfile} panned to position {@code p}. */
    public AePlayWave(String wavfile, Position p) {
        filename = wavfile;
        curPosition = p;
    }

    /**
     * Opens the WAV file, streams it to a SourceDataLine, and blocks until
     * playback finishes. Errors are reported to stderr and end playback.
     */
    public void run() {
        File soundFile = new File(filename);
        if (!soundFile.exists()) {
            System.err.println("Wave file not found: " + filename);
            return;
        }
        AudioInputStream audioInputStream = null;
        try {
            audioInputStream = AudioSystem.getAudioInputStream(soundFile);
        } catch (UnsupportedAudioFileException e1) {
            e1.printStackTrace();
            return;
        } catch (IOException e1) {
            e1.printStackTrace();
            return;
        }
        AudioFormat format = audioInputStream.getFormat();
        SourceDataLine auline = null;
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);
        try {
            auline = (SourceDataLine) AudioSystem.getLine(info);
            auline.open(format);
        } catch (LineUnavailableException e) {
            e.printStackTrace();
            return;
        } catch (Exception e) {
            e.printStackTrace();
            return;
        }
        // Pan only works when the mixer exposes the control (typically stereo lines).
        if (auline.isControlSupported(FloatControl.Type.PAN)) {
            FloatControl pan = (FloatControl) auline
                    .getControl(FloatControl.Type.PAN);
            if (curPosition == Position.RIGHT)
                pan.setValue(1.0f);
            else if (curPosition == Position.LEFT)
                pan.setValue(-1.0f);
        }
        auline.start();
        int nBytesRead = 0;
        byte[] abData = new byte[EXTERNAL_BUFFER_SIZE];
        try {
            // Pump decoded audio into the line until end of stream (-1).
            while (nBytesRead != -1) {
                nBytesRead = audioInputStream.read(abData, 0, abData.length);
                if (nBytesRead >= 0)
                    auline.write(abData, 0, nBytesRead);
            }
        } catch (IOException e) {
            e.printStackTrace();
            return;
        } finally {
            // drain() blocks until all buffered audio has actually played.
            auline.drain();
            auline.close();
            // BUG FIX: the audio stream was never closed (file handle leak).
            try {
                audioInputStream.close();
            } catch (IOException ignored) {
                // best effort on close
            }
        }
    }

    // BUG FIX: main() was pasted AFTER the class's closing brace, which does
    // not compile; it now lives inside the class body.
    public static void main(String args[]) {
        AePlayWave p = new AePlayWave("bg.wav");
        p.start();
    }
}
https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
这是 WAV 文件格式(RIFF 结构)的定义,可参照该定义生成文件。
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;
/**
 * Records 20 seconds of microphone audio (16 kHz, 16-bit, mono PCM) and
 * writes it out as a canonical 44-byte-header WAV file.
 * Header layout reference: https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
 */
public class SpeechStore {
    // ---- WAV (RIFF) header fields; multi-byte values are little-endian on disk ----
    private static byte[] RIFF = "RIFF".getBytes();
    private static byte[] RIFF_SIZE = new byte[4];       // 36 + <data size>
    private static byte[] RIFF_TYPE = "WAVE".getBytes();
    private static byte[] FORMAT = "fmt ".getBytes();    // chunk id, trailing space required
    private static byte[] FORMAT_SIZE = new byte[4];     // 16 for plain PCM
    private static byte[] FORMATTAG = new byte[2];       // 1 = uncompressed PCM
    private static byte[] CHANNELS = new byte[2];
    private static byte[] SamplesPerSec = new byte[4];
    private static byte[] AvgBytesPerSec = new byte[4];  // SamplesPerSec * BlockAlign
    private static byte[] BlockAlign = new byte[2];      // Channels * BitsPerSample / 8
    private static byte[] BitsPerSample = new byte[2];
    private static byte[] DataChunkID = "data".getBytes();
    private static byte[] DataSize = new byte[4];        // raw PCM byte count

    /** Capture flag: set true by capAudio(), cleared by Timer to end recording. */
    public static boolean isrecording = false;

    public void writeToWave() {
        // Not implemented; main() currently performs the writing.
    }

    /**
     * Fills the header fields for 16 kHz, 16-bit, mono PCM.
     * revers() converts intToBytes()'s big-endian output into the
     * little-endian byte order the WAV format requires.
     */
    public static void init() {
        FORMAT_SIZE = new byte[]{(byte) 16, (byte) 0, (byte) 0, (byte) 0};
        byte[] tmp = revers(intToBytes(1));
        FORMATTAG = new byte[]{tmp[0], tmp[1]};      // 1 = PCM
        CHANNELS = new byte[]{tmp[0], tmp[1]};       // mono
        SamplesPerSec = revers(intToBytes(16000));
        AvgBytesPerSec = revers(intToBytes(32000));  // 16000 samples/s * 2 bytes/sample
        tmp = revers(intToBytes(2));
        BlockAlign = new byte[]{tmp[0], tmp[1]};     // 1 channel * 16 bits / 8
        tmp = revers(intToBytes(16));
        BitsPerSample = new byte[]{tmp[0], tmp[1]};
    }

    /** Returns a reversed copy of tmp (flips endianness). */
    public static byte[] revers(byte[] tmp) {
        byte[] reversed = new byte[tmp.length];
        for (int i = 0; i < tmp.length; i++) {
            reversed[i] = tmp[tmp.length - i - 1];
        }
        return reversed;
    }

    /** Splits an int into 4 bytes, most significant byte first (big-endian). */
    public static byte[] intToBytes(int num) {
        byte[] bytes = new byte[4];
        bytes[0] = (byte) (num >> 24);
        bytes[1] = (byte) ((num >> 16) & 0x000000FF);
        bytes[2] = (byte) ((num >> 8) & 0x000000FF);
        bytes[3] = (byte) (num & 0x000000FF);
        return bytes;
    }

    public static void main(String[] args) {
        InputStream input = capAudio();
        int toaldatasize = 0;
        int audiolen;
        byte[] audiochunk = new byte[1024];
        // RIFF_SIZE and DataSize are only known once capture ends, and the file
        // must be written front-to-back, so buffer all PCM data in memory first.
        ByteArrayOutputStream bytebuff = new ByteArrayOutputStream(9600000);
        Timer tm = new Timer(20000);
        tm.start();
        try {
            while (isrecording) {
                audiolen = input.read(audiochunk);
                if (audiolen == -1) {
                    // BUG FIX: read() returns -1 at end of stream; the old code
                    // added -1 to the total and passed it to write(), corrupting
                    // the header sizes.
                    break;
                }
                toaldatasize += audiolen;
                bytebuff.write(audiochunk, 0, audiolen);
            }
        } catch (IOException e1) {
            e1.printStackTrace();
        }
        DataSize = revers(intToBytes(toaldatasize));
        // BUG FIX: the RIFF chunk size is 36 + <data size> (total file size of
        // 44 + data, minus the 8 bytes of "RIFF" plus the size field itself).
        // The old value (toaldatasize + 36 - 8) was 8 bytes short, which makes
        // players truncate or reject the file.
        RIFF_SIZE = revers(intToBytes(toaldatasize + 36));
        File wavfile = new File("F:\\writedformdata.wav");
        FileOutputStream file = null;
        BufferedOutputStream fw = null;
        try {
            file = new FileOutputStream(wavfile);
            fw = new BufferedOutputStream(file);
            init();
            fw.write(RIFF);
            fw.write(RIFF_SIZE);
            fw.write(RIFF_TYPE);
            fw.write(FORMAT);
            fw.write(FORMAT_SIZE);
            fw.write(FORMATTAG);
            fw.write(CHANNELS);
            fw.write(SamplesPerSec);
            fw.write(AvgBytesPerSec);
            fw.write(BlockAlign);
            fw.write(BitsPerSample);
            fw.write(DataChunkID);
            fw.write(DataSize);
            fw.write(bytebuff.toByteArray());
            fw.flush();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // BUG FIX: the streams were never closed (file handle leak; data
            // could stay unflushed on an abnormal path).
            if (fw != null) {
                try {
                    fw.close();
                } catch (IOException ignored) {
                    // best effort on close
                }
            } else if (file != null) {
                try {
                    file.close();
                } catch (IOException ignored) {
                    // best effort on close
                }
            }
        }
    }

    /** Opens the microphone as 16 kHz / 16-bit / mono PCM and starts capture. */
    public static InputStream capAudio() {
        float fFrameRate = 16000.0F;
        TargetDataLine target_line = null;
        AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                fFrameRate, 16, 1, 2, fFrameRate, false);
        DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class,
                format, 65536);
        try {
            target_line = (TargetDataLine) AudioSystem.getLine(lineInfo);
            target_line.open(format, 655360);
        } catch (LineUnavailableException e) {
            System.err
                    .println("ERROR: LineUnavailableException at AudioSender()");
            e.printStackTrace();
        }
        if (target_line == null) {
            // BUG FIX: previously fell through to a NullPointerException in
            // new AudioInputStream(null) when no capture line was available.
            throw new IllegalStateException("No audio capture line available");
        }
        AudioInputStream audio_input = new AudioInputStream(target_line);
        target_line.start();
        isrecording = true;
        return audio_input;
    }

    public void stopRecord() {
        // Not implemented; Timer stops capture by clearing isrecording.
    }
}
// Timer is a one-shot countdown thread: after the given delay it clears
// SpeechStore.isrecording so the audio-capture loop stops.
public class Timer extends Thread {
    /** Delay in milliseconds before recording is stopped. */
    private int len;

    public Timer(int len_) {
        this.len = len_;
    }

    public void run() {
        try {
            Thread.sleep(len);
        } catch (InterruptedException e) {
            // BUG FIX: restore the interrupt status instead of swallowing it;
            // an interrupt simply stops the recording early (flag cleared below).
            Thread.currentThread().interrupt();
        }
        SpeechStore.isrecording = false;
    }
}