问题是这样的：在模拟器上调试时，回调函数里能收到音频数据；但用 iPad 2 真机调试时，队列里的音频数据长度是 0。下面是代码：
#import "PcmRecorder.h"
#pragma mark -
#pragma mark Recorder callback

/// AudioQueue input callback: invoked on the queue's internal thread each time
/// a capture buffer has been filled with PCM data.
///
/// @param inUserData    The PcmRecorder instance passed to AudioQueueNewInput.
/// @param inAQ          The recording queue.
/// @param inBuffer      The buffer that has just been filled.
/// @param inStartTime   Timestamp of the first sample (unused here).
/// @param inNumPackets  Number of packets in the buffer (unused here).
/// @param inPacketDesc  Packet descriptions; NULL for constant-bitrate PCM.
static void MyInputBufferHandler(void *inUserData,
                                 AudioQueueRef inAQ,
                                 AudioQueueBufferRef inBuffer,
                                 const AudioTimeStamp *inStartTime,
                                 UInt32 inNumPackets,
                                 const AudioStreamPacketDescription *inPacketDesc)
{
    // Grab one buffer's worth of captured data.
    unsigned int buf_length = inBuffer->mAudioDataByteSize;
    void *buf = inBuffer->mAudioData;

    if (buf_length == 0) {
        // Nothing was captured; hand the buffer straight back to the queue.
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
        NSLog(@"recording fail!!");
        return;
    }

    NSLog(@"AudioRecordAQInputCallback");
    PcmRecorder *recorder = (PcmRecorder *)inUserData;
    // Copy the data out BEFORE re-enqueueing: the queue may start refilling
    // the buffer as soon as it is enqueued again.
    [recorder setRecordData:(Byte *)buf length:buf_length];

    // BUG FIX: the buffer must always be returned to the queue. In the
    // original code this call was commented out on the success path, so
    // recording silently stopped once all QUEUE_BUFFER_SIZE buffers had
    // been delivered.
    AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}

@implementation PcmRecorder

- (id)init
{
    self = [super init];
    if (self) {
        synlock = [[NSLock alloc] init];
    }
    return self;
}

/// Fills in the PCM stream description and creates the input audio queue.
- (void)initAudio
{
    audioDescription.mSampleRate       = 44100;                  // sample rate in Hz
    audioDescription.mFormatID         = kAudioFormatLinearPCM;
    // BUG FIX: iOS hardware captures native (little-endian) linear PCM.
    // Requesting kLinearPCMFormatFlagIsBigEndian can appear to work in the
    // Simulator yet yield zero-length buffers on a real device, so the
    // big-endian flag has been removed.
    audioDescription.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    audioDescription.mChannelsPerFrame = 1;                      // mono
    audioDescription.mFramesPerPacket  = 1;                      // one frame per packet for linear PCM
    audioDescription.mBitsPerChannel   = 8;                      // 8-bit samples (the original comment wrongly said 16-bit)
    audioDescription.mBytesPerFrame    = (audioDescription.mBitsPerChannel / 8) * audioDescription.mChannelsPerFrame;
    audioDescription.mBytesPerPacket   = audioDescription.mBytesPerFrame;

    // Create the recording queue; with a NULL run loop the callback is
    // dispatched on one of the queue's own internal threads.
    OSStatus status = AudioQueueNewInput(&audioDescription, MyInputBufferHandler, self,
                                         NULL, kCFRunLoopCommonModes, 0, &audioQueue);
    if (status != noErr) {
        // BUG FIX: the original ignored this status; an unsupported format
        // fails here and every later queue call then works on a bad handle.
        NSLog(@"AudioQueueNewInput failed: %d", (int)status);
    }
}

/// Allocates and primes the capture buffers, then starts recording.
- (void)record
{
    [synlock lock];
    int ret = 0;
    [self initAudio];
    NSLog(@"recording start");
    // Prime the queue: every buffer must be enqueued before starting,
    // otherwise the queue has nowhere to put captured audio.
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        ret = AudioQueueAllocateBuffer(audioQueue, 44100, &audioQueueBuffers[i]);
        NSLog(@"ret = %d", ret);
        ret = AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffers[i], 0, NULL);
        NSLog(@"ret = %d", ret);
    }
    ret = AudioQueueStart(audioQueue, NULL);
    NSLog(@"ret = %d", ret);
    [synlock unlock];
}

/// Stops recording immediately and releases the queue and its buffers.
- (void)stopRecord
{
    AudioQueueStop(audioQueue, YES);
    AudioQueueDispose(audioQueue, YES);  // also frees the queue's buffers
}

/// Snapshots one callback's worth of PCM bytes into an NSData.
- (void)setRecordData:(Byte *)pcm_data_buf length:(unsigned int)buf_length
{
    // NOTE(review): dataWithBytes: returns an autoreleased object; under MRC
    // this ivar must be retained if it is read after the current autorelease
    // pool drains — confirm the project's memory-management mode.
    recordData = [NSData dataWithBytes:pcm_data_buf length:buf_length];
}

- (void)setBufLength:(unsigned int)len
{
    bufLength = len;
}

/// Returns the most recently captured chunk of PCM data.
- (NSData *)getRecordData
{
    return recordData;
}

@end
#pragma mark -
#pragma mark Recorder callback

/// AudioQueue input callback: invoked on the queue's internal thread each time
/// a capture buffer has been filled with PCM data.
///
/// @param inUserData    The PcmRecorder instance passed to AudioQueueNewInput.
/// @param inAQ          The recording queue.
/// @param inBuffer      The buffer that has just been filled.
/// @param inStartTime   Timestamp of the first sample (unused here).
/// @param inNumPackets  Number of packets in the buffer (unused here).
/// @param inPacketDesc  Packet descriptions; NULL for constant-bitrate PCM.
static void MyInputBufferHandler(void *inUserData,
                                 AudioQueueRef inAQ,
                                 AudioQueueBufferRef inBuffer,
                                 const AudioTimeStamp *inStartTime,
                                 UInt32 inNumPackets,
                                 const AudioStreamPacketDescription *inPacketDesc)
{
    // Grab one buffer's worth of captured data.
    unsigned int buf_length = inBuffer->mAudioDataByteSize;
    void *buf = inBuffer->mAudioData;

    if (buf_length == 0) {
        // Nothing was captured; hand the buffer straight back to the queue.
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
        NSLog(@"recording fail!!");
        return;
    }

    NSLog(@"AudioRecordAQInputCallback");
    PcmRecorder *recorder = (PcmRecorder *)inUserData;
    // Copy the data out BEFORE re-enqueueing: the queue may start refilling
    // the buffer as soon as it is enqueued again.
    [recorder setRecordData:(Byte *)buf length:buf_length];

    // BUG FIX: the buffer must always be returned to the queue. In the
    // original code this call was commented out on the success path, so
    // recording silently stopped once all QUEUE_BUFFER_SIZE buffers had
    // been delivered.
    AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}

@implementation PcmRecorder

- (id)init
{
    self = [super init];
    if (self) {
        synlock = [[NSLock alloc] init];
    }
    return self;
}

/// Fills in the PCM stream description and creates the input audio queue.
- (void)initAudio
{
    audioDescription.mSampleRate       = 44100;                  // sample rate in Hz
    audioDescription.mFormatID         = kAudioFormatLinearPCM;
    // BUG FIX: iOS hardware captures native (little-endian) linear PCM.
    // Requesting kLinearPCMFormatFlagIsBigEndian can appear to work in the
    // Simulator yet yield zero-length buffers on a real device, so the
    // big-endian flag has been removed.
    audioDescription.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    audioDescription.mChannelsPerFrame = 1;                      // mono
    audioDescription.mFramesPerPacket  = 1;                      // one frame per packet for linear PCM
    audioDescription.mBitsPerChannel   = 8;                      // 8-bit samples (the original comment wrongly said 16-bit)
    audioDescription.mBytesPerFrame    = (audioDescription.mBitsPerChannel / 8) * audioDescription.mChannelsPerFrame;
    audioDescription.mBytesPerPacket   = audioDescription.mBytesPerFrame;

    // Create the recording queue; with a NULL run loop the callback is
    // dispatched on one of the queue's own internal threads.
    OSStatus status = AudioQueueNewInput(&audioDescription, MyInputBufferHandler, self,
                                         NULL, kCFRunLoopCommonModes, 0, &audioQueue);
    if (status != noErr) {
        // BUG FIX: the original ignored this status; an unsupported format
        // fails here and every later queue call then works on a bad handle.
        NSLog(@"AudioQueueNewInput failed: %d", (int)status);
    }
}

/// Allocates and primes the capture buffers, then starts recording.
- (void)record
{
    [synlock lock];
    int ret = 0;
    [self initAudio];
    NSLog(@"recording start");
    // Prime the queue: every buffer must be enqueued before starting,
    // otherwise the queue has nowhere to put captured audio.
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        ret = AudioQueueAllocateBuffer(audioQueue, 44100, &audioQueueBuffers[i]);
        NSLog(@"ret = %d", ret);
        ret = AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffers[i], 0, NULL);
        NSLog(@"ret = %d", ret);
    }
    ret = AudioQueueStart(audioQueue, NULL);
    NSLog(@"ret = %d", ret);
    [synlock unlock];
}

/// Stops recording immediately and releases the queue and its buffers.
- (void)stopRecord
{
    AudioQueueStop(audioQueue, YES);
    AudioQueueDispose(audioQueue, YES);  // also frees the queue's buffers
}

/// Snapshots one callback's worth of PCM bytes into an NSData.
- (void)setRecordData:(Byte *)pcm_data_buf length:(unsigned int)buf_length
{
    // NOTE(review): dataWithBytes: returns an autoreleased object; under MRC
    // this ivar must be retained if it is read after the current autorelease
    // pool drains — confirm the project's memory-management mode.
    recordData = [NSData dataWithBytes:pcm_data_buf length:buf_length];
}

- (void)setBufLength:(unsigned int)len
{
    bufLength = len;
}

/// Returns the most recently captured chunk of PCM data.
- (NSData *)getRecordData
{
    return recordData;
}

@end
问题终于解决啦！原来是我的程序里另外有一个地方一直占用着 AVAudioSession——就是那个为了让程序在后台继续运行而设置的。从后台切回前台时，因为一些判断上的疏漏，没有成功停止那个 AudioPlayer。