Overview
The AudioRecord class manages the audio resources of an Android application so that developers can record sound collected by the recording hardware. This is done by "pulling" (reading) the data synchronously from the AudioRecord object. The application is responsible for polling the AudioRecord object in time, using one of three methods: read(byte[], int, int), read(short[], int, int), or read(java.nio.ByteBuffer, int). Which method to use depends on the audio-data storage format that is most convenient for the user of AudioRecord.
At creation time, an AudioRecord object initializes its associated audio buffer, which it fills with new audio data. The size of this buffer, specified during construction, determines how long an AudioRecord can record before "over-running" data that has not been read yet. Data should be read from the audio hardware in chunks smaller than the total recording buffer size.
An application that creates an AudioRecord instance needs the Manifest.permission.RECORD_AUDIO permission, otherwise the Builder will throw an UnsupportedOperationException.
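On Android 6.0 and above, RECORD_AUDIO is also a runtime ("dangerous") permission, so the manifest entry alone is not enough. A minimal sketch of the runtime check, assuming it runs inside an Activity and using an arbitrary request code:
// REQUEST_RECORD_AUDIO is an arbitrary request code chosen for this example
private static final int REQUEST_RECORD_AUDIO = 1001;

private void ensureRecordPermission() {
    if (ContextCompat.checkSelfPermission(this, Manifest.permission.RECORD_AUDIO)
            != PackageManager.PERMISSION_GRANTED) {
        // the result is delivered to onRequestPermissionsResult()
        ActivityCompat.requestPermissions(this,
                new String[]{Manifest.permission.RECORD_AUDIO}, REQUEST_RECORD_AUDIO);
    }
}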
Recording steps
1. Create an AudioRecord object
- audioSource: the audio source, e.g. the microphone, a voice call, or system-internal audio
- sampleRateInHz: audio sample rate in Hz
- channelConfig: mono, stereo, ...
- audioFormat: audio sample format, i.e. the encoding and size of each sample; only 8-bit and 16-bit PCM are supported
- bufferSizeInBytes: total size (in bytes) of the buffer that audio data is written to during recording; the minimum size required by the system can be obtained with getMinBufferSize().
audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRateInHz, channelConfig,
audioFormat, recordMinBufferSize);
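Even when the constructor returns without an exception, the underlying recorder may fail to initialize (for example, when the buffer is too small or the RECORD_AUDIO permission is missing), so it is worth checking the state before recording. A minimal sketch:
if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
    Log.e(TAG, "AudioRecord initialization failed, check parameters and permission");
    return;
}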
2. Create the buffer
The minimum buffer size required by the system can be obtained with AudioRecord.getMinBufferSize().
int recordMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
buffer = new byte[recordMinBufferSize];
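getMinBufferSize() returns an error code when the requested parameters are not supported by the hardware; checking for that, and allocating somewhat more than the minimum, gives the reading loop some headroom. A sketch (the factor of 2 is an arbitrary choice for this example):
int minBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
    throw new IllegalArgumentException("Unsupported AudioRecord parameters");
}
byte[] buffer = new byte[minBufferSize * 2];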
3. Start recording
Open an output stream, then repeatedly read audio data from the AudioRecord into the buffer and write the buffer's contents to the stream.
if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
recorderState = true;
audioRecord.startRecording();
new RecordThread(path, name).start();
} else {
Log.i(TAG, "start: " + audioRecord.getState());
}
...
FileOutputStream fos = null;
try {
fos = new FileOutputStream(cachePath);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
if (fos == null) {
Log.i(TAG, "run: 未找到缓存文件" + cachePath);
return;
}
int read;
while (recorderState && !isInterrupted()) {
read = audioRecord.read(buffer, 0, buffer.length);
if (read > 0) {
try {
// write only the bytes actually read, not the whole buffer
fos.write(buffer, 0, read);
Log.i(TAG, "run: wrote audio data -> " + read);
} catch (IOException e) {
e.printStackTrace();
}
}
}
try {
fos.flush();
fos.close();
} catch (IOException e) {
e.printStackTrace();
}
PcmToWavUtil.getInstance().pcmToWav(cachePath, path + name);
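The loop above uses the read(byte[], int, int) overload. When the 16-bit samples are processed directly (for example, to drive a level meter), the read(short[], int, int) overload avoids assembling samples from bytes by hand. A minimal sketch, not part of the utility class below:
short[] pcm = new short[recordMinBufferSize / 2];
int samples = audioRecord.read(pcm, 0, pcm.length);
if (samples > 0) {
    long sum = 0;
    for (int i = 0; i < samples; i++) {
        sum += (long) pcm[i] * pcm[i];
    }
    double rms = Math.sqrt(sum / (double) samples); // rough volume estimate
}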
4. Stop recording
public void stop() {
recorderState = false;
if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
audioRecord.stop();
}
}
5. Release resources
public void release() {
recorderState = false;
if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
audioRecord.stop();
}
audioRecord.release();
audioRecord = null;
}
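After release() the AudioRecord object cannot be reused, and in the singleton below audioRecord is set to null, so init() has to run again before the next recording. One way to guard against this (a sketch, not part of the original class):
public void start(String path, String name) {
    if (audioRecord == null) {
        init(); // re-create the AudioRecord after a previous release()
    }
    // ... then proceed as in the start() method shown below
}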
Usage example
The complete example is shown below: the recording is cached as PCM, then converted to WAV and stored locally.
public class AudioRecordUtil {
public static final int sampleRateInHz = 16000;
public static final int channelConfig = AudioFormat.CHANNEL_IN_MONO;
public static final int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
private boolean recorderState = true;
private byte[] buffer;
private AudioRecord audioRecord;
private static AudioRecordUtil audioRecordUtil = new AudioRecordUtil();
private String TAG = "AudioRecordUtil";
public static AudioRecordUtil getInstance() {
return audioRecordUtil;
}
private AudioRecordUtil() {
init();
}
@SuppressLint("MissingPermission")
private void init() {
int recordMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
buffer = new byte[recordMinBufferSize];
audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRateInHz, channelConfig,
audioFormat, recordMinBufferSize);
}
public void start(String path, String name) {
if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
recorderState = true;
audioRecord.startRecording();
new RecordThread(path, name).start();
} else {
Log.i(TAG, "start: " + audioRecord.getState());
}
}
public void stop() {
recorderState = false;
if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
audioRecord.stop();
}
}
public void release() {
recorderState = false;
if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
audioRecord.stop();
}
audioRecord.release();
audioRecord = null;
}
private class RecordThread extends Thread {
private String cachePath;
private String name;
private String path;
public RecordThread(String path, String name) {
this.path = path;
this.name = name;
this.cachePath = path + "cache.pcm";
}
@Override
public void run() {
Log.i(TAG, "run: pcm目录=path" + cachePath);
FileUtils.delete(cachePath);
boolean file = FileUtils.createFile(path, "cache.pcm");
if (file)
Log.i(TAG, "run: 创建缓存文件成功:" + cachePath);
else {
Log.i(TAG, "run: 创建缓存文件失败:" + cachePath);
return;
}
FileOutputStream fos = null;
try {
fos = new FileOutputStream(cachePath);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
if (fos == null) {
Log.i(TAG, "run: 未找到缓存文件" + cachePath);
return;
}
int read;
while (recorderState && !isInterrupted()) {
read = audioRecord.read(buffer, 0, buffer.length);
if (read > 0) {
try {
// write only the bytes actually read, not the whole buffer
fos.write(buffer, 0, read);
Log.i(TAG, "run: wrote audio data -> " + read);
} catch (IOException e) {
e.printStackTrace();
}
}
}
try {
fos.flush();
fos.close();
} catch (IOException e) {
e.printStackTrace();
}
PcmToWavUtil.getInstance().pcmToWav(cachePath, path + name);
}
}
public byte[] convert(String path) throws IOException {
FileInputStream fis = new FileInputStream(path);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
byte[] b = new byte[1024];
for (int readNum; (readNum = fis.read(b)) != -1; ) {
bos.write(b, 0, readNum);
}
byte[] bytes = bos.toByteArray();
fis.close();
bos.close();
return bytes;
}
}
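A typical call sequence from an Activity might look like this (the directory and file name are placeholder values for this example):
// app-private external storage; "record.wav" is an illustrative file name
String dir = getExternalFilesDir(null).getAbsolutePath() + File.separator;
AudioRecordUtil.getInstance().start(dir, "record.wav");
// ... later, e.g. when the user taps a stop button ...
AudioRecordUtil.getInstance().stop();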
PCM to WAV
PCM is raw audio data; to turn it into a WAV file, a 44-byte header has to be prepended.
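For the recording configuration used above (16 kHz, mono, 16-bit PCM) the header fields work out as follows: byte rate = 16000 × 1 × 16 / 8 = 32000 bytes per second (header bytes 28-31), block align = 1 × 16 / 8 = 2 bytes per frame (bytes 32-33), and the data-chunk size in bytes 40-43 is simply the length of the PCM file.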
public class PcmToWavUtil {
private int mBufferSize;
private int mSampleRate = 44100;
private int mChannel = AudioFormat.CHANNEL_IN_STEREO;
private int mEncoding = AudioFormat.ENCODING_PCM_16BIT;
private static class SingleHolder {
static PcmToWavUtil mInstance = new PcmToWavUtil();
}
public static PcmToWavUtil getInstance() {
return SingleHolder.mInstance;
}
public PcmToWavUtil() {
mSampleRate = AudioRecordUtil.sampleRateInHz;
mChannel = AudioRecordUtil.channelConfig;
mEncoding = AudioRecordUtil.audioFormat;
Log.i("AudioRecordUtil", "PcmToWavUtil:mChannel "+mChannel +"mEncoding:"+mEncoding+"mSampleRate:"+mSampleRate);
this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannel, mEncoding);
}
public PcmToWavUtil(int sampleRate, int channel, int encoding) {
this.mSampleRate = sampleRate;
this.mChannel = channel;
this.mEncoding = encoding;
this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannel, mEncoding);
}
public void pcmToWav(String inFilename, String outFilename, boolean deleteOrg) {
FileInputStream in;
FileOutputStream out;
long totalAudioLen;
long totalDataLen;
long longSampleRate = mSampleRate;
int channels = mChannel == AudioFormat.CHANNEL_IN_MONO ? 1 : 2;
// mEncoding is an AudioFormat constant (ENCODING_PCM_16BIT == 2), not the bit depth
int bitsPerSample = mEncoding == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
long byteRate = bitsPerSample * longSampleRate * channels / 8;
byte[] data = new byte[mBufferSize];
try {
in = new FileInputStream(inFilename);
out = new FileOutputStream(outFilename);
totalAudioLen = in.getChannel().size();
totalDataLen = totalAudioLen + 36;
writeWaveFileHeader(out, totalAudioLen, totalDataLen,
longSampleRate, channels, byteRate);
int length;
while ((length = in.read(data)) > 0) {
// write only the bytes actually read from the PCM file
out.write(data, 0, length);
}
in.close();
out.flush();
out.close();
if (deleteOrg) {
new File(inFilename).delete();
}
} catch (IOException e) {
e.printStackTrace();
}
}
public void pcmToWav(String inFilename, String outFilename) {
pcmToWav(inFilename, outFilename, false);
}
private void writeWaveFileHeader(FileOutputStream out, long totalAudioLen,
long totalDataLen, long longSampleRate, int channels, long byteRate)
throws IOException {
byte[] header = new byte[44];
int bitsPerSample = mEncoding == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
header[0] = 'R';
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f';
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16;
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 1;
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
header[32] = (byte) (channels * bitsPerSample / 8); // block align
header[33] = 0;
header[34] = (byte) bitsPerSample; // bits per sample
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
out.write(header, 0, 44);
}
}
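PcmToWavUtil can also be used on its own for an existing PCM file; pcmPath and wavPath below are placeholders for real file paths:
PcmToWavUtil converter = new PcmToWavUtil(16000, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
converter.pcmToWav(pcmPath, wavPath, true); // true deletes the original PCM file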
END.