1. AudioTrack
AudioTrack plays audio data in PCM stream format. For files such as MP3 or WAV, the player creates the corresponding decoder at the framework layer; the decoder turns the file into a PCM stream and passes that data to AudioTrack. AudioTrack has two playback modes (a usage sketch follows this list):
- MODE_STREAM: data is written into the AudioTrack continuously through repeated write() calls. This is used for large amounts of data where latency is less critical, e.g. playing music; the whole stream cannot be copied into the track in one go, since the system could not allocate enough memory for it, so it is fed chunk by chunk.
- MODE_STATIC: all of the data is handed to the AudioTrack in a single call before playback starts. This is used for small amounts of data with tight latency requirements, e.g. UI and game sound effects.
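As a minimal usage sketch of the two modes (my own example, not framework code: the 44.1 kHz stereo 16-bit format, the PCM InputStream source and the clip contents are all assumed values), the stream case keeps feeding chunks through write(), while the static case uploads the whole clip once before play():

import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import java.io.IOException;
import java.io.InputStream;

// Sketch of the two transfer modes.
public final class AudioTrackModes {

    // MODE_STREAM: keep write()-ing decoded PCM chunks while playback is running.
    public static void playStream(InputStream pcm) throws IOException {
        final int sampleRate = 44100;
        final int channelMask = AudioFormat.CHANNEL_OUT_STEREO;
        final int encoding = AudioFormat.ENCODING_PCM_16BIT;
        final int minBufSize = AudioTrack.getMinBufferSize(sampleRate, channelMask, encoding);

        AudioTrack track = new AudioTrack.Builder()
                .setAudioAttributes(new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                        .build())
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(sampleRate)
                        .setChannelMask(channelMask)
                        .setEncoding(encoding)
                        .build())
                .setTransferMode(AudioTrack.MODE_STREAM)
                .setBufferSizeInBytes(minBufSize)
                .build();
        track.play();
        byte[] chunk = new byte[minBufSize];
        int n;
        while ((n = pcm.read(chunk)) > 0) {
            track.write(chunk, 0, n);   // copies one chunk at a time into the track's buffer
        }
        track.stop();
        track.release();
    }

    // MODE_STATIC: the whole (small) clip is uploaded once before play().
    public static AudioTrack playClip(byte[] clip, int sampleRate) {
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                clip.length, AudioTrack.MODE_STATIC);   // the buffer must hold the entire clip
        track.write(clip, 0, clip.length);
        track.play();
        return track;
    }
}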
2. Creating an AudioTrack object
- A detailed reading of the AudioTrack constructor in AudioTrack.java:
private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId, boolean offload, int encapsulationMode,
        @Nullable TunerConfiguration tunerConfiguration)
                throws IllegalArgumentException {
    super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
    mConfiguredAudioAttributes = attributes;
    if (format == null) {
        throw new IllegalArgumentException("Illegal null AudioFormat");
    }
    // Switch to deep-buffer (power-saving) output when the attributes, format and mode allow it.
    if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
        mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
    }
    // Remember which Looper the AudioTrack is created on; it is used for event callbacks.
    Looper looper;
    if ((looper = Looper.myLooper()) == null) {
        looper = Looper.getMainLooper();
    }
    int rate = format.getSampleRate();
    if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
        rate = 0;
    }
    // Pull the channel masks and encoding out of the AudioFormat; if neither a position mask
    // nor an index mask was set, default to stereo.
    int channelIndexMask = 0;
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
        channelIndexMask = format.getChannelIndexMask();
    }
    int channelMask = 0;
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
        channelMask = format.getChannelMask();
    } else if (channelIndexMask == 0) {
        channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
    }
    int encoding = AudioFormat.ENCODING_DEFAULT;
    if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
        encoding = format.getEncoding();
    }
    // Validate the playback parameters and the requested buffer size.
    audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
    mOffloaded = offload;
    mStreamType = AudioSystem.STREAM_DEFAULT;
    audioBuffSizeCheck(bufferSizeInBytes);
    mInitializationLooper = looper;
    if (sessionId < 0) {
        throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
    }
    int[] sampleRate = new int[] {mSampleRate};
    int[] session = new int[1];
    session[0] = sessionId;
    // Create and initialize the native AudioTrack; native_setup() maps to
    // android_media_AudioTrack_setup() in android_media_AudioTrack.cpp.
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
            offload, encapsulationMode, tunerConfiguration,
            getCurrentOpPackageName());
    if (initResult != SUCCESS) {
        loge("Error code "+initResult+" when initializing AudioTrack.");
        return;
    }
    // The native layer writes back the sample rate it actually uses and the session id
    // it allocated.
    mSampleRate = sampleRate[0];
    mSessionId = session[0];
    if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
        int frameSizeInBytes;
        if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
            frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
        } else {
            frameSizeInBytes = 1;
        }
        mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
    }
    // A MODE_STATIC track is not fully initialized until its data has been written.
    if (mDataLoadMode == MODE_STATIC) {
        mState = STATE_NO_STATIC_DATA;
    } else {
        mState = STATE_INITIALIZED;
    }
    baseRegisterPlayer(mSessionId);
    native_setPlayerIId(mPlayerIId);
}
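Every public construction path, AudioTrack.Builder as well as the older public constructors, eventually lands in this private constructor. A rough sketch of the mapping (my own example; the 48 kHz stereo 16-bit format and the 8192-byte buffer are arbitrary values):

import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

// Sketch: the Builder fields become the arguments of the private constructor above
// (attributes, format, bufferSizeInBytes, mode, sessionId).
public final class BuildTrack {
    public static AudioTrack newStreamTrack() {
        return new AudioTrack.Builder()
                .setAudioAttributes(new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                        .build())                                       // -> attributes
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(48000)
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                        .build())                                       // -> format
                .setBufferSizeInBytes(8192)                             // -> bufferSizeInBytes
                .setTransferMode(AudioTrack.MODE_STREAM)                // -> mode
                .setSessionId(AudioManager.AUDIO_SESSION_ID_GENERATE)   // -> sessionId
                .build();   // ends up calling the private constructor shown above
    }
}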
- A detailed look at android_media_AudioTrack_setup() in android_media_AudioTrack.cpp (the JNI function that native_setup() is registered against):
static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
                                           jobject jaa, jintArray jSampleRate,
                                           jint channelPositionMask, jint channelIndexMask,
                                           jint audioFormat, jint buffSizeInBytes, jint memoryMode,
                                           jintArray jSession, jlong nativeAudioTrack,
                                           jboolean offload, jint encapsulationMode,
                                           jobject tunerConfiguration, jstring opPackageName) {
    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d,"
          " nativeAudioTrack=0x%" PRIX64 ", offload=%d encapsulationMode=%d tuner=%p",
          jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
          nativeAudioTrack, offload, encapsulationMode, tunerConfiguration);

    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    const TunerConfigurationHelper tunerHelper(env, tunerConfiguration);

    // Read the requested audio session id out of the Java int[].
    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    audio_session_t sessionId = (audio_session_t) nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    AudioTrackJniStorage* lpJniStorage = NULL;

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    sp<AudioTrack> lpTrack;
    if (nativeAudioTrack == 0) {
        // No existing native AudioTrack was handed over: create and configure a new one.
        if (jaa == 0) {
            ALOGE("Error creating AudioTrack: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }

        if (jSampleRate == 0) {
            ALOGE("Error creating AudioTrack: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }

        int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
        int sampleRateInHertz = sampleRates[0];
        env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);

        // Convert the Java channel masks and format to their native equivalents.
        audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                channelPositionMask, channelIndexMask);
        if (!audio_is_output_channel(nativeChannelMask)) {
            ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
        }

        uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);

        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
            return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
        }

        // Compute the frame count: for PCM formats the buffer size is converted from bytes
        // to frames, otherwise it is used as-is.
        size_t frameCount;
        if (audio_has_proportional_frames(format)) {
            const size_t bytesPerSample = audio_bytes_per_sample(format);
            frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
        } else {
            frameCount = buffSizeInBytes;
        }

        // Create the native AudioTrack object (not yet initialized).
        ScopedUtfChars opPackageNameStr(env, opPackageName);
        AttributionSourceState attributionSource;
        attributionSource.packageName = std::string(opPackageNameStr.c_str());
        attributionSource.token = sp<BBinder>::make();
        lpTrack = new AudioTrack(attributionSource);

        // Read the AudioAttributes values.
        auto paa = JNIAudioAttributeHelper::makeUnique();
        jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
        if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
            return jStatus;
        }
        ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
              paa->usage, paa->content_type, paa->flags, paa->tags);

        // Initialize the callback information: this data is passed with every native callback.
        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.isOffload = offload;
        lpJniStorage->mCallbackData.busy = false;

        audio_offload_info_t offloadInfo;
        if (offload == JNI_TRUE) {
            offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.format = format;
            offloadInfo.sample_rate = sampleRateInHertz;
            offloadInfo.channel_mask = nativeChannelMask;
            offloadInfo.has_video = false;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
        }

        if (encapsulationMode != 0) {
            offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.format = format;
            offloadInfo.sample_rate = sampleRateInHertz;
            offloadInfo.channel_mask = nativeChannelMask;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.encapsulation_mode =
                    static_cast<audio_encapsulation_mode_t>(encapsulationMode);
            offloadInfo.content_id = tunerHelper.getContentId();
            offloadInfo.sync_id = tunerHelper.getSyncId();
        }

        // Initialize the native AudioTrack object according to the Java-side transfer mode.
        status_t status = NO_ERROR;
        switch (memoryMode) {
        case MODE_STREAM:
            status = lpTrack->set(AUDIO_STREAM_DEFAULT,
                                  sampleRateInHertz,
                                  format,
                                  nativeChannelMask, offload ? 0 : frameCount,
                                  offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
                                          : AUDIO_OUTPUT_FLAG_NONE,
                                  audioCallback,
                                  &(lpJniStorage->mCallbackData), // callback data (user)
                                  0,    // notificationFrames
                                  0,    // shared mem
                                  true, // thread can call Java
                                  sessionId,
                                  offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
                                          : AudioTrack::TRANSFER_SYNC,
                                  (offload || encapsulationMode) ? &offloadInfo : NULL,
                                  AttributionSourceState(), // default uid, pid
                                  paa.get());
            break;

        case MODE_STATIC:
            // MODE_STATIC plays from a shared memory region that holds the whole clip.
            if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                goto native_init_failure;
            }

            status = lpTrack->set(AUDIO_STREAM_DEFAULT,
                                  sampleRateInHertz,
                                  format,
                                  nativeChannelMask, frameCount, AUDIO_OUTPUT_FLAG_NONE,
                                  audioCallback,
                                  &(lpJniStorage->mCallbackData), // callback data (user)
                                  0,    // notificationFrames
                                  lpJniStorage->mMemBase, // shared mem
                                  true, // thread can call Java
                                  sessionId,
                                  AudioTrack::TRANSFER_SHARED,
                                  NULL, // default offloadInfo
                                  AttributionSourceState(), // default uid, pid
                                  paa.get());
            break;

        default:
            ALOGE("Unknown mode %d", memoryMode);
            goto native_init_failure;
        }

        if (status != NO_ERROR) {
            ALOGE("Error %d initializing AudioTrack", status);
            goto native_init_failure;
        }
        // Record that this track was created via the Java AudioTrack API.
        lpTrack->setCallerName("java");
    } else {
        // A pre-existing native AudioTrack was handed in: wrap it instead of creating a new one.
        lpTrack = (AudioTrack*)nativeAudioTrack;

        lpJniStorage = new AudioTrackJniStorage();
        lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
        lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
        lpJniStorage->mCallbackData.busy = false;
    }

    lpJniStorage->mAudioTrackCallback =
            new JNIAudioTrackCallback(env, thiz, lpJniStorage->mCallbackData.audioTrack_ref,
                                      javaAudioTrackFields.postNativeEventInJava);
    lpTrack->setAudioTrackCallback(lpJniStorage->mAudioTrackCallback);

    // Write the session id actually used by the native track back to the Java int[].
    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        goto native_init_failure;
    }
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    {
        // Write the sample rate actually used by the native track back to the Java int[].
        const jint elements[1] = { (jint) lpTrack->getSampleRate() };
        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
    }

    {
        Mutex::Autolock l(sLock);
        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
    }

    // Save the native AudioTrack and the JNI storage in fields of the Java object.
    setAudioTrack(env, thiz, lpTrack);
    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);
    env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());

    return (jint) AUDIO_JAVA_SUCCESS;

    // Failure path: release everything allocated above.
native_init_failure:
    if (nSession != NULL) {
        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    }
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
    delete lpJniStorage;
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
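Back in Java, the constructor copies the values this native setup wrote into the sampleRate and session arrays, so the public getters report what the native layer actually configured rather than what was requested. A small sketch of checking them (my own example):

import android.media.AudioTrack;

// Sketch: after construction, query the values chosen by the native layer.
public final class InspectTrack {
    public static void dump(AudioTrack track) {
        // May differ from the requested rate if the native layer adjusted it.
        int sampleRate = track.getSampleRate();
        // Allocated by the native layer when AUDIO_SESSION_ID_GENERATE was requested.
        int sessionId = track.getAudioSessionId();
        int state = track.getState();   // STATE_INITIALIZED or STATE_NO_STATIC_DATA
        System.out.println("rate=" + sampleRate + " session=" + sessionId + " state=" + state);
    }
}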
The buffer allocated when the AudioTrack is created (bufferSizeInBytes on the Java side, converted to a frame count in the native setup above) determines how long the AudioTrack can keep playing before it runs out of data; the arithmetic is worked through in the sketch below.
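For PCM data the playable time is simply the buffer size divided by the byte rate sampleRate × channelCount × bytesPerSample. A short sketch of that arithmetic (my own example; the 48 kHz stereo 16-bit parameters are arbitrary):

import android.media.AudioFormat;
import android.media.AudioTrack;

// Sketch: how long a given buffer lasts before the track runs dry (PCM only).
public final class BufferDuration {
    public static double bufferMillis(int sampleRate, int channelCount, int bytesPerSample,
                                      int bufferSizeInBytes) {
        int bytesPerSecond = sampleRate * channelCount * bytesPerSample;
        return 1000.0 * bufferSizeInBytes / bytesPerSecond;
    }

    public static void main(String[] args) {
        // Example: the minimum buffer for 48 kHz stereo 16-bit PCM.
        int minBuf = AudioTrack.getMinBufferSize(48000,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        // e.g. a 19200-byte buffer at 48000 * 2 * 2 = 192000 B/s lasts 100 ms.
        System.out.println(bufferMillis(48000, 2, 2, minBuf) + " ms before underrun"
                + " (min buffer = " + minBuf + " bytes)");
    }
}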