IT数码 购物 网址 头条 软件 日历 阅读 图书馆
TxT小说阅读器
↓语音阅读,小说下载,古典文学↓
图片批量下载器
↓批量下载图片,美女图库↓
图片自动播放器
↓图片自动播放器↓
一键清除垃圾
↓轻轻一点,清除系统垃圾↓
开发: C++知识库 Java知识库 JavaScript Python PHP知识库 人工智能 区块链 大数据 移动开发 嵌入式 开发工具 数据结构与算法 开发测试 游戏开发 网络协议 系统运维
教程: HTML教程 CSS教程 JavaScript教程 Go语言教程 JQuery教程 VUE教程 VUE3教程 Bootstrap教程 SQL数据库教程 C语言教程 C++教程 Java教程 Python教程 Python3教程 C#教程
数码: 电脑 笔记本 显卡 显示器 固态硬盘 硬盘 耳机 手机 iphone vivo oppo 小米 华为 单反 装机 图拉丁
 
   -> 移动开发 -> Android Audio播放流程详解 -> 正文阅读

[移动开发]Android Audio播放流程详解

1. AudioTrack

AudioTrack用于播放PCM流格式的音频数据。播放器会在framework层创建相应的解码器,解码器将MP3,WAV等格式的音频文件解码成PCM流后,将该数据传递给AudioTrack。
AudioTrack有两种播放模式

  • MODE_STREAM:通过write()将data连续的写入AudioTrack。用于数据量大,延时要求低的情况,如:播放音乐。但是一次性不能拷贝太多的数据,否则系统无法分配足够的内存。
  • MODE_STATIC:一次性将data传递到AudioTrack。常用于数据量小,延时要求高的情况,如:UI,游戏音效。

2.创建AudioTrack对象

  1. 详细解读AudioTrack.java中AudioTrack的构造方法
	  /**
	   * AudioAttribute attribute:音频流信息属性的集合
	   * AudioFormat format:音频格式,这里的音频格式指的是采样率,编码,声道等信息的集合
	   * int bufferSizeInBytes:AudioTrack内部缓冲区的大小
	   * int mode:MODE_STATIC or MODE_STREAM
	   * int sessionId:AudioTrack必须附加的会话id
	   * boolean offload:是否是offload播放模式,一种直接给到硬件播放的格式
	   * int encapsulationMode:封装模式
	   * TunerConfiguration tunerConfiguration:可为null
	   */
      private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
              int mode, int sessionId, boolean offload, int encapsulationMode,
              @Nullable TunerConfiguration tunerConfiguration)
                      throws IllegalArgumentException {
          super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
          // mState already == STATE_UNINITIALIZED
  
  		  //记录当前的音频流信息属性集合,用于getAudioAttributes()
          mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.
  
  		  //检查音频格式是否为null,是则抛出参数异常的提醒
          if (format == null) {
              throw new IllegalArgumentException("Illegal null AudioFormat");
          }
  
          // Check if we should enable deep buffer mode
          //shouldEnablePowerSaving()是否开启省电模式,如果开启省电模式,则需要重新创建AudioAttribute
          if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
              mAttributes = new AudioAttributes.Builder(mAttributes)
                  .replaceFlags((mAttributes.getAllFlags()
                          | AudioAttributes.FLAG_DEEP_BUFFER)
                          & ~AudioAttributes.FLAG_LOW_LATENCY)
                  .build();
          }
  
          // remember which looper is associated with the AudioTrack instantiation
          //记录当前创建AudioTrack的Looper
          Looper looper;
          if ((looper = Looper.myLooper()) == null) {
              looper = Looper.getMainLooper();
          }
  
  		  //如果没有指定采样率,则赋值为0
          int rate = format.getSampleRate();
          if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
              rate = 0;
          }
  
  		  //channelIndexMask和channelMask互斥,如果有channelIndexMask则不使用channelMask,如没有则使用channelMask
          int channelIndexMask = 0;
          if ((format.getPropertySetMask()
                  & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
              channelIndexMask = format.getChannelIndexMask();
          }
          int channelMask = 0;
          if ((format.getPropertySetMask()
                  & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
              channelMask = format.getChannelMask();
          } else if (channelIndexMask == 0) { // if no masks at all, use stereo
          	  //如果channelMask和channelIndexMask都没有指定,则默认为左右声道
              channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                      | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
          }
          //获取编码模式
          int encoding = AudioFormat.ENCODING_DEFAULT;
          if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
              encoding = format.getEncoding();
          }
          //audioPramaCheck()检查相关属性值是否合法
          //在指定采样率后不能小于最小(8000Hz),不能大于最大(1600000Hz)
          //声道:如果编码模式为ENCODING_IEC61937,则声道数必须为2或者是8,
          //编码:检查当前编码是否支持使用的声道数,如果没有指定编码格式则默认为PCM 16bit
          //播放模式:检查当前的播放模式是否是STREAM或者STATIC两者之一,如果是STREAM模式,编码模式必须是PCM线性格式的
          audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
          mOffloaded = offload;
          mStreamType = AudioSystem.STREAM_DEFAULT;
  		  //audioBufferSizeCheck()检查缓冲区大小,只对PCM和ENCODING_IEC61937有效
  		  //bufferSize不能小于1,每一帧的大小必须能被buffer整除
          audioBuffSizeCheck(bufferSizeInBytes);
          mInitializationLooper = looper;
  
          if (sessionId < 0) {
              throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
          }
  
          int[] sampleRate = new int[] {mSampleRate};
          int[] session = new int[1];
          session[0] = sessionId;
          // native initialization
          //检查完属性值后,开始创建native方法
          //native_setup()对应android_media_AudioTrack.cpp中android_media_AudioTrack_setup()
          //wakeReference弱引用,便于回收
          int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                  sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                  mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                  offload, encapsulationMode, tunerConfiguration,
                  getCurrentOpPackageName());
          if (initResult != SUCCESS) {
              loge("Error code "+initResult+" when initializing AudioTrack.");
              return; // with mState == STATE_UNINITIALIZED
          }
  
          mSampleRate = sampleRate[0];
          mSessionId = session[0];
  
          // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.
  
          if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
              int frameSizeInBytes;
              if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                  frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
              } else {
                  frameSizeInBytes = 1;
              }
              mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
          }
  
          if (mDataLoadMode == MODE_STATIC) {
              mState = STATE_NO_STATIC_DATA;
          } else {
              mState = STATE_INITIALIZED;
          }
  
          baseRegisterPlayer(mSessionId);
          native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
      }
  1. android_media_AudioTrack.cpp中的android_media_AudioTrack_setup()方法详解
  /**
   * jobject jaa:AudioAttribute
   */
  android_media_AudioTrack_is_direct_output_supported(JNIEnv *env, jobject thiz,
                                             jobject jaa, jintArray jSampleRate,
                                             jint channelPositionMask, jint channelIndexMask,
                                             jint audioFormat, jint buffSizeInBytes, jint memoryMode,
                                             jintArray jSession, jlong nativeAudioTrack,
                                             jboolean offload, jint encapsulationMode,
                                             jobject tunerConfiguration, jstring opPackageName) {
      ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d,"
            " nativeAudioTrack=0x%" PRIX64 ", offload=%d encapsulationMode=%d tuner=%p",
            jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
            nativeAudioTrack, offload, encapsulationMode, tunerConfiguration);
  
      if (jSession == NULL) {
          ALOGE("Error creating AudioTrack: invalid session ID pointer");
          return (jint) AUDIO_JAVA_ERROR;
      }
  
      const TunerConfigurationHelper tunerHelper(env, tunerConfiguration);
  	  //通过映射关系获取session
      jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
      if (nSession == NULL) {
          ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
          return (jint) AUDIO_JAVA_ERROR;
      }
      audio_session_t sessionId = (audio_session_t) nSession[0];
      env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
      nSession = NULL;
  
      AudioTrackJniStorage* lpJniStorage = NULL;
  
      jclass clazz = env->GetObjectClass(thiz);
      if (clazz == NULL) {
          ALOGE("Can't find %s when setting up callback.", kClassPathName);
          return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
      }
  
      // if we pass in an existing *Native* AudioTrack, we don't need to create/initialize one.
      sp<AudioTrack> lpTrack;
      if (nativeAudioTrack == 0) {
          if (jaa == 0) {
              ALOGE("Error creating AudioTrack: invalid audio attributes");
              return (jint) AUDIO_JAVA_ERROR;
          }
  
          if (jSampleRate == 0) {
              ALOGE("Error creating AudioTrack: invalid sample rates");
              return (jint) AUDIO_JAVA_ERROR;
          }
  
          int* sampleRates = env->GetIntArrayElements(jSampleRate, NULL);
          int sampleRateInHertz = sampleRates[0];
          env->ReleaseIntArrayElements(jSampleRate, sampleRates, JNI_ABORT);
  
          // Invalid channel representations are caught by !audio_is_output_channel() below.
          //将java格式表示的声道转换成native的
          audio_channel_mask_t nativeChannelMask = nativeChannelMaskFromJavaChannelMasks(
                  channelPositionMask, channelIndexMask);
          if (!audio_is_output_channel(nativeChannelMask)) {
              ALOGE("Error creating AudioTrack: invalid native channel mask %#x.", nativeChannelMask);
              return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
          }
  
          uint32_t channelCount = audio_channel_count_from_out_mask(nativeChannelMask);
  
          // check the format.
          // This function was called from Java, so we compare the format against the Java constants
          audio_format_t format = audioFormatToNative(audioFormat);
          if (format == AUDIO_FORMAT_INVALID) {
              ALOGE("Error creating AudioTrack: unsupported audio format %d.", audioFormat);
              return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
          }
  
          // compute the frame count
          size_t frameCount;
          //如果是如果当前格式是线性的
          if (audio_has_proportional_frames(format)) {
          	  //bytesPerSample一个声道中一个数据单位的大小
              const size_t bytesPerSample = audio_bytes_per_sample(format);
              //缓冲区总的大小除以一帧的大小得到这个缓冲区一次性最多能容纳多少帧的数据
              frameCount = buffSizeInBytes / (channelCount * bytesPerSample);
          } else {
          	  //如果不是线性的,则帧数直接为当前buffer的大小
              frameCount = buffSizeInBytes;
          }
  
          // create the native AudioTrack object
          ScopedUtfChars opPackageNameStr(env, opPackageName);
          // TODO b/182469354: make consistent with AudioRecord
          //AttributeSourceState一个aidl对象
          AttributionSourceState attributionSource;
          //是谁创建的AudioTrack
          attributionSource.packageName = std::string(opPackageNameStr.c_str());
          attributionSource.token = sp<BBinder>::make();
          //new native的AudioTrack
          lpTrack = new AudioTrack(attributionSource);
  
          // read the AudioAttributes values
          auto paa = JNIAudioAttributeHelper::makeUnique();
          jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
          if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
              return jStatus;
          }
          ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
                  paa->usage, paa->content_type, paa->flags, paa->tags);
  
          // initialize the callback information:
          // this data will be passed with every AudioTrack callback
          lpJniStorage = new AudioTrackJniStorage();
          lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
          // we use a weak reference so the AudioTrack object can be garbage collected.
          lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
          lpJniStorage->mCallbackData.isOffload = offload;
          lpJniStorage->mCallbackData.busy = false;
  		  //audio_offload_info_t offload相关配置信息
          audio_offload_info_t offloadInfo;
          //如果是offload格式,需要初始化offloadInfo
          if (offload == JNI_TRUE) {
              offloadInfo = AUDIO_INFO_INITIALIZER;
              offloadInfo.format = format;
              offloadInfo.sample_rate = sampleRateInHertz;
              offloadInfo.channel_mask = nativeChannelMask;
              offloadInfo.has_video = false;
              offloadInfo.stream_type = AUDIO_STREAM_MUSIC; //required for offload
          }
  
          if (encapsulationMode != 0) {
              offloadInfo = AUDIO_INFO_INITIALIZER;
              offloadInfo.format = format;
              offloadInfo.sample_rate = sampleRateInHertz;
              offloadInfo.channel_mask = nativeChannelMask;
              offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
              offloadInfo.encapsulation_mode =
                      static_cast<audio_encapsulation_mode_t>(encapsulationMode);
              offloadInfo.content_id = tunerHelper.getContentId();
              offloadInfo.sync_id = tunerHelper.getSyncId();
          }
  
          // initialize the native AudioTrack object
          status_t status = NO_ERROR;
          //判断播放模式,进行不同的初始化
          switch (memoryMode) {
          case MODE_STREAM:
              status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                          // in paa (last argument)
                                    sampleRateInHertz,
                                    format, // word length, PCM
                                    nativeChannelMask, offload ? 0 : frameCount,
                                    offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
                                            : AUDIO_OUTPUT_FLAG_NONE,
                                    audioCallback,
                                    &(lpJniStorage->mCallbackData), // callback, callback data (user)
                                    0,    // notificationFrames == 0 since not using EVENT_MORE_DATA
                                          // to feed the AudioTrack
                                    0,    // shared mem
                                    true, // thread can call Java
                                    sessionId, // audio session ID
                                    offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
                                            : AudioTrack::TRANSFER_SYNC,
                                    (offload || encapsulationMode) ? &offloadInfo : NULL,
                                    AttributionSourceState(), // default uid, pid values
                                    paa.get());
              break;
  
          case MODE_STATIC:
              // AudioTrack is using shared memory
  
              if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
                  ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
                  goto native_init_failure;
              }
  
              status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
                                                          // in paa (last argument)
                                    sampleRateInHertz,
                                    format, // word length, PCM
                                    nativeChannelMask, frameCount, AUDIO_OUTPUT_FLAG_NONE,
                                    audioCallback,
                                    &(lpJniStorage->mCallbackData), // callback, callback data (user)
                                    0, // notificationFrames == 0 since not using EVENT_MORE_DATA
                                       // to feed the AudioTrack
                                    lpJniStorage->mMemBase, // shared mem
                                    true,                   // thread can call Java
                                    sessionId,              // audio session ID
                                    AudioTrack::TRANSFER_SHARED,
                                    NULL,       // default offloadInfo
                                    AttributionSourceState(), // default uid, pid values
                                    paa.get());
              break;
  
          default:
              ALOGE("Unknown mode %d", memoryMode);
              goto native_init_failure;
          }
  
          if (status != NO_ERROR) {
              ALOGE("Error %d initializing AudioTrack", status);
              goto native_init_failure;
          }
          // Set caller name so it can be logged in destructor.
          // MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_JAVA
          lpTrack->setCallerName("java");
      } else {  // end if (nativeAudioTrack == 0)
          lpTrack = (AudioTrack*)nativeAudioTrack;
          // TODO: We need to find out which members of the Java AudioTrack might
          // need to be initialized from the Native AudioTrack
          // these are directly returned from getters:
          //  mSampleRate
          //  mAudioFormat
          //  mStreamType
          //  mChannelConfiguration
          //  mChannelCount
          //  mState (?)
          //  mPlayState (?)
          // these may be used internally (Java AudioTrack.audioParamCheck():
          //  mChannelMask
          //  mChannelIndexMask
          //  mDataLoadMode
  
          // initialize the callback information:
          // this data will be passed with every AudioTrack callback
          lpJniStorage = new AudioTrackJniStorage();
          lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
          // we use a weak reference so the AudioTrack object can be garbage collected.
          lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
          lpJniStorage->mCallbackData.busy = false;
      }
      lpJniStorage->mAudioTrackCallback =
              new JNIAudioTrackCallback(env, thiz, lpJniStorage->mCallbackData.audioTrack_ref,
                                        javaAudioTrackFields.postNativeEventInJava);
      lpTrack->setAudioTrackCallback(lpJniStorage->mAudioTrackCallback);
  
      nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
      if (nSession == NULL) {
          ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
          goto native_init_failure;
      }
      // read the audio session ID back from AudioTrack in case we create a new session
      nSession[0] = lpTrack->getSessionId();
      env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
      nSession = NULL;
  
      {
          const jint elements[1] = { (jint) lpTrack->getSampleRate() };
          env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
      }
  
      {   // scope for the lock
          Mutex::Autolock l(sLock);
          sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
      }
      // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
      // of the Java object (in mNativeTrackInJavaObj)
      setAudioTrack(env, thiz, lpTrack);
  
      // save the JNI resources so we can free them later
      //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
      env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);
  
      // since we had audio attributes, the stream type was derived from them during the
      // creation of the native AudioTrack: push the same value to the Java object
      env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());
  
      return (jint) AUDIO_JAVA_SUCCESS;
  
      // failures:
  native_init_failure:
      if (nSession != NULL) {
          env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
      }
      env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
      env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
      delete lpJniStorage;
      env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);
  
      // lpTrack goes out of scope, so reference count drops to zero
      return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
  }

创建AudioTrack时初始化的缓冲区决定了AudioTrack在耗尽数据之前能够播放多少时间

  移动开发 最新文章
Vue3装载axios和element-ui
android adb cmd
【xcode】Xcode常用快捷键与技巧
Android开发中的线程池使用
Java 和 Android 的 Base64
Android 测试文字编码格式
微信小程序支付
安卓权限记录
知乎之自动养号
【Android Jetpack】DataStore
上一篇文章      下一篇文章      查看所有文章
加:2022-04-09 18:32:44  更:2022-04-09 18:35:12 
 
开发: C++知识库 Java知识库 JavaScript Python PHP知识库 人工智能 区块链 大数据 移动开发 嵌入式 开发工具 数据结构与算法 开发测试 游戏开发 网络协议 系统运维
教程: HTML教程 CSS教程 JavaScript教程 Go语言教程 JQuery教程 VUE教程 VUE3教程 Bootstrap教程 SQL数据库教程 C语言教程 C++教程 Java教程 Python教程 Python3教程 C#教程
数码: 电脑 笔记本 显卡 显示器 固态硬盘 硬盘 耳机 手机 iphone vivo oppo 小米 华为 单反 装机 图拉丁

360图书馆 购物 三丰科技 阅读网 日历 万年历 2024年11日历 -2024/11/24 21:05:46-

图片自动播放器
↓图片自动播放器↓
TxT小说阅读器
↓语音阅读,小说下载,古典文学↓
一键清除垃圾
↓轻轻一点,清除系统垃圾↓
图片批量下载器
↓批量下载图片,美女图库↓
  网站联系: qq:121756557 email:121756557@qq.com  IT数码