Hello! This is 风筝's blog — welcome to trade notes with me.
The Android framework code is largely the same across platforms; it is the HAL layer that is vendor-specific and differs from chip to chip. Taking the MTK platform as an example, this post records the MTK HAL audio capture (recording) code flow.
The overall call flow looks like this:
AudioALSAHardware: openInputStream()
AudioALSAStreamManager: openInputStream()
AudioALSAStreamIn: AudioALSAStreamIn
AudioALSAStreamIn: set()
AudioALSAStreamIn: checkOpenStreamFormat
AudioALSAStreamIn: checkOpenStreamChannels
AudioALSAStreamIn: checkOpenStreamSampleRate
AudioALSAHardware: createAudioPatch()
AudioALSAStreamManager: setParameters()
AudioALSAStreamIn: setParameters()
AudioALSAStreamManager: routingInputDevice()
AudioALSAStreamIn: read()
AudioALSAStreamIn: open()
AudioALSAStreamManager: createCaptureHandler()
AudioALSAStreamManager: ulStreamAttributeTargetCustomization
AudioALSACaptureDataProviderNormal: AudioALSACaptureDataProviderNormal()
AudioALSACaptureDataProviderBase: AudioALSACaptureDataProviderBase()
AudioALSACaptureHandlerNormal: AudioALSACaptureHandlerNormal()
AudioALSACaptureHandlerNormal: init()
AudioALSACaptureHandlerNormal: open()
AudioALSACaptureDataProviderBase: AudioALSACaptureDataProviderBase()
AudioALSACaptureDataProviderDspRaw: AudioALSACaptureDataProviderDspRaw()
AudioALSACaptureDataClientAurisysNormal: AudioALSACaptureDataClientAurisysNormal(+)
AudioALSACaptureDataProviderBase: configStreamAttribute()
AudioALSACaptureDataProviderBase: attach
AudioALSACaptureDataProviderDspRaw: open(+)
AudioALSADeviceConfigManager: ApplyDeviceTurnonSequenceByName() DeviceName = ADDA_TO_CAPTURE1 descriptor->DeviceStatusCounte = 0
AudioALSACaptureDataProviderBase: enablePmicInputDevice
AudioALSAHardwareResourceManager: +startInputDevice()
AudioALSADeviceConfigManager: ApplyDeviceTurnonSequenceByName() DeviceName = builtin_Mic_DualMic descriptor->DeviceStatusCounte = 0
AudioALSACaptureDataProviderBase: getInputSampleRate()
AudioMTKGainController: +SetCaptureGain()
AudioMTKGainController: ApplyMicGain()
AudioALSACaptureDataProviderDspRaw: openApHwPcm(), mPcm = 0xf2b55260
AudioDspStreamManager: addCaptureDataProvider()
AudioDspStreamManager: openCaptureDspHwPcm(), mDspHwPcm = 0xf2b55340
pthread_create(&hReadThread, NULL, AudioALSACaptureDataProviderDspRaw::readThread, (void *)this);
AudioALSACaptureDataProviderDspRaw: +readThread()
AudioALSACaptureDataProviderBase: waitPcmStart
AudioALSACaptureDataProviderBase: pcm_start
AudioALSACaptureDataProviderBase: pcmRead
AudioALSACaptureDataProviderBase: provideCaptureDataToAllClients
AudioALSACaptureDataClientAurisysNormal: copyCaptureDataToClient()
pthread_create(&hProcessThread, NULL, AudioALSACaptureDataClientAurisysNormal::processThread, (void *)this);
AudioALSACaptureDataClientAurisysNormal: processThread
AudioALSACaptureHandlerNormal: read()
bytes = mCaptureDataClient->read(buffer, bytes);
AudioMTKStreamInInterface *AudioALSAStreamManager::openInputStream(
    uint32_t devices,
    int *format,
    uint32_t *channels,
    uint32_t *sampleRate,
    status_t *status,
    audio_in_acoustics_t acoustics,
    uint32_t input_flag) {
    if (format == NULL || channels == NULL || sampleRate == NULL || status == NULL) {
        ALOGE("%s(), NULL pointer!! format = %p, channels = %p, sampleRate = %p, status = %p",
              __FUNCTION__, format, channels, sampleRate, status);
        if (status != NULL) { *status = INVALID_OPERATION; }
        return NULL;
    }
    ALOGD("%s(), devices = 0x%x, format = 0x%x, channels = 0x%x, sampleRate = %d, status = %d, acoustics = 0x%x, input_flag 0x%x",
          __FUNCTION__, devices, *format, *channels, *sampleRate, *status, acoustics, input_flag);
    AudioALSAStreamIn *pAudioALSAStreamIn = new AudioALSAStreamIn();
    pAudioALSAStreamIn->set(devices, format, channels, sampleRate, status, acoustics, input_flag);
    pAudioALSAStreamIn->setIdentity(mStreamInIndex);
    mStreamInVector.add(mStreamInIndex, pAudioALSAStreamIn);
    return pAudioALSAStreamIn;
}
status_t AudioALSAStreamIn::set(
    uint32_t devices,
    int *format,
    uint32_t *channels,
    uint32_t *sampleRate,
    status_t *status,
    audio_in_acoustics_t acoustics, uint32_t flags) {
    // ...
    // Validate the requested format/channels/sample rate against the device;
    // any mismatch is reported back to the caller as BAD_VALUE.
    if (checkOpenStreamFormat(static_cast<audio_devices_t>(devices), format) == false) {
        *status = BAD_VALUE;
    }
    if (checkOpenStreamChannels(static_cast<audio_devices_t>(devices), channels) == false) {
        *status = BAD_VALUE;
    }
    if (checkOpenStreamSampleRate(static_cast<audio_devices_t>(devices), sampleRate) == false) {
        *status = BAD_VALUE;
    }
    if (*status == NO_ERROR) {
        // Everything checks out: record the negotiated attributes as this
        // stream's target configuration.
        mStreamAttributeTarget.audio_format = static_cast<audio_format_t>(*format);
        mStreamAttributeTarget.audio_channel_mask = static_cast<audio_channel_mask_t>(*channels);
        mStreamAttributeTarget.num_channels = popcount(*channels);
        mStreamAttributeTarget.sample_rate = *sampleRate;
        mStreamAttributeTarget.input_device = static_cast<audio_devices_t>(devices);
        mStreamAttributeTarget.acoustics_mask = static_cast<audio_in_acoustics_t>(acoustics);
    }
    // ...
}
The upper-layer service pushes parameters down to the HAL via setParameters(). The parameters delivered here are: IOport = 70, keyValuePairs = input_source=1;routing=-2147483644
status_t AudioALSAStreamIn::setParameters(const String8 &keyValuePairs) {
    // ... (only the routing handling is shown here)
    if (param.getInt(keyRouting, value) == NO_ERROR) {
        // Hand the new input device over to the stream manager.
        status = mStreamManager->routingInputDevice(this, mStreamAttributeTarget.input_device, inputdevice);
    }
    // ...
}
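By the way, the routing value -2147483644 is just the signed-decimal rendering of 0x80000004, i.e. AUDIO_DEVICE_BIT_IN | 0x4, which is AUDIO_DEVICE_IN_BUILTIN_MIC. A tiny standalone check (the constants match Android's audio headers; this is not MTK code):

#include <cstdint>
#include <cstdio>

// Constants as defined in Android's audio headers.
static const uint32_t AUDIO_DEVICE_BIT_IN = 0x80000000u;
static const uint32_t AUDIO_DEVICE_IN_BUILTIN_MIC = AUDIO_DEVICE_BIT_IN | 0x4u;

int main() {
    int32_t routing = -2147483644;        // value from keyValuePairs
    uint32_t device = (uint32_t)routing;  // reinterpret as a device mask
    printf("device = 0x%x, input = %d, builtin mic = %d\n",
           device,
           (device & AUDIO_DEVICE_BIT_IN) != 0,
           device == AUDIO_DEVICE_IN_BUILTIN_MIC);
    // Prints: device = 0x80000004, input = 1, builtin mic = 1
    return 0;
}

The routing request then lands in routingInputDevice():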
status_t AudioALSAStreamManager::routingInputDevice(AudioALSAStreamIn *pAudioALSAStreamIn,
                                                    const audio_devices_t current_input_device,
                                                    audio_devices_t input_device) {
    if (input_device == AUDIO_DEVICE_NONE ||
        input_device == current_input_device) {
        ALOGW("-%s(), input_device(0x%x) is AUDIO_DEVICE_NONE(0x%x) or current_input_device(0x%x), return",
              __FUNCTION__,
              input_device, AUDIO_DEVICE_NONE, current_input_device);
        return NO_ERROR;
    }
    // Suspend and standby every input stream before switching the device.
    setAllInputStreamsSuspend(true, false);
    standbyAllInputStreams();
    if (mStreamInVector.size() > 0) {
        for (size_t i = 0; i < mStreamInVector.size(); i++) {
            if ((input_device == AUDIO_DEVICE_IN_FM_TUNER) || (current_input_device == AUDIO_DEVICE_IN_FM_TUNER) ||
                (input_device == AUDIO_DEVICE_IN_TELEPHONY_RX) || (current_input_device == AUDIO_DEVICE_IN_TELEPHONY_RX)) {
                // FM tuner / telephony RX: only re-route the requesting stream.
                if (pAudioALSAStreamIn == mStreamInVector[i]) {
                    status = mStreamInVector[i]->routing(input_device);
                    ASSERT(status == NO_ERROR);
                }
            } else {
                // All other devices: re-route every input stream.
                status = mStreamInVector[i]->routing(input_device);
                ASSERT(status == NO_ERROR);
            }
        }
    }
    setAllInputStreamsSuspend(false, false);
    // ...
    return status;
}
Next comes the most important call of all: read(). The upper-layer service calls read() to fetch the capture data:
ssize_t AudioALSAStreamIn::read(void *buffer, ssize_t bytes) {
    // ...
    if (mStandby == true) {
        status = open();  // lazily build the capture path on first read
    }
    // ...
}
open() is done from inside read():
status_t AudioALSAStreamIn::open() {
    if (mStandby == true) {
        ASSERT(mCaptureHandler == NULL);
        mCaptureHandler = mStreamManager->createCaptureHandler(&mStreamAttributeTarget);
        if (mCaptureHandler == NULL) {
            status = BAD_VALUE;
            return status;
        }
        status = mCaptureHandler->open();
        mStandby = false;
    }
    return status;
}
A CaptureHandler is created first — different devices and input sources get different CaptureHandler implementations — and then the matching handler is opened. The logic of createCaptureHandler is worth a closer look; it mostly branches on the device:
AudioALSACaptureHandlerBase *AudioALSAStreamManager::createCaptureHandler(
    stream_attribute_t *stream_attribute_target) {
    // ...
    stream_attribute_target->audio_mode = mAudioMode;
    stream_attribute_target->output_devices = current_output_devices;
    stream_attribute_target->micmute = mMicMute;
    ulStreamAttributeTargetCustomization(stream_attribute_target);
    if (stream_attribute_target->input_source == AUDIO_SOURCE_HOTWORD) {
        // Voice wakeup (VOW) capture
        if (mAudioALSAVoiceWakeUpController->getVoiceWakeUpEnable() == false) {
            mAudioALSAVoiceWakeUpController->setVoiceWakeUpEnable(true);
        }
        if (mVoiceWakeUpNeedOn == true) {
            mAudioALSAVoiceWakeUpController->SeamlessRecordEnable();
        }
        pCaptureHandler = new AudioALSACaptureHandlerVOW(stream_attribute_target);
    } else if (stream_attribute_target->input_source == AUDIO_SOURCE_VOICE_UNLOCK ||
               stream_attribute_target->input_source == AUDIO_SOURCE_ECHO_REFERENCE) {
        pCaptureHandler = new AudioALSACaptureHandlerSyncIO(stream_attribute_target);
    } else if (isPhoneCallOpen() == true) {
        // Recording during a phone call
        pCaptureHandler = new AudioALSACaptureHandlerVoice(stream_attribute_target);
    } else if ((stream_attribute_target->NativePreprocess_Info.PreProcessEffect_AECOn == true) ||
               (stream_attribute_target->input_source == AUDIO_SOURCE_VOICE_COMMUNICATION) ||
               (stream_attribute_target->input_source == AUDIO_SOURCE_CUSTOMIZATION1) ||
               (stream_attribute_target->input_source == AUDIO_SOURCE_CUSTOMIZATION2)) {
        // AEC / VoIP-style sources
        AudioALSAHardwareResourceManager::getInstance()->setHDRRecord(false);
        switch (stream_attribute_target->input_device) {
        case AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET: {
            if (stream_attribute_target->output_devices & AUDIO_DEVICE_OUT_ALL_SCO) {
                pCaptureHandler = new AudioALSACaptureHandlerAEC(stream_attribute_target);
            } else {
                pCaptureHandler = new AudioALSACaptureHandlerBT(stream_attribute_target);
            }
            break;
        }
        case AUDIO_DEVICE_IN_USB_DEVICE:
        case AUDIO_DEVICE_IN_USB_HEADSET:
#if defined(MTK_AURISYS_FRAMEWORK_SUPPORT)
            pCaptureHandler = new AudioALSACaptureHandlerAEC(stream_attribute_target);
#else
            pCaptureHandler = new AudioALSACaptureHandlerUsb(stream_attribute_target);
#endif
            break;
        default: {
            if (isAdspOptionEnable() &&
                ((isCaptureOffload(stream_attribute_target) && !isIEMsOn &&
                  !AudioALSACaptureDataProviderNormal::getInstance()->getNormalOn()) ||
                 isBleInputDevice(stream_attribute_target->input_device))) {
                pCaptureHandler = new AudioALSACaptureHandlerDsp(stream_attribute_target);
            } else {
                pCaptureHandler = new AudioALSACaptureHandlerAEC(stream_attribute_target);
            }
            break;
        }
        }
    } else {
        // Plain recording sources
        switch (stream_attribute_target->input_device) {
        case AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET: {
            pCaptureHandler = new AudioALSACaptureHandlerBT(stream_attribute_target);
            break;
        }
        case AUDIO_DEVICE_IN_USB_DEVICE:
        case AUDIO_DEVICE_IN_USB_HEADSET:
            pCaptureHandler = new AudioALSACaptureHandlerUsb(stream_attribute_target);
            break;
        case AUDIO_DEVICE_IN_BUILTIN_MIC:
        case AUDIO_DEVICE_IN_BACK_MIC:
        case AUDIO_DEVICE_IN_WIRED_HEADSET:
        case AUDIO_DEVICE_IN_BLE_HEADSET:
        case AUDIO_DEVICE_IN_BUS:
        default: {
            if (AudioSmartPaController::getInstance()->isInCalibration()) {
                pCaptureHandler = new AudioALSACaptureHandlerNormal(stream_attribute_target);
                break;
            }
            if (isAdspOptionEnable() &&
                !(AUDIO_INPUT_FLAG_MMAP_NOIRQ & stream_attribute_target->mAudioInputFlags) &&
                ((isCaptureOffload(stream_attribute_target) && !isIEMsOn &&
                  !AudioALSACaptureDataProviderNormal::getInstance()->getNormalOn()) ||
                 isBleInputDevice(stream_attribute_target->input_device))) {
                if (isPhoneCallOpen() == true) {
                    pCaptureHandler = new AudioALSACaptureHandlerVoice(stream_attribute_target);
                } else {
                    pCaptureHandler = new AudioALSACaptureHandlerDsp(stream_attribute_target);
                }
            } else {
                pCaptureHandler = new AudioALSACaptureHandlerNormal(stream_attribute_target);
            }
            break;
        }
        }
    }
    mCaptureHandlerVector.add(mCaptureHandlerIndex, pCaptureHandler);
    return pCaptureHandler;
}
There is quite a lot of branching in createCaptureHandler. Here we follow the AUDIO_DEVICE_IN_BUILTIN_MIC case — the ordinary microphone-recording scenario — which lands on AudioALSACaptureHandlerNormal:
status_t AudioALSACaptureHandlerNormal::open() {
    // ...
    if (!AudioSmartPaController::getInstance()->isInCalibration()) {
        if (isAdspOptionEnable() &&
            (AudioDspStreamManager::getInstance()->getDspRawInHandlerEnable(mStreamAttributeTarget->mAudioInputFlags) > 0) &&
            (AudioDspStreamManager::getInstance()->getDspInHandlerEnable(mStreamAttributeTarget->mAudioInputFlags) > 0) && !isIEMsOn &&
            !AudioALSACaptureDataProviderNormal::getInstance()->getNormalOn()) {
            // ADSP offload path: raw capture data comes from the DSP.
            mCaptureDataClient = new AudioALSACaptureDataClientAurisysNormal(AudioALSACaptureDataProviderDspRaw::getInstance(),
                                                                             mStreamAttributeTarget, NULL);
        } else {
            mCaptureDataClient = new AudioALSACaptureDataClientAurisysNormal(AudioALSACaptureDataProviderNormal::getInstance(),
                                                                             mStreamAttributeTarget, NULL);
        }
    } else {
        mCaptureDataClient = new AudioALSACaptureDataClientAurisysNormal(AudioALSACaptureDataProviderEchoRefExt::getInstance(),
                                                                         mStreamAttributeTarget, NULL);
    }
    // ...
}
The path taken here is AudioALSACaptureDataClientAurisysNormal, and the DataProvider is AudioALSACaptureDataProviderDspRaw (more on that later).
Let's continue with AudioALSACaptureDataClientAurisysNormal:
AudioALSACaptureDataClientAurisysNormal::AudioALSACaptureDataClientAurisysNormal(
    AudioALSACaptureDataProviderBase *pCaptureDataProvider,
    stream_attribute_t *stream_attribute_target,
    AudioALSACaptureDataProviderBase *pCaptureDataProviderEchoRef) {
    // ...
    mCaptureDataProvider->configStreamAttribute(mStreamAttributeTarget);
    mCaptureDataProvider->attach(this);  // register with the data provider
    mLatency = (IsLowLatencyCapture()) ? UPLINK_LOW_LATENCY_MS : UPLINK_NORMAL_LATENCY_MS;
    if (mAudioALSAVolumeController != NULL) {
        mAudioALSAVolumeController->SetCaptureGain(mStreamAttributeTarget->audio_mode,
                                                   mStreamAttributeTarget->input_source,
                                                   mStreamAttributeTarget->input_device,
                                                   mStreamAttributeTarget->output_devices);
    }
    CreateAurisysLibManager();
    // Round the pop-drop duration up to a whole number of periods, then
    // convert it from milliseconds to bytes.
    drop_ms = getDropMs(mStreamAttributeTarget);
    if (drop_ms) {
        if ((drop_ms % mLatency) != 0) {
            drop_ms = ((drop_ms / mLatency) + 1) * mLatency;
        }
        mDropPopSize = (audio_bytes_per_sample(mStreamAttributeTarget->audio_format) *
                        mStreamAttributeTarget->num_channels *
                        mStreamAttributeTarget->sample_rate *
                        drop_ms) / 1000;
    }
    ret = pthread_create(&hProcessThread, NULL,
                         AudioALSACaptureDataClientAurisysNormal::processThread,
                         (void *)this);
    // ...
}
- Configure the stream attributes
- Attach to the DataProvider, which will supply the capture data
- Compute mLatency, the period of the capture processing loop
- Set the capture gain
- Create the aurisys manager, used later by the algorithm processing
- Compute mDropPopSize, the number of bytes to discard at the head of the recording so it doesn't start with a pop (see the worked example after this list)
- Create the thread that consumes the capture data
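To make the mDropPopSize arithmetic concrete, here is a worked example with assumed values — a 20 ms period, 16-bit stereo at 48 kHz, and 50 ms of pop to drop; the real numbers come from getDropMs() and the stream attributes:

#include <cstdint>
#include <cstdio>

// Hypothetical example values; the real ones come from getDropMs(),
// the stream attributes, and UPLINK_NORMAL_LATENCY_MS.
int main() {
    uint32_t latency_ms = 20;   // assumed processing period
    uint32_t drop_ms = 50;      // assumed raw value from getDropMs()

    // Round drop_ms up to a whole number of processing periods.
    if (drop_ms % latency_ms != 0)
        drop_ms = (drop_ms / latency_ms + 1) * latency_ms;   // 50 -> 60

    uint32_t bytes_per_sample = 2;   // AUDIO_FORMAT_PCM_16_BIT
    uint32_t num_channels = 2;
    uint32_t sample_rate = 48000;

    uint32_t drop_pop_size =
        bytes_per_sample * num_channels * sample_rate * drop_ms / 1000;
    printf("mDropPopSize = %u bytes\n", drop_pop_size);  // 11520
    return 0;
}

Now the processThread itself: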
void *AudioALSACaptureDataClientAurisysNormal::processThread(void *arg) {
    // ...
    client->mProcessThreadLaunched = true;
    raw_ul = &client->mRawDataBuf;
    processed = &client->mProcessedDataBuf;
    while (client->mEnable == true) {
        data_count_raw_ul = audio_ringbuf_count(raw_ul);
        // Block until a full period of raw data (and, with AEC, enough echo
        // reference data) is available.
        if ((data_count_raw_ul < client->mRawDataPeriodBufSize) ||
            (client->IsAECEnable() == true &&
             ((client->mIsEchoRefDataSync == false && client->isNeedSkipSyncEchoRef() == false) ||
              data_count_raw_aec < client->mEchoRefDataPeriodBufSize))) {
            wait_result = AL_WAIT_MS(client->mRawDataBufLock, MAX_PROCESS_DATA_WAIT_TIME_OUT_MS);
        }
        audio_pool_buf_copy_from_ringbuf(ul_in, raw_ul, client->mRawDataPeriodBufSize);
        // Run the uplink through the aurisys algorithm chain.
        aurisys_process_ul_only(manager, ul_in, ul_out, ul_aec);
        // data_count: bytes now sitting in ul_out (computed in elided code).
        if (client->mDropPopSize > 0) {
            ALOGV("data_count %u, mDropPopSize %u, %dL", data_count, client->mDropPopSize, __LINE__);
            if (data_count >= client->mDropPopSize) {
                audio_ringbuf_drop_data(&ul_out->ringbuf, client->mDropPopSize);
                data_count -= client->mDropPopSize;
                client->mDropPopSize = 0;
            } else {
                audio_ringbuf_drop_data(&ul_out->ringbuf, data_count);
                client->mDropPopSize -= data_count;
                data_count = 0;
            }
        }
        audio_ringbuf_copy_from_linear(processed, effect_buf, data_count);
    }
    pthread_exit(NULL);
    return NULL;
}
The thread loops continuously, copying data out of the client->mRawDataBuf ring buffer — this is the capture data — running it through the aurisys uplink algorithm chain, and then using the mDropPopSize computed earlier in the AudioALSACaptureDataClientAurisysNormal constructor to discard a stretch of data so the recording doesn't start with a pop.
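The audio_ringbuf_* helpers are MTK-internal, but the semantics the loop relies on are those of an ordinary ring buffer. A toy equivalent, just to pin down what "count" and "copy" mean here (MTK's real helpers are richer):

#include <cstdint>

// Toy ring buffer illustrating the semantics of audio_ringbuf_count()
// and the copy helpers.
struct ToyRingBuf {
    char    *base;
    uint32_t size;    // capacity + 1: read == write means "empty"
    uint32_t read;
    uint32_t write;
};

// Bytes currently stored, the equivalent of audio_ringbuf_count().
static uint32_t ringbuf_count(const ToyRingBuf *rb) {
    return (rb->write + rb->size - rb->read) % rb->size;
}

// Pop `bytes` out into a linear buffer, advancing the read pointer.
static void ringbuf_copy_to_linear(ToyRingBuf *rb, char *dst, uint32_t bytes) {
    for (uint32_t i = 0; i < bytes; i++) {
        dst[i] = rb->base[rb->read];
        rb->read = (rb->read + 1) % rb->size;
    }
}

int main() {
    char storage[8];
    ToyRingBuf rb = { storage, sizeof(storage), 0, 0 };
    const char src[3] = { 'a', 'b', 'c' };
    for (uint32_t i = 0; i < 3; i++) {               // producer pushes 3 bytes
        rb.base[rb.write] = src[i];
        rb.write = (rb.write + 1) % rb.size;
    }
    char out[3];
    ringbuf_copy_to_linear(&rb, out, ringbuf_count(&rb));  // consumer pops "abc"
    return 0;
}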
Now the question: how does capture data end up in that ring buffer in the first place? Who supplies it?
This is the DataProvider mentioned earlier: AudioALSACaptureDataProviderDspRaw.
Recall that the AudioALSACaptureDataClientAurisysNormal constructor called mCaptureDataProvider->attach(this) — and mCaptureDataProvider is exactly the AudioALSACaptureDataProviderDspRaw that was passed in!
void AudioALSACaptureDataProviderBase::attach(IAudioALSACaptureDataClient *pCaptureDataClient) {
    // ...
    mCaptureDataClientVector.add(pCaptureDataClient->getIdentity(), pCaptureDataClient);
    size = (uint32_t)mCaptureDataClientVector.size();
    if (size == 1) {
        // First client: actually open the provider (PCM device, read thread).
        mOpenIndex++;
        open();
    } else {
        // Provider already open: at most tighten the period for a new
        // low-latency client.
        if (!hasLowLatencyCapture && pCaptureDataClient->IsLowLatencyCapture()) {
            updateReadSize(getPeriodBufSize(pStreamAttr, UPLINK_NORMAL_LATENCY_MS) *
                           lowLatencyMs / UPLINK_NORMAL_LATENCY_MS);
            if (mCaptureDataProviderType != CAPTURE_PROVIDER_DSP) {
                mHardwareResourceManager->setULInterruptRate(mStreamAttributeSource.sample_rate *
                                                             lowLatencyMs / 1000);
            } else if (isAdspOptionEnable()) {
                AudioDspStreamManager::getInstance()->UpdateCaptureDspLatency();
            }
        }
        enablePmicInputDevice(true);
    }
}
For the first recording, size is necessarily 1, so we enter open():
status_t AudioALSACaptureDataProviderDspRaw::open() {
    // ...
    unsigned int feature_id = CAPTURE_RAW_FEATURE_ID;
    mAudioMessengerIPI->registerAdspFeature(feature_id);
    // Pick the ADDA capture turn-on sequence: 4-channel if the board has
    // more than two phone mics and we're not capturing from a wired headset.
    if (AudioALSAHardwareResourceManager::getInstance()->getNumPhoneMicSupport() > 2 &&
        mStreamAttributeSource.input_device != AUDIO_DEVICE_IN_WIRED_HEADSET) {
        mApTurnOnSequence = AUDIO_CTL_ADDA_TO_CAPTURE1_4CH;
    } else {
        mApTurnOnSequence = AUDIO_CTL_ADDA_TO_CAPTURE1;
    }
    AudioALSADeviceConfigManager::getInstance()->ApplyDeviceTurnonSequenceByName(mApTurnOnSequence);
    mStreamAttributeSource.Time_Info.total_frames_readed = 0;
    mStreamAttributeSource.sample_rate = getInputSampleRate(mStreamAttributeSource.input_device,
                                                            mStreamAttributeSource.output_devices);
    mStreamAttributeSource.audio_format = AUDIO_FORMAT_PCM_8_24_BIT;
    if (mStreamAttributeSource.input_device == AUDIO_DEVICE_IN_WIRED_HEADSET ||
        mStreamAttributeSource.input_source == AUDIO_SOURCE_UNPROCESSED) {
        mStreamAttributeSource.num_channels = 1;
    } else {
        mStreamAttributeSource.num_channels = 2;
    }
    mStreamAttributeSource.latency = mlatency;
    setApHwPcm();
    mAudioMessengerIPI->registerDmaCbk(
        TASK_SCENE_CAPTURE_RAW,
        0x2000,
        0xF000,
        processDmaMsgWrapper,
        this);
    mAudioALSAVolumeController->SetCaptureGain(mStreamAttributeSource.audio_mode,
                                               mStreamAttributeSource.input_source,
                                               mStreamAttributeSource.input_device,
                                               mStreamAttributeSource.output_devices);
    openApHwPcm();   // pcm_open on the AP-side hardware PCM
    AudioDspStreamManager::getInstance()->addCaptureDataProvider(this);
    int ret = pthread_create(&hReadThread, NULL, AudioALSACaptureDataProviderDspRaw::readThread, (void *)this);
    // ...
}
Inside open(), the essential work is driving the underlying PCM device — pcm_open and pcm_start — to actually begin capturing.
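For context, this is roughly what a minimal standalone tinyalsa capture loop looks like (a sketch against the public tinyalsa API; the card/device numbers and config values are placeholders, not MTK's):

#include <tinyalsa/asoundlib.h>
#include <cstdio>
#include <cstdlib>

int main() {
    struct pcm_config config = {};
    config.channels = 2;
    config.rate = 48000;
    config.period_size = 1024;
    config.period_count = 4;
    config.format = PCM_FORMAT_S16_LE;

    // Card/device numbers are board-specific; 0,0 is just a placeholder.
    struct pcm *pcm = pcm_open(0, 0, PCM_IN, &config);
    if (pcm == NULL || !pcm_is_ready(pcm)) {
        fprintf(stderr, "pcm_open failed: %s\n", pcm ? pcm_get_error(pcm) : "null");
        return 1;
    }

    unsigned int size = pcm_frames_to_bytes(pcm, pcm_get_buffer_size(pcm));
    char *buffer = (char *)malloc(size);

    pcm_start(pcm);                       // what the HAL's waitPcmStart gates on
    for (int i = 0; i < 10; i++) {
        if (pcm_read(pcm, buffer, size) != 0) break;   // blocking capture read
        // ... hand the block to whoever consumes it ...
    }

    free(buffer);
    pcm_close(pcm);
    return 0;
}

With that picture in mind, on to the most important piece of all: the readThread!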
void *AudioALSACaptureDataProviderDspRaw::readThread(void *arg) {
    // ...
    pDataProvider->waitPcmStart();
    char linear_buffer[kReadBufferSize];
    while (pDataProvider->mEnable == true) {
        // Blocking read of one chunk of PCM data from the kernel.
        ret = pDataProvider->pcmRead(pDataProvider->mPcm, linear_buffer, kReadBufferSize);
        // Publish the freshly read block as a RingBuf snapshot; Read_Size
        // (the byte count actually read) is tracked in elided code, and
        // bufLen keeps one spare byte so "full" is distinguishable from "empty".
        pDataProvider->mPcmReadBuf.pBufBase = linear_buffer;
        pDataProvider->mPcmReadBuf.bufLen = Read_Size + 1;
        pDataProvider->mPcmReadBuf.pRead = linear_buffer;
        pDataProvider->mPcmReadBuf.pWrite = linear_buffer + Read_Size;
        pDataProvider->provideCaptureDataToAllClients(open_index);
    }
    pthread_exit(NULL);
    return NULL;
}
Here is the crux: this thread keeps reading capture data from the ALSA layer into linear_buffer, publishes it through pDataProvider->mPcmReadBuf, and hands it to every client — in our case AudioALSACaptureDataClientAurisysNormal.
void AudioALSACaptureDataProviderBase::provideCaptureDataToAllClients(const uint32_t open_index) {
    for (size_t i = 0; i < mCaptureDataClientVector.size(); i++) {
        pCaptureDataClient = mCaptureDataClientVector[i];
        pCaptureDataClient->copyCaptureDataToClient(mPcmReadBuf);
    }
}
It works like a broadcast: walk the client vector and hand the capture data to each client in turn.
Why did MTK design it this way?
The benefit is that even with multiple AudioStreamIn instances, each stream's client has its own ring buffer; data arriving from the hardware is dropped into each client's own buffer, so the streams never interfere with one another.
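A stripped-down illustration of this provider/client fan-out (toy classes, not the real MTK ones — the real code also locks the client vector and opens the hardware on first attach):

#include <cstddef>
#include <cstdint>
#include <deque>
#include <mutex>
#include <vector>

// Toy fan-out: the provider owns the hardware stream; every attached client
// owns a private buffer, so one slow client never corrupts another's data.
class CaptureClient {
public:
    void copyCaptureDataToClient(const int16_t *data, size_t frames) {
        std::lock_guard<std::mutex> lock(mLock);
        mRawDataBuf.insert(mRawDataBuf.end(), data, data + frames);
    }
private:
    std::mutex mLock;                  // guards this client's buffer only
    std::deque<int16_t> mRawDataBuf;   // stand-in for the per-client ring buffer
};

class CaptureProvider {
public:
    void attach(CaptureClient *client) {
        mClients.push_back(client);
        if (mClients.size() == 1) { /* first client: open PCM, start readThread */ }
    }
    void provideCaptureDataToAllClients(const int16_t *data, size_t frames) {
        for (CaptureClient *client : mClients)   // broadcast the same block
            client->copyCaptureDataToClient(data, frames);
    }
private:
    std::vector<CaptureClient *> mClients;
};

int main() {
    CaptureProvider provider;
    CaptureClient a, b;
    provider.attach(&a);
    provider.attach(&b);
    int16_t block[160] = {};                              // one period of PCM
    provider.provideCaptureDataToAllClients(block, 160);  // both get a copy
    return 0;
}

Back in the real code, here is how each client ingests the provider's block: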
uint32_t AudioALSACaptureDataClientAurisysNormal::copyCaptureDataToClient(RingBuf pcm_read_buf) {
    pcm_read_buf_wrap.base = pcm_read_buf.pBufBase;
    pcm_read_buf_wrap.read = pcm_read_buf.pRead;
    pcm_read_buf_wrap.write = pcm_read_buf.pWrite;
    pcm_read_buf_wrap.size = pcm_read_buf.bufLen;
    audio_ringbuf_copy_from_ringbuf_all(&mRawDataBuf, &pcm_read_buf_wrap);
}
Finally, the data is deposited into mRawDataBuf, which is exactly where AudioALSACaptureDataClientAurisysNormal::processThread picks it up.
Overall, the recording flow is fairly simple at its core: AudioALSACaptureDataProviderDspRaw::readThread produces the data, and AudioALSACaptureDataClientAurisysNormal::processThread consumes it — a classic producer-consumer model.
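As a closer, here is the whole pattern boiled down to a self-contained pthread sketch — readThread stands in for the producer (pcmRead), processThread for the consumer (the aurisys loop), and the buffering is reduced to a single byte counter:

#include <pthread.h>
#include <cstdio>

// Boiled-down producer-consumer skeleton of the two HAL threads.
static pthread_mutex_t gLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gDataReady = PTHREAD_COND_INITIALIZER;
static unsigned gAvailableBytes = 0;
static bool gEnable = true;

static void *readThread(void *) {             // producer: "pcmRead"
    for (int i = 0; i < 5; i++) {
        pthread_mutex_lock(&gLock);
        gAvailableBytes += 960;               // one period of capture data
        pthread_cond_signal(&gDataReady);     // wake the consumer
        pthread_mutex_unlock(&gLock);
    }
    pthread_mutex_lock(&gLock);
    gEnable = false;                          // tell the consumer to exit
    pthread_cond_signal(&gDataReady);
    pthread_mutex_unlock(&gLock);
    return nullptr;
}

static void *processThread(void *) {          // consumer: "aurisys process"
    pthread_mutex_lock(&gLock);
    while (gEnable || gAvailableBytes > 0) {
        while (gAvailableBytes == 0 && gEnable)
            pthread_cond_wait(&gDataReady, &gLock);   // like AL_WAIT_MS()
        if (gAvailableBytes > 0) {
            printf("consumed %u bytes\n", gAvailableBytes);
            gAvailableBytes = 0;
        }
    }
    pthread_mutex_unlock(&gLock);
    return nullptr;
}

int main() {
    pthread_t r, p;
    pthread_create(&r, nullptr, readThread, nullptr);
    pthread_create(&p, nullptr, processThread, nullptr);
    pthread_join(r, nullptr);
    pthread_join(p, nullptr);
    return 0;
}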