在 cameraserver 进程当中:
void CameraClient::dataCallbackTimestamp(nsecs_t timestamp,
int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
void Camera::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr)
进入 mediaserver 进程:
void CameraSourceListener::postDataTimestamp(
nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) {
void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
int32_t msgType __unused, const sp<IMemory> &data) {
status_t CameraSource::read(
MediaBuffer **buffer, const ReadOptions *options) {
void MediaCodecSource::Puller::onMessageReceived(const sp<AMessage> &msg) {
void MediaCodecSource::Puller::Queue::pushBuffer(MediaBuffer *mbuf) {
bool MediaCodecSource::Puller::Queue::readBuffer(MediaBuffer **mbuf, bool isStopping)
status_t MediaCodecSource::feedEncoderInputBuffers() {
sp<MediaCodecBuffer> inbuf;
status_t err = mEncoder->getInputBuffer(bufferIndex, &inbuf);
memcpy(inbuf->data(), mbuf->data(), size);
到这里,数据就从 cameraserver 进程传递到了 mediaserver 进程,并且从 MediaBuffer 复制到了 MediaCodecBuffer。后面是 MediaCodec 的编码流程;我们暂时不关注编码后的数据,先看下这份编码前的数据如何归还给 cameraserver。
归还流程从 MediaBuffer 的 release() 调用开始:
mbuf->release();
mObserver->signalBufferReturned(this);
void CameraSource::signalBufferReturned(MediaBuffer *buffer) {
void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
mCameraRecordingProxy->releaseRecordingFrame(frame);
回到 cameraserver 进程:
void Camera::RecordingProxy::releaseRecordingFrame(const sp<IMemory>& mem)
void Camera::releaseRecordingFrame(const sp<IMemory>& mem)
void CameraClient::releaseRecordingFrame(const sp<IMemory>& mem) {
mHardware->releaseRecordingFrame(mem);
至此,这块内存通过 mHardware 归还给底层硬件(HAL),整个 YUV 数据的流转流程到此结束。
|