1. The ServiceManager Startup Flow
system/core/rootdir/init.rc:
service servicemanager /system/bin/servicemanager
class core
user system
group system
critical
onrestart restart healthd
onrestart restart zygote
onrestart restart media
onrestart restart surfaceflinger
onrestart restart drm
frameworks/native/cmds/servicemanager:
This directory contains service_manager.c and binder.c.
frameworks/native/cmds/servicemanager/service_manager.c:
int main(int argc, char **argv)
{
struct binder_state *bs;
bs = binder_open(128*1024);
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
...
svcmgr_handle = BINDER_SERVICE_MANAGER;
binder_loop(bs, svcmgr_handler);
return 0;
}
A look at binder_state:
struct binder_state
{
int fd;
void *mapped;
size_t mapsize;
};
A look at BINDER_SERVICE_MANAGER:
#define BINDER_SERVICE_MANAGER 0U
1.1 Analyzing binder_open(128*1024)
frameworks/native/cmds/servicemanager/binder.c:
struct binder_state *binder_open(size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
bs->fd = open("/dev/binder", O_RDWR);
if (bs->fd < 0) {
goto fail_open;
}
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
goto fail_open;
}
bs->mapsize = mapsize;
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
return bs;
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
Calling open("/dev/binder", O_RDWR) switches from user mode into kernel mode, so the following driver code runs:
drivers/staging/android/binder.c
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
get_task_struct(current);
proc->tsk = current;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC);
hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
binder_unlock(__func__);
return 0;
}
Opening the binder driver: binder_open creates the binder_proc corresponding to the servicemanager process and adds it at the head of the global binder_procs list.
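To make "added at the head of the binder_procs list" concrete, here is a minimal user-space sketch of the same push-front pattern; proc_node, fake_binder_open, and the pid values are invented stand-ins for the kernel's hlist types, not real driver code.
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's hlist: every open() of /dev/binder
 * allocates a per-process record and pushes it onto a global list. */
struct proc_node {
    int pid;
    struct proc_node *next;
};

static struct proc_node *procs; /* plays the role of binder_procs */

static struct proc_node *fake_binder_open(int pid)
{
    struct proc_node *p = calloc(1, sizeof(*p)); /* like kzalloc(GFP_KERNEL) */
    if (!p)
        return NULL;
    p->pid = pid;
    p->next = procs;  /* hlist_add_head: newest entry becomes the head */
    procs = p;
    return p;
}

int main(void)
{
    fake_binder_open(1);    /* some earlier process */
    fake_binder_open(350);  /* e.g. servicemanager */
    for (struct proc_node *p = procs; p; p = p->next)
        printf("binder_proc for pid %d\n", p->pid);
    return 0;
}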
1.2 Analyzing binder_become_context_manager(bs)
frameworks/native/cmds/servicemanager/binder.c:
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
return ret;
binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
...
case BINDER_SET_CONTEXT_MGR:
if (binder_context_mgr_node != NULL) {
...
}
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto err;
if (binder_context_mgr_uid != -1) {
if (binder_context_mgr_uid != current->cred->euid) {
...
}
} else {
binder_context_mgr_uid = current->cred->euid;
binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
}
break;
}
ret = 0;
...
return ret;
}
Setting itself up as the Binder context manager: this creates a binder_node, binder_context_mgr_node, whose proc field points to the binder_proc of the servicemanager process. ServiceManager earns the title of Binder's "context manager" because binder_context_mgr_node is a global variable in the driver.
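The single-manager property follows from those two driver globals. Below is a hedged sketch of that guard in plain C; set_context_mgr is an invented stand-in for the BINDER_SET_CONTEXT_MGR branch above, with simplified error handling.
#include <errno.h>
#include <stdio.h>

/* Globals: this is why ServiceManager is the one and only "manager". */
static void *binder_context_mgr_node;      /* handle 0 resolves to this node */
static long  binder_context_mgr_uid = -1;

static int set_context_mgr(long euid, void *new_node)
{
    if (binder_context_mgr_node != NULL) {
        fprintf(stderr, "BINDER_SET_CONTEXT_MGR already set\n");
        return -EBUSY;
    }
    if (binder_context_mgr_uid != -1 && binder_context_mgr_uid != euid)
        return -EPERM;                     /* a different uid may not take over */
    binder_context_mgr_uid  = euid;
    binder_context_mgr_node = new_node;    /* binder_new_node(proc, NULL, NULL) */
    return 0;
}

int main(void)
{
    int dummy;
    printf("first:  %d\n", set_context_mgr(1000, &dummy));  /* 0: success */
    printf("second: %d\n", set_context_mgr(1000, &dummy));  /* -EBUSY */
    return 0;
}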
1.3 Analyzing binder_loop(bs, svcmgr_handler)
frameworks/native/cmds/servicemanager/binder.c:
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
...
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
}
}
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
return res;
}
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
return ret;
binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
...
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT; /* note: binder_write() set bwr.read_buffer = 0 */
goto err;
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
...
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
ret = 0;
...
return ret;
}
copy_from_user copies the user-space binder_write_read into the kernel; binder_thread_write and binder_thread_read then process the request, and copy_to_user finally returns the updated binder_write_read to user space.
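As a mental model for the BINDER_WRITE_READ contract, the sketch below swaps the real ioctl for a fake_driver function; the struct mirrors the shape of binder_write_read, but the command values and the driver body are purely illustrative.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One struct carries both directions: a BC_* command stream in
 * (write_*) and a BR_* return stream out (read_*); the driver
 * reports progress through the *_consumed fields. */
struct bwr_sketch {
    size_t    write_size, write_consumed;
    uintptr_t write_buffer;
    size_t    read_size, read_consumed;
    uintptr_t read_buffer;
};

static void fake_driver(struct bwr_sketch *bwr)
{
    bwr->write_consumed = bwr->write_size;     /* pretend all commands handled */
    if (bwr->read_size >= sizeof(uint32_t)) {
        uint32_t br_noop = 0;                  /* illustrative BR_NOOP */
        memcpy((void *)bwr->read_buffer, &br_noop, sizeof(br_noop));
        bwr->read_consumed = sizeof(br_noop);
    }
}

int main(void)
{
    uint32_t out = 1 /* illustrative BC_ENTER_LOOPER */, in[8];
    struct bwr_sketch bwr = {
        .write_size = sizeof(out), .write_buffer = (uintptr_t)&out,
        .read_size  = sizeof(in),  .read_buffer  = (uintptr_t)in,
    };
    fake_driver(&bwr);
    printf("driver consumed %zu bytes, returned %zu bytes\n",
           bwr.write_consumed, bwr.read_consumed);
    return 0;
}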
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
case BC_ENTER_LOOPER:
...
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
}
return 0;
}
ServiceManager now acts as the server waiting for client requests: binder_thread_write ORs BINDER_LOOPER_STATE_ENTERED into binder_thread.looper.
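The cursor arithmetic binder_thread_write uses (ptr = buffer + *consumed, advancing past each command) can be reproduced in user space. In this sketch FAKE_BC_ENTER_LOOPER and parse_commands are invented for illustration:
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_BC_ENTER_LOOPER 12u  /* illustrative value only */

/* Walks a [cmd][cmd]... stream the way binder_thread_write walks
 * bwr.write_buffer, advancing *consumed past each handled command. */
static int parse_commands(const uint8_t *buffer, size_t size, size_t *consumed)
{
    const uint8_t *ptr = buffer + *consumed;
    const uint8_t *end = buffer + size;
    while (ptr < end) {
        uint32_t cmd;
        memcpy(&cmd, ptr, sizeof(cmd));  /* stands in for get_user() */
        ptr += sizeof(cmd);
        switch (cmd) {
        case FAKE_BC_ENTER_LOOPER:
            printf("looper |= BINDER_LOOPER_STATE_ENTERED\n");
            break;
        default:
            return -1;
        }
        *consumed = ptr - buffer;
    }
    return 0;
}

int main(void)
{
    uint32_t buf[1] = { FAKE_BC_ENTER_LOOPER };
    size_t consumed = 0;
    return parse_commands((const uint8_t *)buf, sizeof(buf), &consumed);
}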
Summary of the ServiceManager startup flow:
a. Open the binder driver and establish a 128K (128*1024-byte) memory mapping.
b. Register itself (ServiceManager) as the Binder context manager.
c. Enter an infinite loop, acting as the server and waiting for clients to connect.
2. Registering a Service with ServiceManager
ServiceManager.addService(Context.WINDOW_SERVICE, wm);
Equivalent to: ServiceManager.addService("window", new WindowManagerService(...));
We take the registration of WMS as the example for analyzing how a service is registered.
frameworks/base/core/java/android/os/ServiceManager.java:
public static void addService(String name, IBinder service) {
try {
getIServiceManager().addService(name, service, false);
} catch (RemoteException e) {
Log.e(TAG, "error in addService", e);
}
}
2.1 Analyzing getIServiceManager()
private static IServiceManager getIServiceManager() {
if (sServiceManager != null) {
return sServiceManager;
}
sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
return sServiceManager;
}
a. Analyzing BinderInternal.getContextObject():
frameworks/base/core/java/com/android/internal/os/BinderInternal.java:
public static final native IBinder getContextObject();
frameworks/base/core/jni/android_util_Binder.cpp:
{ "getContextObject", "()Landroid/os/IBinder;", (void*)android_os_BinderInternal_getContextObject },
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
a.1 Analyzing sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
a.2 Analyzing return javaObjectForIBinder(env, b);
frameworks/base/core/jni/android_util_Binder.cpp:
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
jobject object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
if (object != NULL) {
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
val->incStrong((void*)javaObjectForIBinder);
...
}
return object;
}
BinderInternal.getContextObject() therefore amounts to new BinderProxy(), where the BinderProxy.mObject field records the address of the new BpBinder(0) object.
b. Analyzing ServiceManagerNative.asInterface(…), which is equivalent to ServiceManagerNative.asInterface(new BinderProxy())
frameworks/base/core/java/android/os/ServiceManagerNative.java:
static public IServiceManager asInterface(IBinder obj)
{
if (obj == null) {
return null;
}
IServiceManager in = (IServiceManager)obj.queryLocalInterface(descriptor);
if (in != null) {
return in;
}
return new ServiceManagerProxy(obj);
}
class ServiceManagerProxy implements IServiceManager {
public ServiceManagerProxy(IBinder remote) {
mRemote = remote;
}
}
getIServiceManager() returns a ServiceManagerProxy, where ServiceManagerProxy.mRemote = new BinderProxy() and BinderProxy.mObject records the address of the new BpBinder(0) object.
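getStrongProxyForHandle() keeps one lazily created proxy per handle. Below is a rough user-space analogue of ProcessState's handle table; lookup_handle and get_proxy_for_handle are invented names, and malloc(1) merely stands in for new BpBinder(handle):
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct handle_entry { void *binder; };  /* analogue of ProcessState's handle_entry */

static struct handle_entry *table;
static size_t table_size;

/* lookupHandleLocked(): grow the table so `handle` is a valid index. */
static struct handle_entry *lookup_handle(int32_t handle)
{
    if ((size_t)handle >= table_size) {
        size_t n = (size_t)handle + 1;
        struct handle_entry *grown = realloc(table, n * sizeof(*table));
        if (!grown)
            return NULL;
        table = grown;
        for (size_t i = table_size; i < n; i++)
            table[i].binder = NULL;
        table_size = n;
    }
    return &table[handle];
}

/* getStrongProxyForHandle(): create the proxy on first use, reuse after. */
static void *get_proxy_for_handle(int32_t handle)
{
    struct handle_entry *e = lookup_handle(handle);
    if (!e)
        return NULL;
    if (e->binder == NULL)
        e->binder = malloc(1);  /* stands in for new BpBinder(handle) */
    return e->binder;
}

int main(void)
{
    void *a = get_proxy_for_handle(0);  /* the ServiceManager proxy */
    void *b = get_proxy_for_handle(0);
    printf("same proxy both times: %s\n", a == b ? "yes" : "no");
    return 0;
}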
2.2 Analyzing getIServiceManager().addService(name, service, false);
Equivalent to: ServiceManagerProxy.addService("window", new WindowManagerService(...), false)
frameworks/base/core/java/android/os/ServiceManagerNative.java:
class ServiceManagerProxy implements IServiceManager {
public void addService(String name, IBinder service, boolean allowIsolated)
throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
data.writeStrongBinder(service);
data.writeInt(allowIsolated ? 1 : 0);
mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
reply.recycle();
data.recycle();
}
}
a. Creating the Parcel, the data carrier for Binder IPC
frameworks/base/core/java/android/os/Parcel.java:
public static Parcel obtain() {
...
return new Parcel(0);
}
a.1 Analyzing data.writeString("window");
frameworks/base/core/java/android/os/Parcel.java:
public final void writeString(String val) {
nativeWriteString(mNativePtr, val);
}
frameworks/base/core/jni/android_os_Parcel.cpp:
{"nativeWriteString", "(JLjava/lang/String;)V", (void*)android_os_Parcel_writeString},
static void android_os_Parcel_writeString(JNIEnv* env, jclass clazz, jlong nativePtr, jstring val)
{
Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
if (parcel != NULL) {
...
err = parcel->writeString16(str, env->GetStringLength(val));
}
}
frameworks/native/libs/binder/Parcel.cpp:
status_t Parcel::writeString16(const String16& str)
{
return writeString16(str.string(), str.size());
}
status_t Parcel::writeString16(const char16_t* str, size_t len)
{
status_t err = writeInt32(len);
if (err == NO_ERROR) {
len *= sizeof(char16_t);
uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
if (data) {
memcpy(data, str, len);
*reinterpret_cast<char16_t*>(data+len) = 0;
return NO_ERROR;
}
err = mError;
}
return err;
}
status_t Parcel::writeInt32(int32_t val)
{
return writeAligned(val);
}
void* Parcel::writeInplace(size_t len)
{
...
uint8_t* const data = mData+mDataPos;
return data;
}
static void memcpy(void* dst, void* src, size_t size) { /* simplified illustration of the libc routine */
char* dst_c = (char*) dst, *src_c = (char*) src;
for (; size > 0; size--) {
*dst_c++ = *src_c++;
}
}
From data.writeString("window") we can conclude: data.mData holds "window".
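The byte layout writeString16 leaves in mData can be reproduced directly: a 4-byte length in char16_t units, the UTF-16 code units, and a trailing 16-bit NUL (the real Parcel additionally pads writes to 4-byte boundaries, which this sketch omits):
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const uint16_t name[] = { 'w', 'i', 'n', 'd', 'o', 'w' };
    uint8_t mData[64];
    size_t pos = 0;

    int32_t len = sizeof(name) / sizeof(name[0]);
    memcpy(mData + pos, &len, sizeof(len));   /* writeInt32(len)          */
    pos += sizeof(len);
    memcpy(mData + pos, name, sizeof(name));  /* memcpy(data, str, len*2) */
    pos += sizeof(name);
    uint16_t nul = 0;
    memcpy(mData + pos, &nul, sizeof(nul));   /* trailing char16_t 0      */
    pos += sizeof(nul);

    printf("\"window\" occupies %zu bytes of mData\n", pos);
    return 0;
}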
a.2 Analyzing data.writeStrongBinder(new WindowManagerService(...));
frameworks/base/core/java/android/os/Parcel.java:
public final void writeStrongBinder(IBinder val) {
nativeWriteStrongBinder(mNativePtr, val);
}
frameworks/base/core/jni/android_os_Parcel.cpp:
{"nativeWriteStrongBinder", "(JLandroid/os/IBinder;)V", (void*)android_os_Parcel_writeStrongBinder},
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
{
Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
if (parcel != NULL) {
const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
if (err != NO_ERROR) {
signalExceptionForError(env, clazz, err);
}
}
}
frameworks/native/libs/binder/Parcel.cpp:
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& ,
const sp<IBinder>& binder, Parcel* out)
{
flat_binder_object obj;
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
if (binder != NULL) {
IBinder *local = binder->localBinder();
if (!local) {
BpBinder *proxy = binder->remoteBinder();
if (proxy == NULL) {
ALOGE("null proxy");
}
const int32_t handle = proxy ? proxy->handle() : 0;
...
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
}
} else {
...
}
return finish_flatten_binder(binder, obj, out);
}
inline static status_t finish_flatten_binder(
const sp<IBinder>& , const flat_binder_object& flat, Parcel* out)
{
return out->writeObject(flat, false);
}
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
const bool enoughObjects = mObjectsSize < mObjectsCapacity;
if (enoughData && enoughObjects) {
*reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
if (nullMetaData || val.binder != 0) {
mObjects[mObjectsSize] = mDataPos;
acquire_object(ProcessState::self(), val, this);
mObjectsSize++;
}
}
...
}
From data.writeStrongBinder(new WindowManagerService(...)) we can conclude that data now records the server side of new WindowManagerService(...):
1) Parcel's member mData holds "window";
2) mData also receives a flat_binder_object, whose offset is recorded in Parcel's member mObjects, where
flat_binder_object.binder holds the weak-reference object of the new WindowManagerService(...) server side, and
flat_binder_object.cookie holds the server-side object itself.
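To underline the division of labor between mData and mObjects: the flattened object's bytes live in mData, and mObjects records only its offset. A sketch with a cut-down flat_obj (the real flat_binder_object has more fields, and the type value here is a placeholder):
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flat_obj { uint32_t type; uintptr_t binder; uintptr_t cookie; };

int main(void)
{
    uint8_t   mData[256];
    size_t    mDataPos = 32;   /* pretend "window" already occupies 32 bytes */
    uintptr_t mObjects[4];
    size_t    mObjectsSize = 0;

    struct flat_obj obj = { 0x1u /* stand-in for BINDER_TYPE_BINDER */,
                            0x1234 /* weak refs */, 0x5678 /* local object */ };
    memcpy(mData + mDataPos, &obj, sizeof(obj));  /* writeObject(): bytes -> mData */
    mObjects[mObjectsSize++] = mDataPos;          /* mObjects records the offset  */

    printf("flat_binder_object #%zu at mData offset %zu\n",
           mObjectsSize, (size_t)mObjects[0]);
    return 0;
}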
b. Analyzing mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
Here mRemote = new BinderProxy() and data is the Parcel; data carries what the proxy sends to native, while reply carries what native returns to the proxy.
frameworks/base/core/java/android/os/Binder.java:
final class BinderProxy implements IBinder {
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
return transactNative(code, data, reply, flags);
}
public native boolean transactNative(int code, Parcel data, Parcel reply,
int flags) throws RemoteException;
}
frameworks/base/core/jni/android_util_Binder.cpp:
{"transactNative", "(ILandroid/os/Parcel;Landroid/os/Parcel;I)Z", (void*)android_os_BinderProxy_transact},
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags)
{
IBinder* target = (IBinder*)env->GetLongField(obj, gBinderProxyOffsets.mObject);
status_t err = target->transact(code, *data, reply, flags);
}
frameworks/native/libs/binder/BpBinder.cpp:
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
frameworks/native/libs/binder/IPCThreadState.cpp:
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
err = waitForResponse(reply);
return err;
}
This is where the actual IPC call begins.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0;
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
This writes into IPCThreadState.mOut:
1) cmd = BC_TRANSACTION
2) tr = a binder_transaction_data capturing the Parcel's contents, where
binder_transaction_data.target.handle = BpBinder.mHandle, the handle for the ServiceManager process (during registration ServiceManager acts as the server)
binder_transaction_data.data.ptr.buffer = data.ipcData() = mData = "window"
binder_transaction_data.data.ptr.offsets = data.ipcObjects() = mObjects = the server side of new WindowManagerService(...)
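A sketch of the mOut framing that writeTransactionData produces; fake_tr and the literal command/code values are invented stand-ins for binder_transaction_data and the real BC_TRANSACTION/ADD_SERVICE_TRANSACTION constants:
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_tr { uint32_t handle; uint32_t code; };  /* stand-in for binder_transaction_data */

static uint8_t mOut[128];
static size_t  mOutSize;

/* writeTransactionData(): append [cmd][tr] to the mOut stream;
 * talkWithDriver() later hands the whole thing over as bwr.write_buffer. */
static void write_transaction_data(uint32_t cmd, const struct fake_tr *tr)
{
    memcpy(mOut + mOutSize, &cmd, sizeof(cmd)); mOutSize += sizeof(cmd);
    memcpy(mOut + mOutSize, tr, sizeof(*tr));   mOutSize += sizeof(*tr);
}

int main(void)
{
    struct fake_tr tr = { .handle = 0 /* ServiceManager */, .code = 3 /* illustrative */ };
    write_transaction_data(0xC0DE /* illustrative BC_TRANSACTION */, &tr);
    printf("mOut now holds %zu bytes: one command plus its transaction data\n",
           mOutSize);
    return 0;
}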
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = mIn.readInt32();
switch (cmd) {
...
default:
err = executeCommand(cmd);
}
}
return err;
}
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
binder_write_read bwr;
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
...
} while (err == -EINTR);
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
return ret;
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
...
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
...
}
ret = 0;
err:
...
return ret;
}
copy_from_user copies the user-space binder_write_read into the kernel; binder_thread_write and binder_thread_read then process the request, and copy_to_user finally returns the updated binder_write_read to user space.
The bwr.write_buffer passed to binder_thread_write is mOut, and mOut holds a binder_transaction_data, where
binder_transaction_data.target.handle = BpBinder.mHandle, the handle for the ServiceManager process (during registration ServiceManager acts as the server)
binder_transaction_data.data.ptr.buffer = data.ipcData() = Parcel.mData = "window"
binder_transaction_data.data.ptr.offsets = data.ipcObjects() = Parcel.mObjects = the server side of new WindowManagerService(...)
Recall the conclusions from "Creating the Parcel, the data carrier for Binder IPC":
1) Parcel's member mData holds "window";
2) mData also holds a flat_binder_object (its offset recorded in Parcel.mObjects), where
flat_binder_object.binder holds the weak-reference object of the new WindowManagerService(...) server side, and
flat_binder_object.cookie holds the server-side object itself.
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd);
break;
}
}
return 0;
}
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data * tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error = BR_OK;
struct binder_ref *ref;
t = kzalloc(sizeof(*t), GFP_KERNEL); /* allocations elided from the original excerpt */
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
ref = binder_get_ref(proc, tr->target.handle);
target_node = ref->node;
target_proc = target_node->proc;
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
target_list = &target_proc->todo;
target_wait = &target_proc->wait; /* needed by wake_up_interruptible() below */
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
...
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
...
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
node = binder_new_node(proc, fp->binder, fp->cookie);
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
...
ref = binder_get_ref_for_node(target_proc, node);
fp->handle = ref->desc;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
} break;
...
}
}
if (reply) {
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
}
The tr argument of binder_transaction is bwr.write_buffer = mOut, and mOut holds a binder_transaction_data, where
binder_transaction_data.target.handle = BpBinder.mHandle, the handle for the ServiceManager process (during registration ServiceManager acts as the server)
binder_transaction_data.data.ptr.buffer = data.ipcData() = Parcel.mData = "window"
binder_transaction_data.data.ptr.offsets = data.ipcObjects() = Parcel.mObjects = the server side of new WindowManagerService(...)
1) The first copy_from_user copies tr->data.ptr.buffer = "window" into binder_transaction->buffer->data.
2) The second copy_from_user copies tr->data.ptr.offsets = Parcel.mObjects (the WMS server side) to the offp pointer; fp is then read at each offset, binder_new_node creates a binder_node, binder_inc_ref takes a reference on it, and the transaction carrying the node is queued on ServiceManager's todo list, where:
binder_node.ptr = fp->binder, identifying the service registered as "window"
binder_node.cookie = fp->cookie, the server side of new WindowManagerService(...)
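The object-translation loop boils down to: for every entry in tr->data.ptr.offsets, find the flat_binder_object inside the copied buffer and rewrite it for the target process. A user-space sketch with invented types and a hard-coded descriptor:
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flat_obj { uint32_t type; uintptr_t binder; uintptr_t cookie; uint32_t handle; };

/* For each recorded offset, locate the flattened object inside the
 * copied data buffer and replace the sender-side pointer with a handle
 * that is valid in the receiving process. */
static void translate_objects(uint8_t *data, const size_t *offp, size_t nobjs)
{
    for (size_t i = 0; i < nobjs; i++) {
        struct flat_obj *fp = (struct flat_obj *)(data + offp[i]);
        /* binder_get_node()/binder_new_node()/binder_get_ref_for_node()
         * elided: pretend the new binder_ref was given descriptor 7 */
        fp->handle = 7;
        printf("object at offset %zu rewritten to handle %u\n", offp[i], fp->handle);
    }
}

int main(void)
{
    uint64_t storage[16] = { 0 };            /* keep the buffer aligned */
    uint8_t *buffer = (uint8_t *)storage;
    size_t offsets[1] = { 16 };              /* one flat_binder_object at offset 16 */
    struct flat_obj obj = { 0, 0, 0, 0 };
    memcpy(buffer + offsets[0], &obj, sizeof(obj));
    translate_objects(buffer, offsets, 1);
    return 0;
}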
c. Analyzing binder_loop
frameworks/native/cmds/servicemanager/binder.c:
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
...
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
}
}
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
return ret;
binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
...
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT; /* note: binder_loop set bwr.write_buffer = 0 */
goto err;
}
if (bwr.write_size > 0) {
...
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
ret = 0;
...
return ret;
}
copy_from_user copies the user-space binder_write_read into the kernel; binder_thread_write and binder_thread_read then process the request, and copy_to_user finally returns the updated binder_write_read to user space.
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
void __user *buffer, int size,
signed long *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr)) /* first 4 bytes of bwr.read_buffer become BR_NOOP */
return -EFAULT;
ptr += sizeof(uint32_t);
}
...
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
...
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
}
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = NULL;
tr.cookie = NULL;
cmd = BR_REPLY;
}
...
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
}
return 0;
}
Earlier, the binder_node was queued (with its transaction) on ServiceManager's thread->todo list, where:
binder_node.ptr identifies the service registered as "window"
binder_node.cookie = fp->cookie, the server side of new WindowManagerService(...)
list_first_entry dequeues that work item; its node becomes target_node, and the service identity (ptr) and server side (cookie) are taken from target_node and stored into a binder_transaction_data.
After binder_thread_read finishes, the first 4 bytes of bwr.read_buffer (after the leading BR_NOOP) hold BR_TRANSACTION, and the bytes that follow hold the binder_transaction_data, where tr.target.ptr identifies "window" and tr.cookie = new WindowManagerService(...).
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
...
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
}
return r;
}
bio_init_from_txn builds a binder_io structure msg, where msg->data = "window" and msg->offs points at the new WindowManagerService(...) server side.
frameworks/native/cmds/servicemanager/service_manager.c:
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
if (txn->target.handle != svcmgr_handle)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}
if (sehandle && selinux_status_updated() > 0) {
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}
switch(txn->code) {
...
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
}
bio_put_uint32(reply, 0);
return 0;
}
do_add_service is now called to register the service.
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
struct svcinfo *si;
if (!handle || (len == 0) || (len > 127))
return -1;
if (!svc_can_register(s, len, spid)) {
return -1;
}
si = find_svc(s, len);
if (si) {
if (si->handle) {
svcinfo_death(bs, si);
}
si->handle = handle;
} else {
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {
return -1;
}
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
si->next = svclist;
svclist = si;
}
binder_acquire(bs, handle);
binder_link_to_death(bs, handle, &si->death);
return 0;
}
struct svcinfo
{
struct svcinfo *next;
uint32_t handle;
struct binder_death death;
int allow_isolated;
size_t len;
uint16_t name[0];
};
Executing do_add_service creates a svcinfo and links it into the svclist, where svcinfo.name holds the service's name and svcinfo.handle holds the service's handle.
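Note the uint16_t name[0] tail of struct svcinfo: do_add_service allocates the header and the UTF-16 name as one block. A minimal sketch of that flexible-array pattern (svcinfo_sketch and make_svc are invented names):
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct svcinfo_sketch {
    struct svcinfo_sketch *next;
    uint32_t handle;
    size_t len;
    uint16_t name[];  /* C99 spelling of the name[0] idiom above */
};

static struct svcinfo_sketch *make_svc(const uint16_t *s, size_t len, uint32_t handle)
{
    /* one allocation covers the header plus the UTF-16 name and its NUL */
    struct svcinfo_sketch *si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
    if (!si)
        return NULL;
    si->next = NULL;
    si->handle = handle;
    si->len = len;
    memcpy(si->name, s, len * sizeof(uint16_t));
    si->name[len] = 0;  /* do_add_service NUL-terminates the same way */
    return si;
}

int main(void)
{
    const uint16_t window[] = { 'w', 'i', 'n', 'd', 'o', 'w' };
    struct svcinfo_sketch *si = make_svc(window, 6, 7);
    if (!si)
        return 1;
    printf("registered service: handle %u, name length %zu\n", si->handle, si->len);
    free(si);
    return 0;
}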
Summary of registering a service with ServiceManager, e.g. ServiceManager.addService("window", new WindowManagerService(...)):
a. Create a binder_node and queue it (with its transaction) on ServiceManager's thread->todo list, where binder_node.ptr identifies "window" and binder_node.cookie is the server side of new WindowManagerService(...).
b. Dequeue the binder_node from ServiceManager's todo list.
c. From that binder_node, create a svcinfo and link it into the svclist, where svcinfo.name holds the service name "window" and svcinfo.handle corresponds to the server side of new WindowManagerService(...).
3. Getting a Service from ServiceManager
frameworks/base/services/core/java/com/android/server/InputMethodManagerService.java:
ServiceManager.getService(Context.WINDOW_SERVICE);
Equivalent to: ServiceManager.getService("window");
We take fetching the WMS service as the example for analyzing how a service is obtained.
frameworks/base/core/java/android/os/ServiceManager.java:
public static IBinder getService(String name) {
try {
IBinder service = sCache.get(name);
if (service != null) {
return service;
} else {
return getIServiceManager().getService(name);
}
} catch (RemoteException e) {
Log.e(TAG, "error in getService", e);
}
return null;
}
getIServiceManager().getService(name)
Equivalent to: ServiceManagerProxy.getService("window")
class ServiceManagerProxy implements IServiceManager {
public IBinder getService(String name) throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
}
a. Parcel.obtain(); // create a Parcel object
b. data.writeString("window"); // write the data to cross the process boundary into the Parcel
c. mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0); // data carries what the proxy sends to native; reply carries what native returns
3.1 Analyzing mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
From the earlier analysis we know mRemote = new BinderProxy().
This eventually reaches:
frameworks/native/libs/binder/IPCThreadState.cpp:
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
err = waitForResponse(reply);
return err;
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0;
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
This builds a binder_transaction_data and stores it in mOut, where
binder_transaction_data.target.handle = BpBinder.mHandle
binder_transaction_data.code = GET_SERVICE_TRANSACTION
binder_transaction_data.data.ptr.buffer = "window"
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = mIn.readInt32();
switch (cmd) {
...
default:
err = executeCommand(cmd);
}
}
return err;
}
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
binder_write_read bwr;
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
...
} while (err == -EINTR);
...
return err;
}
This drains the binder_transaction_data stored in mOut, recording it as binder_write_read.write_buffer.
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
return ret;
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
...
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
...
}
ret = 0;
err:
...
return ret;
}
copy_from_user copies the user-space binder_write_read into the kernel; binder_thread_write and binder_thread_read then process the request, and copy_to_user finally returns the updated binder_write_read to user space.
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd);
break;
}
*consumed = ptr - buffer;
}
return 0;
}
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error = BR_OK;
struct binder_ref *ref;
t = kzalloc(sizeof(*t), GFP_KERNEL); /* allocations elided from the original excerpt */
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
ref = binder_get_ref(proc, tr->target.handle);
target_node = ref->node;
target_proc = target_node->proc;
target_list = &target_proc->todo;
target_wait = &target_proc->wait; /* needed by wake_up_interruptible() below */
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
...
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
...
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
...
}
if (reply) {
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
}
From the binder_transaction_data a binder_transaction is built and queued on the target binder_proc's todo list (here ServiceManager's), where
binder_transaction.buffer.data = binder_transaction_data.data.ptr.buffer = "window"
binder_transaction.code = binder_transaction_data.code = GET_SERVICE_TRANSACTION
Note: binder_transaction_data.target.handle = BpBinder.mHandle
3.2 Analyzing binder_loop
ServiceManager's thread->todo queue now holds the binder_transaction t, with t->buffer->data = tr->data.ptr.buffer = "window".
frameworks/native/cmds/servicemanager/binder.c:
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
...
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
}
}
a. Analyzing res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
return ret;
binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
...
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT; /* note: binder_loop set bwr.write_buffer = 0 */
goto err;
}
if (bwr.write_size > 0) {
...
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
ret = 0;
...
return ret;
}
copy_from_user copies the user-space binder_write_read into the kernel; binder_thread_write and binder_thread_read then process the request, and copy_to_user finally returns the updated binder_write_read to user space.
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
void __user *buffer, int size,
signed long *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr)) /* first 4 bytes of bwr.read_buffer become BR_NOOP */
return -EFAULT;
ptr += sizeof(uint32_t);
}
...
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
...
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
}
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = NULL;
tr.cookie = NULL;
cmd = BR_REPLY;
}
...
if (put_user(cmd, (uint32_t __user *)ptr)) /* here cmd = BR_TRANSACTION */
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
}
return 0;
}
binder_thread_read handles the request: the first 4 bytes of bwr.read_buffer (after the leading BR_NOOP) hold BR_TRANSACTION, and the bytes that follow hold the binder_transaction_data structure.
b. Analyzing res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
...
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
}
return r;
}
struct binder_io {
char *data; /* current read cursor into the data area */
binder_size_t *offs; /* current read cursor into the offsets area */
char *data0; /* start of the data area */
...
};
void bio_init(struct binder_io *bio, void *data,
size_t maxdata, size_t maxoffs)
{
size_t n = maxoffs * sizeof(size_t);
if (n > maxdata) {
bio->flags = BIO_F_OVERFLOW;
bio->data_avail = 0;
bio->offs_avail = 0;
return;
}
bio->data = bio->data0 = (char *) data + n;
bio->offs = bio->offs0 = data;
bio->data_avail = maxdata - n;
bio->offs_avail = maxoffs;
bio->flags = 0;
}
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
bio->data_avail = txn->data_size;
bio->offs_avail = txn->offsets_size / sizeof(size_t);
bio->flags = BIO_F_SHARED;
}
This builds a binder_io named msg, where msg.data = txn->data.ptr.buffer = "window", i.e. msg.data holds the service name "window".
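bio_get_uint32 and bio_get_string16 simply advance a cursor over msg->data, undoing the Parcel write layout from section 2. A self-contained sketch with invented bio helpers (little-endian layout assumed):
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bio { uint8_t *data; size_t avail; };  /* cut-down binder_io */

static uint32_t bio_get_u32(struct bio *b)
{
    uint32_t v;
    memcpy(&v, b->data, sizeof(v));
    b->data += sizeof(v); b->avail -= sizeof(v);
    return v;
}

/* Length-prefixed UTF-16: the mirror image of Parcel::writeString16. */
static const uint16_t *bio_get_str16(struct bio *b, size_t *len)
{
    *len = bio_get_u32(b);
    const uint16_t *s = (const uint16_t *)b->data;
    size_t bytes = (*len + 1) * sizeof(uint16_t);  /* data + trailing NUL */
    b->data += bytes; b->avail -= bytes;
    return s;
}

int main(void)
{
    /* [len=6]["window"][NUL], little-endian, as the write side laid it out */
    uint8_t buf[32] = { 6, 0, 0, 0,
                        'w',0, 'i',0, 'n',0, 'd',0, 'o',0, 'w',0, 0,0 };
    struct bio msg = { buf, sizeof(buf) };
    size_t len;
    const uint16_t *s = bio_get_str16(&msg, &len);
    printf("service name has %zu UTF-16 units, first is '%c'\n", len, (char)s[0]);
    return 0;
}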
frameworks/native/cmds/servicemanager/service_manager.c:
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
if (txn->target.handle != svcmgr_handle)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}
if (sehandle && selinux_status_updated() > 0) {
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
}
bio_put_uint32(reply, 0);
return 0;
}
do_find_service is executed to look up the service.
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
struct svcinfo *si;
if (!svc_can_find(s, len, spid)) {
return 0;
}
si = find_svc(s, len);
if (si && si->handle) {
if (!si->allow_isolated) {
uid_t appid = uid % AID_USER;
if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
return 0;
}
}
return si->handle;
} else {
return 0;
}
}
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
struct svcinfo *si;
for (si = svclist; si; si = si->next) {
if ((len == si->len) &&
!memcmp(s16, si->name, len * sizeof(uint16_t))) {
return si;
}
}
return NULL;
}
The service name is read out of the binder_io msg; do_find_service then searches the svclist for the svcinfo with name = "window" and returns svcinfo.handle in the reply, i.e. the handle corresponding to the new WindowManagerService(...) server side.
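What bio_put_ref(reply, handle) leaves in the reply is, in essence, a flat_binder_object of handle type carrying svcinfo.handle, which the driver translates for the requesting process. A sketch with an invented struct layout and placeholder values:
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flat_obj { uint32_t type; uint32_t flags; uint32_t handle; uintptr_t cookie; };

int main(void)
{
    uint8_t reply[64];
    size_t pos = 0;

    struct flat_obj ref = {
        .type   = 2u,   /* stand-in for BINDER_TYPE_HANDLE */
        .handle = 7u,   /* svcinfo.handle found by find_svc() */
    };
    memcpy(reply + pos, &ref, sizeof(ref));
    pos += sizeof(ref);

    printf("reply carries a %zu-byte handle reference (handle %u)\n",
           pos, ref.handle);
    return 0;
}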
Summary of obtaining a service from ServiceManager, e.g. ServiceManager.getService("window"): ServiceManager searches the svclist for the svcinfo with name = "window" and returns svcinfo.handle, which corresponds to the server side of new WindowManagerService(...).