A Journey Through Android Binder

Published by WaterlooBridge on 2020-04-06

Since I have recently been thinking about changing jobs, I opened the Android 10 source code and went through Binder IPC once more, so that I would be well prepared for interviews.

AIDL

Android applications normally use AIDL to implement inter-process communication. After writing the .aidl file, syncing the project generates the corresponding Stub and Proxy code for us. Implementing the Stub class gives us the remote object on the server side, which is returned from the Service's onBind() method. On the client side, the same .aidl file generates an asInterface() method; passing the IBinder parameter of ServiceConnection's onServiceConnected() callback into asInterface() yields a proxy for the server's remote object, i.e. an instance of the generated Proxy class. Note: if client and server are in the same process, asInterface() directly returns the Stub implementation object, i.e. the object returned from onBind().
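To make this concrete, here is a minimal sketch of the usual setup, assuming a hypothetical AIDL interface IRemotePlayer with the setSurface() method that appears in the generated code quoted below (the interface and class names are illustrative, not taken from the article):

import android.app.Service;
import android.content.ComponentName;
import android.content.Intent;
import android.content.ServiceConnection;
import android.os.IBinder;
import android.view.Surface;

// Server side: implement the generated Stub and return it from onBind().
public class PlayerService extends Service {
    private final IRemotePlayer.Stub mBinder = new IRemotePlayer.Stub() {
        @Override
        public void setSurface(Surface surface) {
            // Real work happens here, in the server process, on a binder thread.
        }
    };

    @Override
    public IBinder onBind(Intent intent) {
        return mBinder;
    }
}

// Client side: asInterface() returns the Stub itself within the same process,
// or a generated Stub.Proxy wrapping a BinderProxy when the service runs in another process.
class PlayerConnection implements ServiceConnection {
    @Override
    public void onServiceConnected(ComponentName name, IBinder service) {
        IRemotePlayer player = IRemotePlayer.Stub.asInterface(service);
        // player.setSurface(...) now goes through the Proxy.setSurface() shown below.
    }

    @Override
    public void onServiceDisconnected(ComponentName name) { }
}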

transact

Through the Proxy object we can call methods on the server's remote object as if they were local; under the hood the call actually goes through IBinder's transact() method.

@Override public void setSurface(android.view.Surface surface) throws android.os.RemoteException
{
    android.os.Parcel _data = android.os.Parcel.obtain();
    android.os.Parcel _reply = android.os.Parcel.obtain();
    try {
        _data.writeInterfaceToken(DESCRIPTOR);
        if ((surface!=null)) {
            _data.writeInt(1);
            surface.writeToParcel(_data, 0);
        }
        else {
            _data.writeInt(0);
        }
        mRemote.transact(Stub.TRANSACTION_setSurface, _data, _reply, 0);
        _reply.readException();
    }
    finally {
        _reply.recycle();
        _data.recycle();
    }
}

On the client side, the corresponding IBinder object is the BinderProxy that will be introduced later.

frameworks/base/core/java/android/os/BinderProxy.java

public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
        Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
         ...    
        try {
            return transactNative(code, data, reply, flags);
        } finally {
            ...     
        }
    }

This ultimately calls into the JNI method transactNative.

frameworks/base/core/jni/android_util_Binder.cpp

static const JNINativeMethod gBinderProxyMethods[] = {
     /* name, signature, funcPtr */
    {"pingBinder",          "()Z", (void*)android_os_BinderProxy_pingBinder},
    {"isBinderAlive",       "()Z", (void*)android_os_BinderProxy_isBinderAlive},
    {"getInterfaceDescriptor", "()Ljava/lang/String;", (void*)android_os_BinderProxy_getInterfaceDescriptor},
    {"transactNative",      "(ILandroid/os/Parcel;Landroid/os/Parcel;I)Z", (void*)android_os_BinderProxy_transact},
    {"linkToDeath",         "(Landroid/os/IBinder$DeathRecipient;I)V", (void*)android_os_BinderProxy_linkToDeath},
    {"unlinkToDeath",       "(Landroid/os/IBinder$DeathRecipient;I)Z", (void*)android_os_BinderProxy_unlinkToDeath},
    {"getNativeFinalizer",  "()J", (void*)android_os_BinderProxy_getNativeFinalizer},
};
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
        jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
    Parcel* data = parcelForJavaObject(env, dataObj);
      ...   
    IBinder* target = getBPNativeData(env, obj)->mObject.get();
      ...   
    status_t err = target->transact(code, *data, reply, flags);
      ...   

    if (err == NO_ERROR) {
        return JNI_TRUE;
    } else if (err == UNKNOWN_TRANSACTION) {
        return JNI_FALSE;
    }
    signalExceptionForError(env, obj, err, true /*canThrowRemoteException*/, data->dataSize());
    return JNI_FALSE;
}

Here getBPNativeData() retrieves the mNativeData field of the Java BinderProxy object, a field that is assigned from a constructor argument. Where the BinderProxy object comes from will be covered later; the mObject inside mNativeData points to the C++ object BpBinder.

frameworks/native/libs/binder/BpBinder.cpp

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

IPCThreadState

Speaking of IPCThreadState, we first have to mention how app processes are created: in Android, every app process is forked from the Zygote process. After the child process is created, ZygoteInit.nativeZygoteInit() is called.

frameworks/base/core/jni/AndroidRuntime.cpp

static void com_android_internal_os_ZygoteInit_nativeZygoteInit(JNIEnv* env, jobject clazz)
{
    gCurRuntime->onZygoteInit();
}

frameworks/base/cmds/app_process/app_main.cpp

virtual void onZygoteInit()
    {
        sp<ProcessState> proc = ProcessState::self();
        ALOGV("App process: starting thread pool.\n");
        proc->startThreadPool();
    }

This brings in another object, ProcessState::self(). ProcessState is a singleton, and its constructor calls open_driver() to open the binder driver and then maps memory with mmap().

frameworks/native/libs/binder/ProcessState.cpp

sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != nullptr) {
        return gProcess;
    }
    gProcess = new ProcessState(kDefaultDriver);
    return gProcess;
}
ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(nullptr)
    , mBinderContextUserData(nullptr)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
    , mCallRestriction(CallRestriction::NONE)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
       ...     
    }
}
static int open_driver(const char *driver)
{
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        ...     
    }
    return fd;
}

Because the corresponding file_operations were registered when the binder device driver was registered, calling open() on the binder device eventually enters the kernel and lands in binder_open().

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

Since the binder driver is a kernel module, the binder code below comes from kernel 3.18, drivers/staging/android/binder.c.

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        ...     
	return 0;
}

The mmap() call in ProcessState's constructor eventually enters the kernel and lands in binder_mmap(). The defining feature of Binder's mmap is that inter-process communication needs only a single copy.

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;
        //binder_update_page_range does three things: 1) allocate the requested number of physical pages; 2) map those pages into the kernel address space 'area'; 3) map the same pages into the user address space 'vma'
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	return 0;
}

Linux memory is divided into user space and kernel space, and page tables likewise come in two kinds: user-space page tables and kernel-space page tables. Every process has its own user-space page table, but the system has only one kernel-space page table. The key point of binder_mmap is that, while updating the user-space page table, it also maps the same physical pages in the kernel page table, so both page tables point at the same memory. As a result, data only needs to be copied once, from process A's user space directly into the kernel buffer belonging to process B; since that kernel buffer is also mapped into process B's user space, no second copy from kernel space to user space is needed.

Back to onZygoteInit(): after obtaining the proc object via ProcessState::self(), startThreadPool() is called right away. startThreadPool() creates a PoolThread thread object, and PoolThread's threadLoop() calls joinThreadPool() on IPCThreadState::self() — the very object that the transact() path above ends up calling into.

frameworks/native/libs/binder/IPCThreadState.cpp

IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mWorkSource(kUnsetWorkSource),
      mPropagateWorkSource(false),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0),
      mCallRestriction(mProcess->mCallRestriction)
{
    pthread_setspecific(gTLS, this);
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
    mIPCThreadStateBase = IPCThreadStateBase::self();
}
void IPCThreadState::joinThreadPool(bool isMain)
{
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do {
        processPendingDerefs();
        // now get the next command to be processed, waiting if necessary
        result = getAndExecuteCommand();
        ...        
    } while (result != -ECONNREFUSED && result != -EBADF);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}

The main job of joinThreadPool() is to run getAndExecuteCommand() in a loop: pull data out of mIn and handle it according to the cmd it carries. getAndExecuteCommand() is analyzed in detail below; for now let's return to IPCThreadState::self()->transact().

IPCThreadState::self()->transact()

transact() calls writeTransactionData() to wrap the Parcel's contents into a binder_transaction_data struct and write it into mOut; waitForResponse() then writes the data in mOut to the binder device.

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err;
     ...        
    err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, nullptr);

    if ((flags & TF_ONE_WAY) == 0) {
        ...        
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        ...           
    } else {
        err = waitForResponse(nullptr, nullptr);
    }

    return err;
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

Here mOut is a private Parcel member of the IPCThreadState object. The code parameter is the transaction code of the method being invoked on the Java-level Proxy object, and the data parameter carries that method's arguments, so binder_transaction_data's data.ptr.buffer points to the argument data marshalled when the Java Proxy method was called.

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
          ...        
        case BR_REPLY:
          ...        

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
    ...           
    return err;
}

waitForResponse() calls talkWithDriver(), which builds a binder_write_read object and calls ioctl() to trap into the kernel.

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }

    binder_write_read bwr;

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
     ...        
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    do {
          ...        
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
          ...        
    } while (err == -EINTR);

    return err;
}

The ioctl() call passes in the address of the binder_write_read object bwr.

Since the ioctl() eventually enters the kernel and reaches binder_ioctl(), we now return to the kernel binder code.

drivers/staging/android/binder.c

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	   ...        
	thread = binder_get_thread(proc);
	   ...        
	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
	   ...        
	case BINDER_SET_CONTEXT_MGR:
	   ...        
	case BINDER_THREAD_EXIT:
	   ...        
	case BINDER_VERSION: {
	   ...        
	default:
		ret = -EINVAL;
		goto err;
	}
	   ...        
	trace_binder_ioctl_done(ret);
	return ret;
}

Because the command passed to ioctl() was BINDER_WRITE_READ, binder_ioctl_write_read() ends up being called.

static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;
	    ...   
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	    ...   
	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
          	    ...   
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
          	    ...   
	}
	    ...   
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	return ret;
}

copy_from_user() copies the binder_write_read object from user space into kernel space, after which binder_thread_write() is called to consume the data in bwr.write_buffer.

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
        	    ...   
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}
        	    ...   
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

Since writeTransactionData() above wrote the BC_TRANSACTION cmd followed by the binder_transaction_data object tr into mOut, get_user(cmd, (uint32_t __user *)ptr) reads out BC_TRANSACTION and the corresponding switch branch is taken: copy_from_user() copies the user-space binder_transaction_data into the kernel-space tr, and finally binder_transaction() is called.

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	binder_size_t off_min;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		in_reply_to = thread->transaction_stack;
	      	    ...   	
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
	      	    ...   
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;
			ref = binder_get_ref(proc, tr->target.handle);
	          	    ...   	
			target_node = ref->node;
		} else {
	      	    ...   	
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
	      	    ...   	
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
	      	    ...   
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	      	    ...   	
	off_end = (void *)offp + tr->offsets_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
	      	    ...   	
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		off_min = *offp + sizeof(struct flat_binder_object);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
	          	    ...   	
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);
	      	    ...   	
		} break;
	      	    ...   	
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;
	    ...   
}

binder_transaction() is the most critical step of the IPC. The reply parameter indicates whether this is a reply from the server; since this is a client-side call, binder_thread_write() passes false, so binder_get_ref() is called to look up the binder_ref corresponding to target.handle. Where this binder_ref comes from goes back to bindService, which we will cover later; for now it is enough to know that it was inserted into the current process's proc->refs_by_desc when ActivityManagerService called back the IServiceConnection object.

Whenever the current process communicates with the server process, it can use desc to find the matching binder_ref in proc->refs_by_desc, follow the binder_ref to the corresponding binder_node, and then use binder_node->cookie to obtain the address of the binder entity inside the server process. The proc here is the binder_proc created in binder_open(); it manages all binder-related state of the process inside the kernel. Next, let's look at how the main data structures relate to each other:

struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;//red-black tree managing all binder_threads in this process
	struct rb_root nodes;//every binder entity in this process gets a binder_node inserted into this tree
	/* every binder server this process has referenced gets a binder_ref, inserted into both trees below:
	   refs_by_desc is keyed by desc, refs_by_node is keyed by node (the binder's address in the process) */
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	    ...   
	struct list_head todo;
	wait_queue_head_t wait;
};

When a thread initiates binder communication, a binder_thread struct is created to manage that thread's own binder-related state.

struct binder_thread {
	struct binder_proc *proc;//the binder_proc this thread belongs to
	struct rb_node rb_node;//links this thread into proc->threads
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;//list of in-flight transaction structures t on this thread
	struct list_head todo;//after a transaction is queued on target_proc->todo, a tcomplete work item is added here
	    ...   
	wait_queue_head_t wait;
	struct binder_stats stats;
};

In the kernel, a binder entity is managed by a binder_node struct.

struct binder_node {
	    ...   
	struct binder_proc *proc;//proc of the process that owns this binder entity
	struct hlist_head refs;//every reference to this binder entity adds a binder_ref to this hash list
	int internal_strong_refs;//reference count of the binder entity
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;//reference to the binder entity in the owning process's user space
	binder_uintptr_t cookie;//address of the binder entity in user space
	    ...   
};

Each time a client process becomes associated with a server-side binder object, a binder_ref is created and inserted into the current process's proc->refs_by_desc. Whenever the process talks to the server process, it can use desc to find the matching binder_ref in proc->refs_by_desc, follow it to the corresponding binder_node, and use binder_node->cookie to obtain the address of the binder entity in the server process.

struct binder_ref {
	int debug_id;
	struct rb_node rb_node_desc;//links into the red-black tree proc->refs_by_desc
	struct rb_node rb_node_node;//links into the red-black tree proc->refs_by_node
	struct hlist_node node_entry;//links into the hash list binder_node->refs
	struct binder_proc *proc;//proc of the process that owns this reference
	struct binder_node *node;
	uint32_t desc;//number (handle) of this binder reference within the owning proc
	int strong;
	int weak;
	struct binder_ref_death *death;
};

Having introduced the main structs, let's continue with binder_transaction(). Once binder_get_ref() returns the binder_ref, we obtain the corresponding binder_node as target_node, and from target_node the target_proc of the process that owns the binder entity. Because reply is false, target_thread is NULL, target_list points to target_proc->todo and target_wait points to target_proc->wait. binder_alloc_buf() is then called to allocate a buffer in the target process to hold the data that the Java-level Proxy packed into the Parcel when the method was called, and copy_from_user() copies that Parcel data from user space into the newly allocated kernel address t->buffer->data. As described for binder_mmap above, this buffer is mapped to the same physical memory in kernel space and in user space, so when the server process later handles the IPC call, no further copy from kernel space to user space is needed.

If the method arguments contain Binder objects, they are packed into the Parcel's ipcObjects(), so the address tr->data.ptr.offsets used in the second copy_from_user() refers to the flat_binder_object entries describing those Binder objects. After the copy, these flat_binder_objects are walked one by one: binder_get_node() looks up the corresponding binder_node in the current process's proc->nodes.rb_node red-black tree, and if none is found, binder_new_node() creates a new binder_node and inserts its rb_node into proc->nodes.rb_node.

Next comes the crucial call to binder_get_ref_for_node(). It first searches the target process's proc->refs_by_node for an existing binder_ref and returns it directly if found. Otherwise it creates a new binder_ref, new_ref, walks the target process's refs_by_desc tree to find the highest desc in use, and assigns new_ref->desc one greater than that. desc is in effect the number of the binder object within the target process; it is also the handle constructor argument of the C++ BpBinder mentioned earlier, so the mHandle passed in BpBinder::transact() is exactly this binder_ref's desc. Finally, new_ref is inserted into the target process's refs_by_node and refs_by_desc trees, and binder_get_ref_for_node() is done.

static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	return new_ref;
}

Right after that we see fp->handle = ref->desc, which is why, as just mentioned, desc is BpBinder's handle constructor argument. Then list_add_tail() puts the BINDER_WORK_TRANSACTION work item on target_list (i.e. the target_proc->todo mentioned earlier), and wake_up_interruptible() wakes the target process. At this point the client-side flow pauses for the moment.

IPCThreadState::getAndExecuteCommand()

As described above, onZygoteInit() runs when an app starts, and during this process a binder thread named PoolThread is started. This thread runs getAndExecuteCommand() in an endless loop, so it is dedicated to handling IPC calls coming from client processes.

frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        cmd = mIn.readInt32();
	    ...   
        result = executeCommand(cmd);
	    ...   
    }

    return result;
}

This again calls the talkWithDriver() explained above, which calls ioctl() to trap into the kernel, eventually reaching binder_ioctl(), which in turn calls binder_ioctl_write_read(). This time, however, the focus is no longer binder_thread_write() but binder_thread_read(), so let's look at binder_thread_read() in detail.

drivers/staging/android/binder.c

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;
	    ...   
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
        	    ...   
		} break;
		case BINDER_WORK_NODE: {
        	    ...   
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
        	    ...   
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)(
					(uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}
	    ...   
	return 0;
}

At the end of binder_transaction() above, list_add_tail() put a BINDER_WORK_TRANSACTION on target_proc->todo. Now list_first_entry() takes the corresponding work.entry off proc->todo, and container_of() recovers the binder_transaction from that work.entry; this is exactly the object created in binder_transaction() above. Its fields are then copied into the binder_transaction_data object tr.

When assigning tr.data.ptr.buffer, proc->user_buffer_offset is added, because in binder_transaction() the data was copied from the client's user space into kernel space, and as explained for binder_mmap, the allocated buffer maps user space and kernel space onto the same physical memory; proc->user_buffer_offset is precisely the offset of the user-space mapping relative to the kernel-space one. Since tr will eventually be copied back to user space via copy_to_user(), tr.data.ptr.buffer must point to a user-space address. ptr points into the binder_write_read's read_buffer, which is mIn.data() from talkWithDriver(), so once binder_thread_read() finishes, execution returns to user space in IPCThreadState::getAndExecuteCommand(). As shown in getAndExecuteCommand() above, after talkWithDriver() the cmd is read out of mIn — here BR_TRANSACTION — and passed to executeCommand().

executeCommand

frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;

    switch ((uint32_t)cmd) {
	    ...   
    case BR_TRANSACTION_SEC_CTX:
    case BR_TRANSACTION:
        {
            binder_transaction_data_secctx tr_secctx;
            binder_transaction_data& tr = tr_secctx.transaction_data;

            if (cmd == (int) BR_TRANSACTION_SEC_CTX) {
                result = mIn.read(&tr_secctx, sizeof(tr_secctx));
            } else {
                result = mIn.read(&tr, sizeof(tr));
                tr_secctx.secctx = 0;
            }
	        ...   
            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
	        ...   
            Parcel reply;
            status_t error;  

            if (tr.target.ptr) {
                // We only have a weak reference on the target object, so we must first try to
                // safely acquire a strong reference before doing anything else with it.
                if (reinterpret_cast<RefBase::weakref_type*>(
                        tr.target.ptr)->attemptIncStrong(this)) {
                    error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                            &reply, tr.flags);
                    reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                } else {
                    error = UNKNOWN_TRANSACTION;
                }

            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }

            mIPCThreadStateBase->popCurrentState();

            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                if (error < NO_ERROR) reply.setError(error);
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }
	        ...   
        }
        break;
	    ...   
    return result;
}

In executeCommand(), a binder_transaction_data is read from mIn, and tr.data.ptr.buffer and tr.data.ptr.offsets are set on the Parcel buffer, so buffer holds the method arguments packed into the Parcel, while tr.code is the transaction code of the corresponding method in the AIDL-generated class. tr.cookie gives the address of the BBinder object, whose transact() is then invoked; transact() calls onTransact(), a virtual function, so the call lands in the subclass JavaBBinder's onTransact(). Where JavaBBinder comes from — like the BpBinder mentioned above — is covered below; for now, let's look at JavaBBinder's onTransact().

frameworks/base/core/jni/android_util_Binder.cpp

status_t onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0) override
{
    JNIEnv* env = javavm_to_jnienv(mVM);

    IPCThreadState* thread_state = IPCThreadState::self();
    const int32_t strict_policy_before = thread_state->getStrictModePolicy();

    jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
        code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);
	    ...   
    return res != JNI_FALSE ? NO_ERROR : UNKNOWN_TRANSACTION;
}
static int int_register_android_os_Binder(JNIEnv* env)
{
    jclass clazz = FindClassOrDie(env, kBinderPathName);

    gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact", "(IJJI)Z");
    gBinderOffsets.mGetInterfaceDescriptor = GetMethodIDOrDie(env, clazz, "getInterfaceDescriptor",
        "()Ljava/lang/String;");
    gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");

    return RegisterMethodsOrDie(
        env, kBinderPathName,
        gBinderMethods, NELEM(gBinderMethods));
}

gBinderOffsets.mExecTransact refers to the Java-level execTransact() method, and mObject is our implementation of the AIDL-generated Stub class, so what ultimately gets called is execTransact() on the Binder object we returned from the server's onBind().

frameworks/base/core/java/android/os/Binder.java

private boolean execTransact(int code, long dataObj, long replyObj,
    int flags) {
// At that point, the parcel request headers haven't been parsed so we do not know what
// WorkSource the caller has set. Use calling uid as the default.
final int callingUid = Binder.getCallingUid();
final long origWorkSource = ThreadLocalWorkSource.setUid(callingUid);
try {
    return execTransactInternal(code, dataObj, replyObj, flags, callingUid);
} finally {
    ThreadLocalWorkSource.restore(origWorkSource);
}
}
private boolean execTransactInternal(int code, long dataObj, long replyObj, int flags,
        int callingUid) {
	    ...   
    Parcel data = Parcel.obtain(dataObj);
    Parcel reply = Parcel.obtain(replyObj);
	    ...   
    try {
	    ...   
        res = onTransact(code, data, reply, flags);
    } catch (RemoteException|RuntimeException e) {
	    ...   
    } finally {
	    ...   
    }
    reply.recycle();
    data.recycle();
	    ...   
    return res;
}

So the call finally reaches the Stub class's onTransact(), which dispatches to a different method depending on code; these methods are the ones we overrode in our Stub implementation. At this point the server-side method has been invoked, while the client is still waiting for the result in waitForResponse() above. Returning to executeCommand(): once transact() — and hence the server-side method — has finished, sendReply() is called, which we will analyze next.
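Before that, for reference, this is roughly what the generated onTransact() dispatch looks like for the setSurface() example quoted at the beginning — a sketch of the typical AIDL-generated pattern, not copied from any particular build:

@Override
public boolean onTransact(int code, android.os.Parcel data, android.os.Parcel reply, int flags)
        throws android.os.RemoteException {
    switch (code) {
        case INTERFACE_TRANSACTION: {
            reply.writeString(DESCRIPTOR);
            return true;
        }
        case TRANSACTION_setSurface: {
            // Mirrors what Proxy.setSurface() wrote: the interface token, a presence flag,
            // then the flattened Surface.
            data.enforceInterface(DESCRIPTOR);
            android.view.Surface _arg0;
            if (data.readInt() != 0) {
                _arg0 = android.view.Surface.CREATOR.createFromParcel(data);
            } else {
                _arg0 = null;
            }
            // Calls the method we overrode in our Stub implementation.
            this.setSurface(_arg0);
            reply.writeNoException();
            return true;
        }
        default:
            return super.onTransact(code, data, reply, flags);
    }
}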

sendReply

frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
    status_t err;
    status_t statusBuffer;
    err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
    if (err < NO_ERROR) return err;

    return waitForResponse(nullptr, nullptr);
}

sendReply() is very similar to the client-side IPCThreadState::transact() above: both call writeTransactionData() and waitForResponse(), except that here writeTransactionData()'s cmd parameter is BC_REPLY. What follows is essentially the same flow as before: writeTransactionData() writes a binder_transaction_data into mOut, ioctl() traps into the kernel, and execution eventually reaches binder_transaction().

drivers/staging/android/binder.c

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	binder_size_t off_min;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		in_reply_to = thread->transaction_stack;
	      	    ...   	
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
	      	    ...   
		target_proc = target_thread->proc;
	} else {
	      	    ...   	
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
	      	    ...   
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	      	    ...   	
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;
	    ...   
}

Unlike last time, target_thread — the client thread waiting for the result — is now obtained through in_reply_to, and target_list no longer points to target_proc->todo but to target_thread->todo. copy_from_user() then copies the reply data into the buffer allocated from the client process, and finally list_add_tail() puts a BINDER_WORK_TRANSACTION on target_thread->todo and a BINDER_WORK_TRANSACTION_COMPLETE on the current binder thread's thread->todo. Afterwards, both sides return to waitForResponse() as their binder_thread_read() completes.

waitForResponse

frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
          ...        
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(nullptr,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(nullptr,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
    ...           
    return err;
}

On the server's binder thread, the cmd read in waitForResponse() is BR_TRANSACTION_COMPLETE, so it hits goto finish, leaves waitForResponse(), and returns to the getAndExecuteCommand() loop to wait for the next client IPC call. The client, on the other hand, enters the BR_REPLY branch and sets binder_transaction_data's tr.data.ptr.buffer and tr.data.ptr.offsets on its own reply — the Parcel-typed reply argument passed when the client made the call through BinderProxy. After goto finish ends waitForResponse(), we are back where we started:

@Override public void setSurface(android.view.Surface surface) throws android.os.RemoteException
{
    android.os.Parcel _data = android.os.Parcel.obtain();
    android.os.Parcel _reply = android.os.Parcel.obtain();
    try {
        _data.writeInterfaceToken(DESCRIPTOR);
        if ((surface!=null)) {
            _data.writeInt(1);
            surface.writeToParcel(_data, 0);
        }
        else {
            _data.writeInt(0);
        }
        mRemote.transact(Stub.TRANSACTION_setSurface, _data, _reply, 0);
        _reply.readException();
    }
    finally {
        _reply.recycle();
        _data.recycle();
    }
}

At this point, one complete IPC call has finished.

About BpBinder and JavaBBinder

The parameters of an IPC call may themselves contain Binder objects. Take bindService as an example: ActivityManagerService calls scheduleBindService() on the target process's ApplicationThread Binder object to bind the requested service, and via a Handler this eventually reaches ActivityThread's handleBindService().

frameworks/base/core/java/android/app/ActivityThread.java

private void handleBindService(BindServiceData data) {
    Service s = mServices.get(data.token);
    if (s != null) {
        try {
            data.intent.setExtrasClassLoader(s.getClassLoader());
            data.intent.prepareToEnterProcess();
            try {
                if (!data.rebind) {
                    IBinder binder = s.onBind(data.intent);
                    ActivityManager.getService().publishService(
                            data.token, data.intent, binder);
                } else {
                    s.onRebind(data.intent);
                    ActivityManager.getService().serviceDoneExecuting(
                            data.token, SERVICE_DONE_EXECUTING_ANON, 0, 0);
                }
            } catch (RemoteException ex) {
                throw ex.rethrowFromSystemServer();
            }
        } catch (Exception e) {
                ...           
        }
    }
}

In handleBindService(), the IBinder returned from the Service's onBind() is handed to ActivityManagerService via publishService(), and publishService() is itself an IPC call whose parameters include a Binder object. As described above, an IPC call packs its arguments into a Parcel, so next let's see how a Binder object gets packed into a Parcel.
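On the Java side this is just the usual generated proxy pattern: a proxy method that takes an IBinder parameter writes it with Parcel.writeStrongBinder(). Below is a sketch modeled on IActivityManager.publishService(); the exact generated code is an assumption here and details are abbreviated, but the writeStrongBinder() call is the part that matters before we drop into the native implementation:

@Override
public void publishService(android.os.IBinder token, android.content.Intent intent,
        android.os.IBinder service) throws android.os.RemoteException {
    android.os.Parcel _data = android.os.Parcel.obtain();
    android.os.Parcel _reply = android.os.Parcel.obtain();
    try {
        _data.writeInterfaceToken(DESCRIPTOR);
        _data.writeStrongBinder(token);
        if (intent != null) {
            _data.writeInt(1);
            intent.writeToParcel(_data, 0);
        } else {
            _data.writeInt(0);
        }
        // The IBinder returned from onBind(); this call ends up in the native
        // android_os_Parcel_writeStrongBinder() shown below.
        _data.writeStrongBinder(service);
        mRemote.transact(Stub.TRANSACTION_publishService, _data, _reply, 0);
        _reply.readException();
    } finally {
        _reply.recycle();
        _data.recycle();
    }
}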

frameworks/base/core/jni/android_os_Parcel.cpp

static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
{
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
    if (parcel != NULL) {
        const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
        if (err != NO_ERROR) {
            signalExceptionForError(env, clazz, err);
        }
    }
}

frameworks/base/core/jni/android_util_Binder.cpp

sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
{
    if (obj == NULL) return NULL;

    // Instance of Binder?
    if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) {
        JavaBBinderHolder* jbh = (JavaBBinderHolder*)
            env->GetLongField(obj, gBinderOffsets.mObject);
        return jbh->get(env, obj);
    }

    // Instance of BinderProxy?
    if (env->IsInstanceOf(obj, gBinderProxyOffsets.mClass)) {
        return getBPNativeData(env, obj)->mObject;
    }

    return NULL;
}
class JavaBBinderHolder
{
public:
    sp<JavaBBinder> get(JNIEnv* env, jobject obj)
    {
        AutoMutex _l(mLock);
        sp<JavaBBinder> b = mBinder.promote();
        if (b == NULL) {
            b = new JavaBBinder(env, obj);
            mBinder = b;
        }

        return b;
    }
    ...           
};

This is where the JavaBBinder object comes from. When binder_transaction() creates the binder_node, this object's address is stored in binder_node->cookie, so the tr->cookie seen in executeCommand() above is the address of the JavaBBinder. Next, where does BpBinder come from? After the server process passes its Binder object to ActivityManagerService, ActivityManagerService calls back IServiceConnection to hand the server's Binder object to the client process, so that the client can then make IPC calls to the server. Since the IServiceConnection callback is itself an IPC call, the client has to read the method arguments out of a Parcel when handling it, which means the Binder object is read out of a Parcel. So next, let's see how a Binder object is read out of a Parcel.

frameworks/base/core/jni/android_os_Parcel.cpp

static jobject android_os_Parcel_readStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr)
{
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
    if (parcel != NULL) {
        return javaObjectForIBinder(env, parcel->readStrongBinder());
    }
    return NULL;
}

frameworks/native/libs/binder/Parcel.cpp

status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
    return unflatten_binder(ProcessState::self(), *this, val);
}
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(nullptr, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

This is where BpBinder comes from. Next, let's look at javaObjectForIBinder().

frameworks/base/core/jni/android_util_Binder.cpp

jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
    if (val == NULL) return NULL;
    ...           
    BinderProxyNativeData* nativeData = new BinderProxyNativeData();
    nativeData->mOrgue = new DeathRecipientList;
    nativeData->mObject = val;

    jobject object = env->CallStaticObjectMethod(gBinderProxyOffsets.mClass,
            gBinderProxyOffsets.mGetInstance, (jlong) nativeData, (jlong) val.get());
    ...           
    return object;
}
const char* const kBinderProxyPathName = "android/os/BinderProxy";

static int int_register_android_os_BinderProxy(JNIEnv* env)
{
    jclass clazz = FindClassOrDie(env, "java/lang/Error");
    gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);

    clazz = FindClassOrDie(env, kBinderProxyPathName);
    gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderProxyOffsets.mGetInstance = GetStaticMethodIDOrDie(env, clazz, "getInstance",
            "(JJ)Landroid/os/BinderProxy;");
    gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz, "sendDeathNotice",
            "(Landroid/os/IBinder$DeathRecipient;)V");
    gBinderProxyOffsets.mNativeData = GetFieldIDOrDie(env, clazz, "mNativeData", "J");

    clazz = FindClassOrDie(env, "java/lang/Class");
    gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");

    return RegisterMethodsOrDie(
        env, kBinderProxyPathName,
        gBinderProxyMethods, NELEM(gBinderProxyMethods));
}

By now it should be clear why we started the transact() analysis from BinderProxy: javaObjectForIBinder() calls BinderProxy's getInstance() to obtain a BinderProxy object, and that is the IBinder ultimately read out of the Parcel.
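That BinderProxy is also what the generated asInterface() from the beginning receives. A sketch of the usual generated pattern (again using the hypothetical IRemotePlayer name) shows why a same-process caller gets the Stub itself while a cross-process caller gets a Proxy wrapping the BinderProxy:

public static IRemotePlayer asInterface(android.os.IBinder obj) {
    if (obj == null) {
        return null;
    }
    // Same process: obj is the Stub itself (registered via attachInterface()),
    // so the local implementation is returned directly.
    android.os.IInterface iin = obj.queryLocalInterface(DESCRIPTOR);
    if (iin != null && iin instanceof IRemotePlayer) {
        return (IRemotePlayer) iin;
    }
    // Different process: obj is a BinderProxy, so wrap it in the generated Proxy.
    return new IRemotePlayer.Stub.Proxy(obj);
}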
