[PATCH 23/37] binder: make sure accesses to proc/thread are safe

binder_thread and binder_proc may be accessed by other threads while
a transaction is being processed. They must therefore be prevented
from being freed while a transaction that references them is in
progress.

This is done by introducing a temporary reference counter for threads
and procs that indicates the object is in use and must not be freed.
binder_thread_dec_tmpref() and binder_proc_dec_tmpref() are used to
decrement the temporary reference.

It is safe to free a binder_thread once it has no temporary
references and has been released (indicated by thread->is_dead).

It is safe to free a binder_proc once it has been released, has no
remaining threads, and has no temporary references.
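
Not part of the patch itself, but to illustrate the lifetime rule
above, here is a minimal userspace model of the temporary-reference
scheme. The obj_* names and C11 atomics are illustrative stand-ins
for the driver's binder_* helpers and kernel atomics:

/* Hypothetical userspace model of the tmpref rule: free only when
 * the object is dead AND no temporary references remain. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int tmp_ref;	/* models thread->tmp_ref */
	atomic_bool is_dead;	/* models thread->is_dead */
};

static void obj_free(struct obj *o)
{
	printf("object freed\n");
	free(o);
}

/* Pin the object while a transaction uses it. */
static void obj_inc_tmpref(struct obj *o)
{
	atomic_fetch_add(&o->tmp_ref, 1);
}

/* Unpin; free only if released and this was the last reference. */
static void obj_dec_tmpref(struct obj *o)
{
	if (atomic_fetch_sub(&o->tmp_ref, 1) == 1 &&
	    atomic_load(&o->is_dead))
		obj_free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->tmp_ref, 0);
	atomic_init(&o->is_dead, false);

	obj_inc_tmpref(o);		 /* transaction starts using o */
	atomic_store(&o->is_dead, true); /* o is released elsewhere    */
	obj_dec_tmpref(o);		 /* last user drops pin: freed  */
	return 0;
}

In the driver itself the corresponding checks are done in
binder_thread_dec_tmpref(); binder_proc_dec_tmpref() additionally
requires proc->threads to be empty (see the patch below).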

A spinlock is added to struct binder_transaction so that t->from can
be safely read and set, and so that debug code can safely access
t->to_thread and t->to_proc.
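
Again purely as an illustration (assumed names, with pthread
spinlocks and C11 atomics standing in for the kernel primitives),
the documented rule can be modelled as follows: the teardown path
clears the back-pointer under the same lock the reader takes, so a
reader either pins a live thread or sees NULL, never a dangling
pointer.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct thread_model {
	atomic_int tmp_ref;
};

struct txn_model {
	pthread_spinlock_t lock;	/* models t->lock */
	struct thread_model *from;	/* models t->from */
};

/* Reader: pin "from" before dropping the lock. */
static struct thread_model *txn_get_from(struct txn_model *t)
{
	struct thread_model *from;

	pthread_spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_fetch_add(&from->tmp_ref, 1);
	pthread_spin_unlock(&t->lock);
	return from;
}

/* Teardown: clear the pointer under the same lock. */
static void txn_clear_from(struct txn_model *t)
{
	pthread_spin_lock(&t->lock);
	t->from = NULL;
	pthread_spin_unlock(&t->lock);
}

int main(void)
{
	struct thread_model th = { 0 };
	struct txn_model t = { .from = &th };
	struct thread_model *from;

	pthread_spin_init(&t.lock, PTHREAD_PROCESS_PRIVATE);
	from = txn_get_from(&t);		     /* pins th         */
	txn_clear_from(&t);			     /* th stays pinned */
	if (from)
		atomic_fetch_sub(&from->tmp_ref, 1); /* drop the pin    */
	pthread_spin_destroy(&t.lock);
	return 0;
}

The reader side corresponds to binder_get_txn_from() in the patch
below; the model builds with e.g. cc -pthread.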

Signed-off-by: Todd Kjos <tkjos@xxxxxxxxxx>
---
 drivers/android/binder.c | 233 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 206 insertions(+), 27 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fb79f40111eb..ca7d866b89e8 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -325,6 +325,7 @@ struct binder_proc {
 	struct files_struct *files;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
+	bool is_dead;
 
 	struct list_head todo;
 	wait_queue_head_t wait;
@@ -334,6 +335,7 @@ struct binder_proc {
 	int requested_threads;
 	int requested_threads_started;
 	int ready_threads;
+	int tmp_ref;
 	long default_priority;
 	struct dentry *debugfs_entry;
 	struct binder_alloc alloc;
@@ -360,6 +362,8 @@ struct binder_thread {
 	struct binder_error reply_error;
 	wait_queue_head_t wait;
 	struct binder_stats stats;
+	atomic_t tmp_ref;
+	bool is_dead;
 };
 
 struct binder_transaction {
@@ -379,10 +383,19 @@ struct binder_transaction {
 	long	priority;
 	long	saved_priority;
 	kuid_t	sender_euid;
+	/**
+	 * @lock:  protects @from, @to_proc, and @to_thread
+	 *
+	 * @from, @to_proc, and @to_thread can be set to NULL
+	 * during thread teardown
+	 */
+	spinlock_t lock;
 };
 
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
 
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
@@ -780,6 +793,79 @@ static void binder_pop_transaction(struct binder_thread *target_thread,
 	t->from = NULL;
 }
 
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread:	thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+	/*
+	 * atomic is used to protect the counter value while
+	 * it cannot reach zero or thread->is_dead is false
+	 *
+	 * TODO: future patch adds locking to ensure that the
+	 * check of tmp_ref and is_dead is done with a lock held
+	 */
+	atomic_dec(&thread->tmp_ref);
+	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+		binder_free_thread(thread);
+		return;
+	}
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc:	proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in-use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (proc has been released, all threads have
+ * been released and not currently in-use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+	proc->tmp_ref--;
+	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+			!proc->tmp_ref) {
+		binder_free_proc(proc);
+		return;
+	}
+}
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t:	binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+		struct binder_transaction *t)
+{
+	struct binder_thread *from;
+
+	spin_lock(&t->lock);
+	from = t->from;
+	if (from)
+		atomic_inc(&from->tmp_ref);
+	spin_unlock(&t->lock);
+	return from;
+}
+
 static void binder_free_transaction(struct binder_transaction *t)
 {
 	if (t->buffer)
@@ -796,7 +882,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 
 	BUG_ON(t->flags & TF_ONE_WAY);
 	while (1) {
-		target_thread = t->from;
+		target_thread = binder_get_txn_from(t);
 		if (target_thread) {
 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 				     "send failed reply for transaction %d to %d:%d\n",
@@ -815,6 +901,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 				WARN(1, "Unexpected reply error: %u\n",
 						target_thread->reply_error.cmd);
 			}
+			binder_thread_dec_tmpref(target_thread);
 			binder_free_transaction(t);
 			return;
 		}
@@ -1395,7 +1482,7 @@ static void binder_transaction(struct binder_proc *proc,
 	binder_size_t *offp, *off_end, *off_start;
 	binder_size_t off_min;
 	u8 *sg_bufp, *sg_buf_end;
-	struct binder_proc *target_proc;
+	struct binder_proc *target_proc = NULL;
 	struct binder_thread *target_thread = NULL;
 	struct binder_node *target_node = NULL;
 	struct list_head *target_list;
@@ -1432,12 +1519,14 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 		binder_set_nice(in_reply_to->saved_priority);
 		if (in_reply_to->to_thread != thread) {
+			spin_lock(&in_reply_to->lock);
 			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
 				proc->pid, thread->pid, in_reply_to->debug_id,
 				in_reply_to->to_proc ?
 				in_reply_to->to_proc->pid : 0,
 				in_reply_to->to_thread ?
 				in_reply_to->to_thread->pid : 0);
+			spin_unlock(&in_reply_to->lock);
 			return_error = BR_FAILED_REPLY;
 			return_error_param = -EPROTO;
 			return_error_line = __LINE__;
@@ -1445,7 +1534,7 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_bad_call_stack;
 		}
 		thread->transaction_stack = in_reply_to->to_parent;
-		target_thread = in_reply_to->from;
+		target_thread = binder_get_txn_from(in_reply_to);
 		if (target_thread == NULL) {
 			return_error = BR_DEAD_REPLY;
 			return_error_line = __LINE__;
@@ -1465,6 +1554,7 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_dead_binder;
 		}
 		target_proc = target_thread->proc;
+		target_proc->tmp_ref++;
 	} else {
 		if (tr->target.handle) {
 			struct binder_ref *ref;
@@ -1508,6 +1598,7 @@ static void binder_transaction(struct binder_proc *proc,
 			return_error_line = __LINE__;
 			goto err_dead_binder;
 		}
+		target_proc->tmp_ref++;
 		if (security_binder_transaction(proc->tsk,
 						target_proc->tsk) < 0) {
 			return_error = BR_FAILED_REPLY;
@@ -1520,19 +1611,30 @@ static void binder_transaction(struct binder_proc *proc,
 
 			tmp = thread->transaction_stack;
 			if (tmp->to_thread != thread) {
+				spin_lock(&tmp->lock);
 				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
 					proc->pid, thread->pid, tmp->debug_id,
 					tmp->to_proc ? tmp->to_proc->pid : 0,
 					tmp->to_thread ?
 					tmp->to_thread->pid : 0);
+				spin_unlock(&tmp->lock);
 				return_error = BR_FAILED_REPLY;
 				return_error_param = -EPROTO;
 				return_error_line = __LINE__;
 				goto err_bad_call_stack;
 			}
 			while (tmp) {
-				if (tmp->from && tmp->from->proc == target_proc)
-					target_thread = tmp->from;
+				struct binder_thread *from;
+
+				spin_lock(&tmp->lock);
+				from = tmp->from;
+				if (from && from->proc == target_proc) {
+					atomic_inc(&from->tmp_ref);
+					target_thread = from;
+					spin_unlock(&tmp->lock);
+					break;
+				}
+				spin_unlock(&tmp->lock);
 				tmp = tmp->from_parent;
 			}
 		}
@@ -1556,6 +1658,7 @@ static void binder_transaction(struct binder_proc *proc,
 		goto err_alloc_t_failed;
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION);
+	spin_lock_init(&t->lock);
 
 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
 	if (tcomplete == NULL) {
@@ -1814,6 +1917,8 @@ static void binder_transaction(struct binder_proc *proc,
 	list_add_tail(&tcomplete->entry, &thread->todo);
 
 	if (reply) {
+		if (target_thread->is_dead)
+			goto err_dead_proc_or_thread;
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction(target_thread, in_reply_to);
 		binder_free_transaction(in_reply_to);
@@ -1822,6 +1927,11 @@ static void binder_transaction(struct binder_proc *proc,
 		t->need_reply = 1;
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
+		if (target_proc->is_dead ||
+				(target_thread && target_thread->is_dead)) {
+			binder_pop_transaction(thread, t);
+			goto err_dead_proc_or_thread;
+		}
 	} else {
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
@@ -1830,6 +1940,9 @@ static void binder_transaction(struct binder_proc *proc,
 			target_wait = NULL;
 		} else
 			target_node->has_async_transaction = 1;
+		if (target_proc->is_dead ||
+				(target_thread && target_thread->is_dead))
+			goto err_dead_proc_or_thread;
 	}
 	t->work.type = BINDER_WORK_TRANSACTION;
 	list_add_tail(&t->work.entry, target_list);
@@ -1839,6 +1952,9 @@ static void binder_transaction(struct binder_proc *proc,
 		else
 			wake_up_interruptible(target_wait);
 	}
+	if (target_thread)
+		binder_thread_dec_tmpref(target_thread);
+	binder_proc_dec_tmpref(target_proc);
 	/*
 	 * write barrier to synchronize with initialization
 	 * of log entry
@@ -1847,6 +1963,9 @@ static void binder_transaction(struct binder_proc *proc,
 	WRITE_ONCE(e->debug_id_done, t_debug_id);
 	return;
 
+err_dead_proc_or_thread:
+	return_error = BR_DEAD_REPLY;
+	return_error_line = __LINE__;
 err_translate_failed:
 err_bad_object_type:
 err_bad_offset:
@@ -1869,6 +1988,10 @@ static void binder_transaction(struct binder_proc *proc,
 err_dead_binder:
 err_invalid_target_handle:
 err_no_context_mgr_node:
+	if (target_thread)
+		binder_thread_dec_tmpref(target_thread);
+	if (target_proc)
+		binder_proc_dec_tmpref(target_proc);
 	if (target_node)
 		binder_dec_node(target_node, 1, 0);
 
@@ -2421,6 +2544,7 @@ static int binder_thread_read(struct binder_proc *proc,
 		struct binder_transaction_data tr;
 		struct binder_work *w;
 		struct binder_transaction *t = NULL;
+		struct binder_thread *t_from;
 
 		if (!list_empty(&thread->todo)) {
 			w = list_first_entry(&thread->todo, struct binder_work,
@@ -2609,8 +2733,9 @@ static int binder_thread_read(struct binder_proc *proc,
 		tr.flags = t->flags;
 		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
 
-		if (t->from) {
-			struct task_struct *sender = t->from->proc->tsk;
+		t_from = binder_get_txn_from(t);
+		if (t_from) {
+			struct task_struct *sender = t_from->proc->tsk;
 
 			tr.sender_pid = task_tgid_nr_ns(sender,
 							task_active_pid_ns(current));
@@ -2627,11 +2752,17 @@ static int binder_thread_read(struct binder_proc *proc,
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
 
-		if (put_user(cmd, (uint32_t __user *)ptr))
+		if (put_user(cmd, (uint32_t __user *)ptr)) {
+			if (t_from)
+				binder_thread_dec_tmpref(t_from);
 			return -EFAULT;
+		}
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr)))
+		if (copy_to_user(ptr, &tr, sizeof(tr))) {
+			if (t_from)
+				binder_thread_dec_tmpref(t_from);
 			return -EFAULT;
+		}
 		ptr += sizeof(tr);
 
 		trace_binder_transaction_received(t);
@@ -2641,11 +2772,13 @@ static int binder_thread_read(struct binder_proc *proc,
 			     proc->pid, thread->pid,
 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
 			     "BR_REPLY",
-			     t->debug_id, t->from ? t->from->proc->pid : 0,
-			     t->from ? t->from->pid : 0, cmd,
+			     t->debug_id, t_from ? t_from->proc->pid : 0,
+			     t_from ? t_from->pid : 0, cmd,
 			     t->buffer->data_size, t->buffer->offsets_size,
 			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
 
+		if (t_from)
+			binder_thread_dec_tmpref(t_from);
 		list_del(&t->work.entry);
 		t->buffer->allow_user_free = 1;
 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
@@ -2757,6 +2890,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 		binder_stats_created(BINDER_STAT_THREAD);
 		thread->proc = proc;
 		thread->pid = current->pid;
+		atomic_set(&thread->tmp_ref, 0);
 		init_waitqueue_head(&thread->wait);
 		INIT_LIST_HEAD(&thread->todo);
 		rb_link_node(&thread->rb_node, parent, p);
@@ -2770,18 +2904,55 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 	return thread;
 }
 
-static int binder_free_thread(struct binder_proc *proc,
-			      struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+	BUG_ON(!list_empty(&proc->todo));
+	BUG_ON(!list_empty(&proc->delivered_death));
+	binder_alloc_deferred_release(&proc->alloc);
+	put_task_struct(proc->tsk);
+	binder_stats_deleted(BINDER_STAT_PROC);
+	kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+	BUG_ON(!list_empty(&thread->todo));
+	binder_stats_deleted(BINDER_STAT_THREAD);
+	binder_proc_dec_tmpref(thread->proc);
+	kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+				 struct binder_thread *thread)
 {
 	struct binder_transaction *t;
 	struct binder_transaction *send_reply = NULL;
 	int active_transactions = 0;
+	struct binder_transaction *last_t = NULL;
 
+	/*
+	 * take a ref on the proc so it survives
+	 * after we remove this thread from proc->threads.
+	 * The corresponding dec is when we actually
+	 * free the thread in binder_free_thread()
+	 */
+	proc->tmp_ref++;
+	/*
+	 * take a ref on this thread to ensure it
+	 * survives while we are releasing it
+	 */
+	atomic_inc(&thread->tmp_ref);
 	rb_erase(&thread->rb_node, &proc->threads);
 	t = thread->transaction_stack;
-	if (t && t->to_thread == thread)
-		send_reply = t;
+	if (t) {
+		spin_lock(&t->lock);
+		if (t->to_thread == thread)
+			send_reply = t;
+	}
+	thread->is_dead = true;
+
 	while (t) {
+		last_t = t;
 		active_transactions++;
 		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
 			     "release %d:%d transaction %d %s, still active\n",
@@ -2802,12 +2973,15 @@ static int binder_free_thread(struct binder_proc *proc,
 			t = t->from_parent;
 		} else
 			BUG();
+		spin_unlock(&last_t->lock);
+		if (t)
+			spin_lock(&t->lock);
 	}
+
 	if (send_reply)
 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
 	binder_release_work(&thread->todo);
-	kfree(thread);
-	binder_stats_deleted(BINDER_STAT_THREAD);
+	binder_thread_dec_tmpref(thread);
 	return active_transactions;
 }
 
@@ -2995,7 +3169,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case BINDER_THREAD_EXIT:
 		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
 			     proc->pid, thread->pid);
-		binder_free_thread(proc, thread);
+		binder_thread_release(proc, thread);
 		thread = NULL;
 		break;
 	case BINDER_VERSION: {
@@ -3265,7 +3439,13 @@ static void binder_deferred_release(struct binder_proc *proc)
 		context->binder_context_mgr_node = NULL;
 	}
 	mutex_unlock(&context->context_mgr_node_lock);
+	/*
+	 * Make sure proc stays alive after we
+	 * remove all the threads
+	 */
+	proc->tmp_ref++;
 
+	proc->is_dead = true;
 	threads = 0;
 	active_transactions = 0;
 	while ((n = rb_first(&proc->threads))) {
@@ -3273,7 +3453,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 
 		thread = rb_entry(n, struct binder_thread, rb_node);
 		threads++;
-		active_transactions += binder_free_thread(proc, thread);
+		active_transactions += binder_thread_release(proc, thread);
 	}
 
 	nodes = 0;
@@ -3299,17 +3479,12 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_release_work(&proc->todo);
 	binder_release_work(&proc->delivered_death);
 
-	binder_alloc_deferred_release(&proc->alloc);
-	binder_stats_deleted(BINDER_STAT_PROC);
-
-	put_task_struct(proc->tsk);
-
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
 		     __func__, proc->pid, threads, nodes, incoming_refs,
 		     outgoing_refs, active_transactions);
 
-	kfree(proc);
+	binder_proc_dec_tmpref(proc);
 }
 
 static void binder_deferred_func(struct work_struct *work)
@@ -3370,6 +3545,7 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
 static void print_binder_transaction(struct seq_file *m, const char *prefix,
 				     struct binder_transaction *t)
 {
+	spin_lock(&t->lock);
 	seq_printf(m,
 		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
 		   prefix, t->debug_id, t,
@@ -3378,6 +3554,8 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
 		   t->to_proc ? t->to_proc->pid : 0,
 		   t->to_thread ? t->to_thread->pid : 0,
 		   t->code, t->flags, t->priority, t->need_reply);
+	spin_unlock(&t->lock);
+
 	if (t->buffer == NULL) {
 		seq_puts(m, " buffer free\n");
 		return;
@@ -3442,9 +3620,10 @@ static void print_binder_thread(struct seq_file *m,
 	size_t start_pos = m->count;
 	size_t header_pos;
 
-	seq_printf(m, "  thread %d: l %02x need_return %d\n",
+	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
 			thread->pid, thread->looper,
-			thread->looper_need_return);
+			thread->looper_need_return,
+			atomic_read(&thread->tmp_ref));
 	header_pos = m->count;
 	t = thread->transaction_stack;
 	while (t) {
-- 
2.13.2.725.g09c95d1e9-goog
