// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
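/*
 * Illustrative sketch of that lock order (not part of the driver;
 * binder_proc_lock() and friends are defined further down in this
 * file). A path that needed all three locks would nest them as:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	... critical section ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */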
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
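/*
 * Usage sketch (illustrative only): callers pass one of the
 * BINDER_DEBUG_* mask bits plus a printk-style format, e.g.:
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d exit\n", proc->pid, thread->pid);
 *
 * The message is emitted (rate-limited) only when the corresponding
 * bit is set in the debug_mask module parameter.
 */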
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
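/*
 * Usage sketch (illustrative only): a parsed object is accessed
 * through its generic header first, then converted back to its full
 * type once the type field has been checked:
 *
 *	struct binder_object_header *hdr = &object->hdr;
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		...
 *	}
 *
 * These are thin wrappers around container_of(), so they are only
 * valid when @hdr really is embedded in an object of the target type.
 */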
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
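/*
 * Readers of the log pair with the smp_wmb() above: they snapshot
 * e->debug_id_done first, issue a read barrier, read the other fields,
 * and compare the snapshot again afterward to detect an entry that was
 * still being written. A minimal sketch of that reader side (names
 * other than the barriers are illustrative):
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	... read/print the other fields of *e ...
 *	if (debug_id != READ_ONCE(e->debug_id_done))
 *		... entry was being updated; treat data as unreliable ...
 */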
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry:       process-specific binderfs log file
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};
/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
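/*
 * Usage sketch (illustrative only): callers that already hold
 * proc->inner_lock use the _ilocked variant; everyone else uses the
 * locking wrapper above. That is,
 *
 *	binder_inner_proc_lock(thread->proc);
 *	binder_enqueue_thread_work_ilocked(thread, work);
 *	binder_inner_proc_unlock(thread->proc);
 *
 * is equivalent to:
 *
 *	binder_enqueue_thread_work(thread, work);
 */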
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
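/*
 * binder_wakeup_proc_ilocked() above is the select-then-wake pattern
 * from the kerneldoc in miniature. A caller that also wanted to
 * enqueue work before waking could do, in outline (sketch only, not a
 * path taken verbatim by the driver):
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_enqueue_work_ilocked(work, &proc->todo);
 *	binder_wakeup_thread_ilocked(proc, thread, false);
 *	binder_inner_proc_unlock(proc);
 */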
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
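/*
 * Typical lifetime sketch (illustrative only): a lookup takes a
 * temporary reference so the node cannot be freed while a local
 * pointer refers to it, and releases it via binder_put_node():
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp ref
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);		// drops the tmp ref
 *	}
 */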
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
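/*
 * Note the allocation idiom above: kzalloc(GFP_KERNEL) may sleep, so
 * it cannot be called while holding the proc->outer_lock spinlock.
 * The function therefore drops the lock, allocates, retakes the lock
 * and repeats the lookup; if another thread raced in and created the
 * ref first, the spare allocation is freed. In outline (sketch only,
 * lookup() standing in for binder_get_ref_for_node_olocked()):
 *
 *	binder_proc_lock(proc);
 *	ref = lookup(proc, node, NULL);
 *	if (!ref) {
 *		binder_proc_unlock(proc);
 *		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 *		binder_proc_lock(proc);
 *		ref = lookup(proc, node, new_ref);	// insert or race
 *	}
 */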
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
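/*
 * Pairing sketch (illustrative only): every successful
 * binder_get_txn_from() must be matched by binder_thread_dec_tmpref()
 * once the caller is done with the thread:
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */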
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		/* annotation for sparse */
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	/* annotation for sparse */
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		/* annotation for sparse */
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
					  offset, read_size))
		return 0;

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
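/*
 * Usage sketch (illustrative only): callers treat a zero return as
 * "no valid object at this offset" and only then dispatch on the
 * header type:
 *
 *	struct binder_object object;
 *	size_t object_size;
 *
 *	object_size = binder_get_object(proc, buffer, offset, &object);
 *	if (!object_size)
 *		return -EINVAL;		// malformed or truncated object
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_BINDER:
 *		...
 *	}
 */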
2075 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2076 * @proc: binder_proc owning the buffer
2077 * @b: binder_buffer containing the object
2078 * @object: struct binder_object to read into
2079 * @index: index in offset array at which the binder_buffer_object is
2081 * @start_offset: points to the start of the offset array
2082 * @object_offsetp: offset of @object read from @b
2083 * @num_valid: the number of valid offsets in the offset array
2085 * Return: If @index is within the valid range of the offset array
2086 * described by @start and @num_valid, and if there's a valid
2087 * binder_buffer_object at the offset found in index @index
2088 * of the offset array, that object is returned. Otherwise,
2089 * %NULL is returned.
2090 * Note that the offset found in index @index itself is not
2091 * verified; this function assumes that @num_valid elements
2092 * from @start_offset were previously verified to have valid offsets.
2093 * If @object_offsetp is non-NULL, then the offset within
2094 * @b is written to it.
2095 */
2096 static struct binder_buffer_object *binder_validate_ptr(
2097 struct binder_proc *proc,
2098 struct binder_buffer *b,
2099 struct binder_object *object,
2100 binder_size_t index,
2101 binder_size_t start_offset,
2102 binder_size_t *object_offsetp,
2103 binder_size_t num_valid)
2104 {
2105 size_t object_size;
2106 binder_size_t object_offset;
2107 unsigned long buffer_offset;
2109 if (index >= num_valid)
2110 return NULL;
2112 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2113 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2114 b, buffer_offset,
2115 sizeof(object_offset)))
2116 return NULL;
2117 object_size = binder_get_object(proc, b, object_offset, object);
2118 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2119 return NULL;
2120 if (object_offsetp)
2121 *object_offsetp = object_offset;
2123 return &object->bbo;
2124 }
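/*
 * Minimal sketch of the intended call pattern (mirrors the
 * BINDER_TYPE_FDA handling elsewhere in this file):
 *
 *	parent = binder_validate_ptr(proc, b, &ptr_object, fda->parent,
 *				     off_start_offset, &parent_offset,
 *				     num_valid);
 *	if (!parent)
 *		return -EINVAL;
 */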
2126 /**
2127 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2128 * @proc: binder_proc owning the buffer
2129 * @b: transaction buffer
2130 * @objects_start_offset: offset to start of objects buffer
2131 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2132 * @fixup_offset: start offset in @buffer to fix up
2133 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2134 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2136 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
2137 * allowed.
2139 * For safety reasons, we only allow fixups inside a buffer to happen
2140 * at increasing offsets; additionally, we only allow fixup on the last
2141 * buffer object that was verified, or one of its parents.
2143 * Example of what is allowed:
2146 * B (parent = A, offset = 0)
2147 * C (parent = A, offset = 16)
2148 * D (parent = C, offset = 0)
2149 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2151 * Examples of what is not allowed:
2153 * Decreasing offsets within the same parent:
2155 * C (parent = A, offset = 16)
2156 * B (parent = A, offset = 0) // decreasing offset within A
2158 * Referring to a parent that wasn't the last object or any of its parents:
2160 * B (parent = A, offset = 0)
2161 * C (parent = A, offset = 0)
2162 * C (parent = A, offset = 16)
2163 * D (parent = B, offset = 0) // B is not A or any of A's parents
2164 */
2165 static bool binder_validate_fixup(struct binder_proc *proc,
2166 struct binder_buffer *b,
2167 binder_size_t objects_start_offset,
2168 binder_size_t buffer_obj_offset,
2169 binder_size_t fixup_offset,
2170 binder_size_t last_obj_offset,
2171 binder_size_t last_min_offset)
2172 {
2173 if (!last_obj_offset) {
2174 /* No previously verified buffer object, so nothing to fix up into */
2175 return false;
2176 }
2178 while (last_obj_offset != buffer_obj_offset) {
2179 unsigned long buffer_offset;
2180 struct binder_object last_object;
2181 struct binder_buffer_object *last_bbo;
2182 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2183 &last_object);
2184 if (object_size != sizeof(*last_bbo))
2185 return false;
2187 last_bbo = &last_object.bbo;
2188 /*
2189 * Safe to retrieve the parent of last_obj, since it
2190 * was previously verified by the driver.
2191 */
2192 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2193 return false;
2194 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2195 buffer_offset = objects_start_offset +
2196 sizeof(binder_size_t) * last_bbo->parent;
2197 if (binder_alloc_copy_from_buffer(&proc->alloc,
2198 &last_obj_offset,
2199 b, buffer_offset,
2200 sizeof(last_obj_offset)))
2201 return false;
2202 }
2203 return (fixup_offset >= last_min_offset);
2204 }
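/*
 * Worked sketch of the ordering rule above (hypothetical offsets, names
 * a_off/c_off/objs_start_off are illustrative only): after object C
 * (parent = A, offset = 16) is fixed up, a later fixup into A below
 * 16 + sizeof(uintptr_t) must be rejected:
 *
 *	ok = binder_validate_fixup(proc, b, objs_start_off, a_off,
 *				   0, c_off, 16 + sizeof(uintptr_t));
 *	(ok == false: fixup offsets within A must be increasing)
 */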
2206 /**
2207 * struct binder_task_work_cb - for deferred close
2209 * @twork: callback_head for task work
2210 * @file: file to close
2212 * Structure to pass task work to be handled after
2213 * returning from binder_ioctl() via task_work_add().
2214 */
2215 struct binder_task_work_cb {
2216 struct callback_head twork;
2217 struct file *file;
2218 };
2220 /**
2221 * binder_do_fd_close() - close list of file descriptors
2222 * @twork: callback head for task work
2224 * It is not safe to call ksys_close() during the binder_ioctl()
2225 * function if there is a chance that binder's own file descriptor
2226 * might be closed. This is to meet the requirements for using
2227 * fdget() (see comments for __fget_light()). Therefore use
2228 * task_work_add() to schedule the close operation once we have
2229 * returned from binder_ioctl(). This function is a callback
2230 * for that mechanism and does the actual ksys_close() on the
2231 * given file descriptor.
2232 */
2233 static void binder_do_fd_close(struct callback_head *twork)
2234 {
2235 struct binder_task_work_cb *twcb = container_of(twork,
2236 struct binder_task_work_cb, twork);
2238 fput(twcb->file);
2239 kfree(twcb);
2240 }
2242 /**
2243 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2244 * @fd: file-descriptor to close
2246 * See comments in binder_do_fd_close(). This function is used to schedule
2247 * a file-descriptor to be closed after returning from binder_ioctl().
2248 */
2249 static void binder_deferred_fd_close(int fd)
2250 {
2251 struct binder_task_work_cb *twcb;
2253 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2254 if (!twcb)
2255 return;
2256 init_task_work(&twcb->twork, binder_do_fd_close);
2257 __close_fd_get_file(fd, &twcb->file);
2258 if (twcb->file)
2259 task_work_add(current, &twcb->twork, true);
2260 else
2261 kfree(twcb);
2262 }
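/*
 * Sketch of the deferral in use (mirrors the BINDER_TYPE_FDA release
 * path below): fds recovered from an undelivered transaction are not
 * closed inline but queued so the close runs after binder_ioctl()
 * returns:
 *
 *	if (!binder_alloc_copy_from_buffer(&proc->alloc, &fd, buffer,
 *					   offset, sizeof(fd)))
 *		binder_deferred_fd_close(fd);
 */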
2264 static void binder_transaction_buffer_release(struct binder_proc *proc,
2265 struct binder_buffer *buffer,
2266 binder_size_t failed_at,
2267 bool is_failure)
2268 {
2269 int debug_id = buffer->debug_id;
2270 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2272 binder_debug(BINDER_DEBUG_TRANSACTION,
2273 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2274 proc->pid, buffer->debug_id,
2275 buffer->data_size, buffer->offsets_size,
2276 (unsigned long long)failed_at);
2278 if (buffer->target_node)
2279 binder_dec_node(buffer->target_node, 1, 0);
2281 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2282 off_end_offset = is_failure ? failed_at :
2283 off_start_offset + buffer->offsets_size;
2284 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2285 buffer_offset += sizeof(binder_size_t)) {
2286 struct binder_object_header *hdr;
2287 size_t object_size = 0;
2288 struct binder_object object;
2289 binder_size_t object_offset;
2291 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2292 buffer, buffer_offset,
2293 sizeof(object_offset)))
2294 object_size = binder_get_object(proc, buffer,
2295 object_offset, &object);
2296 if (object_size == 0) {
2297 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2298 debug_id, (u64)object_offset, buffer->data_size);
2299 continue;
2300 }
2301 hdr = &object.hdr;
2302 switch (hdr->type) {
2303 case BINDER_TYPE_BINDER:
2304 case BINDER_TYPE_WEAK_BINDER: {
2305 struct flat_binder_object *fp;
2306 struct binder_node *node;
2308 fp = to_flat_binder_object(hdr);
2309 node = binder_get_node(proc, fp->binder);
2311 pr_err("transaction release %d bad node %016llx\n",
2312 debug_id, (u64)fp->binder);
2315 binder_debug(BINDER_DEBUG_TRANSACTION,
2316 " node %d u%016llx\n",
2317 node->debug_id, (u64)node->ptr);
2318 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2320 binder_put_node(node);
2322 case BINDER_TYPE_HANDLE:
2323 case BINDER_TYPE_WEAK_HANDLE: {
2324 struct flat_binder_object *fp;
2325 struct binder_ref_data rdata;
2328 fp = to_flat_binder_object(hdr);
2329 ret = binder_dec_ref_for_handle(proc, fp->handle,
2330 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2333 pr_err("transaction release %d bad handle %d, ret = %d\n",
2334 debug_id, fp->handle, ret);
2337 binder_debug(BINDER_DEBUG_TRANSACTION,
2338 " ref %d desc %d\n",
2339 rdata.debug_id, rdata.desc);
2342 case BINDER_TYPE_FD: {
2343 /*
2344 * No need to close the file here since user-space
2345 * closes it for successfully delivered
2346 * transactions. For transactions that weren't
2347 * delivered, the new fd was never allocated so
2348 * there is no need to close and the fput on the
2349 * file is done when the transaction is torn
2350 * down.
2351 */
2352 WARN_ON(failed_at &&
2353 proc->tsk == current->group_leader);
2355 case BINDER_TYPE_PTR:
2357 * Nothing to do here, this will get cleaned up when the
2358 * transaction buffer gets freed
2361 case BINDER_TYPE_FDA: {
2362 struct binder_fd_array_object *fda;
2363 struct binder_buffer_object *parent;
2364 struct binder_object ptr_object;
2365 binder_size_t fda_offset;
2367 binder_size_t fd_buf_size;
2368 binder_size_t num_valid;
2370 if (proc->tsk != current->group_leader) {
2371 /*
2372 * Nothing to do if running in sender context.
2373 * The fd fixups have not been applied so no
2374 * fds need to be closed.
2375 */
2376 continue;
2377 }
2379 num_valid = (buffer_offset - off_start_offset) /
2380 sizeof(binder_size_t);
2381 fda = to_binder_fd_array_object(hdr);
2382 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2388 pr_err("transaction release %d bad parent offset\n",
2392 fd_buf_size = sizeof(u32) * fda->num_fds;
2393 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2394 pr_err("transaction release %d invalid number of fds (%lld)\n",
2395 debug_id, (u64)fda->num_fds);
2398 if (fd_buf_size > parent->length ||
2399 fda->parent_offset > parent->length - fd_buf_size) {
2400 /* No space for all file descriptors here. */
2401 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2402 debug_id, (u64)fda->num_fds);
2403 continue;
2404 }
2405 /*
2406 * the source data for binder_buffer_object is visible
2407 * to user-space and the @buffer element is the user
2408 * pointer to the buffer_object containing the fd_array.
2409 * Convert the address to an offset relative to
2410 * the base of the transaction buffer.
2411 */
2412 fda_offset =
2413 (parent->buffer - (uintptr_t)buffer->user_data) +
2414 fda->parent_offset;
2415 for (fd_index = 0; fd_index < fda->num_fds;
2416 fd_index++) {
2417 u32 fd;
2418 int err;
2419 binder_size_t offset = fda_offset +
2420 fd_index * sizeof(fd);
2422 err = binder_alloc_copy_from_buffer(
2423 &proc->alloc, &fd, buffer,
2424 offset, sizeof(fd));
2425 WARN_ON(err);
2426 if (!err)
2427 binder_deferred_fd_close(fd);
2430 default:
2431 pr_err("transaction release %d bad object type %x\n",
2432 debug_id, hdr->type);
2433 break;
2434 }
2435 }
2436 }
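/*
 * Call-pattern sketch: a normal free walks the whole offset array, while
 * the error path in binder_transaction() passes the offset it failed at
 * so only the objects already processed are unwound:
 *
 *	binder_transaction_buffer_release(proc, buffer, 0, false);
 *	binder_transaction_buffer_release(target_proc, t->buffer,
 *					  buffer_offset, true);
 */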
2438 static int binder_translate_binder(struct flat_binder_object *fp,
2439 struct binder_transaction *t,
2440 struct binder_thread *thread)
2442 struct binder_node *node;
2443 struct binder_proc *proc = thread->proc;
2444 struct binder_proc *target_proc = t->to_proc;
2445 struct binder_ref_data rdata;
2448 node = binder_get_node(proc, fp->binder);
2449 if (!node) {
2450 node = binder_new_node(proc, fp);
2451 if (!node)
2452 return -ENOMEM;
2453 }
2454 if (fp->cookie != node->cookie) {
2455 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2456 proc->pid, thread->pid, (u64)fp->binder,
2457 node->debug_id, (u64)fp->cookie,
2458 (u64)node->cookie);
2459 ret = -EINVAL;
2460 goto done;
2461 }
2462 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2463 ret = -EPERM;
2464 goto done;
2465 }
2467 ret = binder_inc_ref_for_node(target_proc, node,
2468 fp->hdr.type == BINDER_TYPE_BINDER,
2469 &thread->todo, &rdata);
2470 if (ret)
2471 goto done;
2473 if (fp->hdr.type == BINDER_TYPE_BINDER)
2474 fp->hdr.type = BINDER_TYPE_HANDLE;
2476 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2477 fp->binder = 0;
2478 fp->handle = rdata.desc;
2479 fp->cookie = 0;
2481 trace_binder_transaction_node_to_ref(t, node, &rdata);
2482 binder_debug(BINDER_DEBUG_TRANSACTION,
2483 " node %d u%016llx -> ref %d desc %d\n",
2484 node->debug_id, (u64)node->ptr,
2485 rdata.debug_id, rdata.desc);
2486 done:
2487 binder_put_node(node);
2488 return ret;
2489 }
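/*
 * Effect sketch: a local object arrives as a flat_binder_object and
 * leaves rewritten as a handle that is only meaningful inside
 * target_proc's ref table:
 *
 *	before: .hdr.type = BINDER_TYPE_BINDER, .binder/.cookie set
 *	after:  .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc
 */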
2491 static int binder_translate_handle(struct flat_binder_object *fp,
2492 struct binder_transaction *t,
2493 struct binder_thread *thread)
2495 struct binder_proc *proc = thread->proc;
2496 struct binder_proc *target_proc = t->to_proc;
2497 struct binder_node *node;
2498 struct binder_ref_data src_rdata;
2501 node = binder_get_node_from_ref(proc, fp->handle,
2502 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2503 if (!node) {
2504 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2505 proc->pid, thread->pid, fp->handle);
2506 return -EINVAL;
2507 }
2508 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2509 ret = -EPERM;
2510 goto done;
2511 }
2513 binder_node_lock(node);
2514 if (node->proc == target_proc) {
2515 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2516 fp->hdr.type = BINDER_TYPE_BINDER;
2518 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2519 fp->binder = node->ptr;
2520 fp->cookie = node->cookie;
2522 binder_inner_proc_lock(node->proc);
2524 __acquire(&node->proc->inner_lock);
2525 binder_inc_node_nilocked(node,
2526 fp->hdr.type == BINDER_TYPE_BINDER,
2529 binder_inner_proc_unlock(node->proc);
2531 __release(&node->proc->inner_lock);
2532 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2533 binder_debug(BINDER_DEBUG_TRANSACTION,
2534 " ref %d desc %d -> node %d u%016llx\n",
2535 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2537 binder_node_unlock(node);
2539 struct binder_ref_data dest_rdata;
2541 binder_node_unlock(node);
2542 ret = binder_inc_ref_for_node(target_proc, node,
2543 fp->hdr.type == BINDER_TYPE_HANDLE,
2544 NULL, &dest_rdata);
2545 if (ret)
2546 goto done;
2548 fp->binder = 0;
2549 fp->handle = dest_rdata.desc;
2550 fp->cookie = 0;
2551 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2553 binder_debug(BINDER_DEBUG_TRANSACTION,
2554 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2555 src_rdata.debug_id, src_rdata.desc,
2556 dest_rdata.debug_id, dest_rdata.desc,
2557 node->debug_id);
2558 }
2559 done:
2560 binder_put_node(node);
2561 return ret;
2562 }
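/*
 * Effect sketch: if the node referenced by the incoming handle lives in
 * target_proc, the handle collapses back to a local object; otherwise a
 * ref is minted in the target:
 *
 *	same proc:  BINDER_TYPE_HANDLE -> BINDER_TYPE_BINDER (ptr/cookie)
 *	cross proc: fp->handle = dest_rdata.desc
 */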
2564 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2565 struct binder_transaction *t,
2566 struct binder_thread *thread,
2567 struct binder_transaction *in_reply_to)
2569 struct binder_proc *proc = thread->proc;
2570 struct binder_proc *target_proc = t->to_proc;
2571 struct binder_txn_fd_fixup *fixup;
2574 bool target_allows_fd;
2576 if (in_reply_to)
2577 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2578 else
2579 target_allows_fd = t->buffer->target_node->accept_fds;
2580 if (!target_allows_fd) {
2581 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2582 proc->pid, thread->pid,
2583 in_reply_to ? "reply" : "transaction",
2586 goto err_fd_not_accepted;
2591 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2592 proc->pid, thread->pid, fd);
2596 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2603 * Add fixup record for this transaction. The allocation
2604 * of the fd in the target needs to be done from a
2605 * target thread.
2606 */
2607 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2613 fixup->offset = fd_offset;
2614 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2615 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2623 err_fd_not_accepted:
2624 return ret;
2625 }
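/*
 * Design note: no fd is allocated here on purpose. Allocating in the
 * sender would install the fd in the wrong task's descriptor table;
 * instead the fixup recorded above is replayed in the target's context
 * by binder_apply_fd_fixups().
 */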
2627 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2628 struct binder_buffer_object *parent,
2629 struct binder_transaction *t,
2630 struct binder_thread *thread,
2631 struct binder_transaction *in_reply_to)
2633 binder_size_t fdi, fd_buf_size;
2634 binder_size_t fda_offset;
2635 struct binder_proc *proc = thread->proc;
2636 struct binder_proc *target_proc = t->to_proc;
2638 fd_buf_size = sizeof(u32) * fda->num_fds;
2639 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2640 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2641 proc->pid, thread->pid, (u64)fda->num_fds);
2644 if (fd_buf_size > parent->length ||
2645 fda->parent_offset > parent->length - fd_buf_size) {
2646 /* No space for all file descriptors here. */
2647 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2648 proc->pid, thread->pid, (u64)fda->num_fds);
2649 return -EINVAL;
2650 }
2651 /*
2652 * the source data for binder_buffer_object is visible
2653 * to user-space and the @buffer element is the user
2654 * pointer to the buffer_object containing the fd_array.
2655 * Convert the address to an offset relative to
2656 * the base of the transaction buffer.
2657 */
2658 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2659 fda->parent_offset;
2660 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2661 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2662 proc->pid, thread->pid);
2665 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2668 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2670 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2672 offset, sizeof(fd));
2673 if (!ret)
2674 ret = binder_translate_fd(fd, offset, t, thread,
2675 in_reply_to);
2676 if (ret < 0)
2677 return ret;
2678 }
2680 return 0;
2681 }
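/*
 * Layout sketch with hypothetical values: for num_fds == 2 at
 * parent_offset == 8, the fds sit in the parent buffer as a packed u32
 * array and each entry is translated in place:
 *
 *	fda_offset + 0 * sizeof(u32): fd[0]
 *	fda_offset + 1 * sizeof(u32): fd[1]
 */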
2682 static int binder_fixup_parent(struct binder_transaction *t,
2683 struct binder_thread *thread,
2684 struct binder_buffer_object *bp,
2685 binder_size_t off_start_offset,
2686 binder_size_t num_valid,
2687 binder_size_t last_fixup_obj_off,
2688 binder_size_t last_fixup_min_off)
2690 struct binder_buffer_object *parent;
2691 struct binder_buffer *b = t->buffer;
2692 struct binder_proc *proc = thread->proc;
2693 struct binder_proc *target_proc = t->to_proc;
2694 struct binder_object object;
2695 binder_size_t buffer_offset;
2696 binder_size_t parent_offset;
2698 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2701 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2702 off_start_offset, &parent_offset,
2705 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2706 proc->pid, thread->pid);
2710 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2711 parent_offset, bp->parent_offset,
2712 last_fixup_obj_off,
2713 last_fixup_min_off)) {
2714 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2715 proc->pid, thread->pid);
2719 if (parent->length < sizeof(binder_uintptr_t) ||
2720 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2721 /* No space for a pointer here! */
2722 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2723 proc->pid, thread->pid);
2726 buffer_offset = bp->parent_offset +
2727 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2728 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2729 &bp->buffer, sizeof(bp->buffer))) {
2730 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2731 proc->pid, thread->pid);
2732 return -EINVAL;
2733 }
2735 return 0;
2736 }
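/*
 * Effect sketch: after the checks above succeed, the stale user-space
 * pointer stored at bp->parent_offset inside the parent buffer is
 * overwritten with bp->buffer, the already-relocated target-side
 * address of this sg buffer.
 */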
2738 /**
2739 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2740 * @t: transaction to send
2741 * @proc: process to send the transaction to
2742 * @thread: thread in @proc to send the transaction to (may be NULL)
2744 * This function queues a transaction to the specified process. It will try
2745 * to find a thread in the target process to handle the transaction and
2746 * wake it up. If no thread is found, the work is queued to the proc
2747 * waitqueue.
2749 * If the @thread parameter is not NULL, the transaction is always queued
2750 * to the waitlist of that specific thread.
2752 * Return: true if the transaction was successfully queued
2753 * false if the target process or thread is dead
2754 */
2755 static bool binder_proc_transaction(struct binder_transaction *t,
2756 struct binder_proc *proc,
2757 struct binder_thread *thread)
2759 struct binder_node *node = t->buffer->target_node;
2760 bool oneway = !!(t->flags & TF_ONE_WAY);
2761 bool pending_async = false;
2764 binder_node_lock(node);
2767 if (node->has_async_transaction) {
2768 pending_async = true;
2770 node->has_async_transaction = true;
2774 binder_inner_proc_lock(proc);
2776 if (proc->is_dead || (thread && thread->is_dead)) {
2777 binder_inner_proc_unlock(proc);
2778 binder_node_unlock(node);
2782 if (!thread && !pending_async)
2783 thread = binder_select_thread_ilocked(proc);
2786 binder_enqueue_thread_work_ilocked(thread, &t->work);
2787 else if (!pending_async)
2788 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2790 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2793 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2795 binder_inner_proc_unlock(proc);
2796 binder_node_unlock(node);
2798 return true;
2799 }
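/*
 * Queueing summary (follows directly from the code above):
 *
 *	reply or explicit @thread      -> thread->todo
 *	sync work, thread selected     -> thread->todo
 *	sync work, no thread available -> proc->todo
 *	oneway with async pending      -> node->async_todo (no wakeup)
 */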
2801 /**
2802 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2803 * @node: struct binder_node for which to get refs
2804 * @procp: returns @node->proc if valid
2805 * @error: if no @proc then returns BR_DEAD_REPLY
2807 * User-space normally keeps the node alive when creating a transaction
2808 * since it has a reference to the target. The local strong ref keeps it
2809 * alive if the sending process dies before the target process processes
2810 * the transaction. If the source process is malicious or has a reference
2811 * counting bug, relying on the local strong ref can fail.
2813 * Since user-space can cause the local strong ref to go away, we also take
2814 * a tmpref on the node to ensure it survives while we are constructing
2815 * the transaction. We also need a tmpref on the proc while we are
2816 * constructing the transaction, so we take that here as well.
2818 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2819 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2820 * target proc has died, @error is set to BR_DEAD_REPLY.
2821 */
2822 static struct binder_node *binder_get_node_refs_for_txn(
2823 struct binder_node *node,
2824 struct binder_proc **procp,
2827 struct binder_node *target_node = NULL;
2829 binder_node_inner_lock(node);
2830 if (node->proc) {
2831 target_node = node;
2832 binder_inc_node_nilocked(node, 1, 0, NULL);
2833 binder_inc_node_tmpref_ilocked(node);
2834 node->proc->tmp_ref++;
2835 *procp = node->proc;
2836 } else
2837 *error = BR_DEAD_REPLY;
2838 binder_node_inner_unlock(node);
2840 return target_node;
2841 }
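/*
 * Call sketch (as used by binder_transaction() below): the node tmpref
 * and the proc tmpref taken here must both be dropped once the
 * transaction has been queued or aborted:
 *
 *	target_node = binder_get_node_refs_for_txn(ref->node, &target_proc,
 *						   &return_error);
 */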
2843 static void binder_transaction(struct binder_proc *proc,
2844 struct binder_thread *thread,
2845 struct binder_transaction_data *tr, int reply,
2846 binder_size_t extra_buffers_size)
2849 struct binder_transaction *t;
2850 struct binder_work *w;
2851 struct binder_work *tcomplete;
2852 binder_size_t buffer_offset = 0;
2853 binder_size_t off_start_offset, off_end_offset;
2854 binder_size_t off_min;
2855 binder_size_t sg_buf_offset, sg_buf_end_offset;
2856 struct binder_proc *target_proc = NULL;
2857 struct binder_thread *target_thread = NULL;
2858 struct binder_node *target_node = NULL;
2859 struct binder_transaction *in_reply_to = NULL;
2860 struct binder_transaction_log_entry *e;
2861 uint32_t return_error = 0;
2862 uint32_t return_error_param = 0;
2863 uint32_t return_error_line = 0;
2864 binder_size_t last_fixup_obj_off = 0;
2865 binder_size_t last_fixup_min_off = 0;
2866 struct binder_context *context = proc->context;
2867 int t_debug_id = atomic_inc_return(&binder_last_id);
2868 char *secctx = NULL;
2871 e = binder_transaction_log_add(&binder_transaction_log);
2872 e->debug_id = t_debug_id;
2873 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2874 e->from_proc = proc->pid;
2875 e->from_thread = thread->pid;
2876 e->target_handle = tr->target.handle;
2877 e->data_size = tr->data_size;
2878 e->offsets_size = tr->offsets_size;
2879 e->context_name = proc->context->name;
2881 if (reply) {
2882 binder_inner_proc_lock(proc);
2883 in_reply_to = thread->transaction_stack;
2884 if (in_reply_to == NULL) {
2885 binder_inner_proc_unlock(proc);
2886 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2887 proc->pid, thread->pid);
2888 return_error = BR_FAILED_REPLY;
2889 return_error_param = -EPROTO;
2890 return_error_line = __LINE__;
2891 goto err_empty_call_stack;
2893 if (in_reply_to->to_thread != thread) {
2894 spin_lock(&in_reply_to->lock);
2895 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2896 proc->pid, thread->pid, in_reply_to->debug_id,
2897 in_reply_to->to_proc ?
2898 in_reply_to->to_proc->pid : 0,
2899 in_reply_to->to_thread ?
2900 in_reply_to->to_thread->pid : 0);
2901 spin_unlock(&in_reply_to->lock);
2902 binder_inner_proc_unlock(proc);
2903 return_error = BR_FAILED_REPLY;
2904 return_error_param = -EPROTO;
2905 return_error_line = __LINE__;
2907 goto err_bad_call_stack;
2909 thread->transaction_stack = in_reply_to->to_parent;
2910 binder_inner_proc_unlock(proc);
2911 binder_set_nice(in_reply_to->saved_priority);
2912 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2913 if (target_thread == NULL) {
2914 /* annotation for sparse */
2915 __release(&target_thread->proc->inner_lock);
2916 return_error = BR_DEAD_REPLY;
2917 return_error_line = __LINE__;
2918 goto err_dead_binder;
2920 if (target_thread->transaction_stack != in_reply_to) {
2921 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2922 proc->pid, thread->pid,
2923 target_thread->transaction_stack ?
2924 target_thread->transaction_stack->debug_id : 0,
2925 in_reply_to->debug_id);
2926 binder_inner_proc_unlock(target_thread->proc);
2927 return_error = BR_FAILED_REPLY;
2928 return_error_param = -EPROTO;
2929 return_error_line = __LINE__;
2931 target_thread = NULL;
2932 goto err_dead_binder;
2934 target_proc = target_thread->proc;
2935 target_proc->tmp_ref++;
2936 binder_inner_proc_unlock(target_thread->proc);
2937 } else {
2938 if (tr->target.handle) {
2939 struct binder_ref *ref;
2942 * There must already be a strong ref
2943 * on this node. If so, do a strong
2944 * increment on the node to ensure it
2945 * stays alive until the transaction is
2946 * done.
2947 */
2948 binder_proc_lock(proc);
2949 ref = binder_get_ref_olocked(proc, tr->target.handle,
2952 target_node = binder_get_node_refs_for_txn(
2953 ref->node, &target_proc,
2956 binder_user_error("%d:%d got transaction to invalid handle\n",
2957 proc->pid, thread->pid);
2958 return_error = BR_FAILED_REPLY;
2960 binder_proc_unlock(proc);
2961 } else {
2962 mutex_lock(&context->context_mgr_node_lock);
2963 target_node = context->binder_context_mgr_node;
2964 if (target_node)
2965 target_node = binder_get_node_refs_for_txn(
2966 target_node, &target_proc,
2967 &return_error);
2968 else
2969 return_error = BR_DEAD_REPLY;
2970 mutex_unlock(&context->context_mgr_node_lock);
2971 if (target_node && target_proc->pid == proc->pid) {
2972 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2973 proc->pid, thread->pid);
2974 return_error = BR_FAILED_REPLY;
2975 return_error_param = -EINVAL;
2976 return_error_line = __LINE__;
2977 goto err_invalid_target_handle;
2978 }
2980 if (!target_node) {
2981 /*
2982 * return_error is set above
2983 */
2984 return_error_param = -EINVAL;
2985 return_error_line = __LINE__;
2986 goto err_dead_binder;
2988 e->to_node = target_node->debug_id;
2989 if (security_binder_transaction(proc->tsk,
2990 target_proc->tsk) < 0) {
2991 return_error = BR_FAILED_REPLY;
2992 return_error_param = -EPERM;
2993 return_error_line = __LINE__;
2994 goto err_invalid_target_handle;
2996 binder_inner_proc_lock(proc);
2998 w = list_first_entry_or_null(&thread->todo,
2999 struct binder_work, entry);
3000 if (!(tr->flags & TF_ONE_WAY) && w &&
3001 w->type == BINDER_WORK_TRANSACTION) {
3003 * Do not allow new outgoing transaction from a
3004 * thread that has a transaction at the head of
3005 * its todo list. Only need to check the head
3006 * because binder_select_thread_ilocked picks a
3007 * thread from proc->waiting_threads to enqueue
3008 * the transaction, and nothing is queued to the
3009 * todo list while the thread is on waiting_threads.
3011 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3012 proc->pid, thread->pid);
3013 binder_inner_proc_unlock(proc);
3014 return_error = BR_FAILED_REPLY;
3015 return_error_param = -EPROTO;
3016 return_error_line = __LINE__;
3017 goto err_bad_todo_list;
3020 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3021 struct binder_transaction *tmp;
3023 tmp = thread->transaction_stack;
3024 if (tmp->to_thread != thread) {
3025 spin_lock(&tmp->lock);
3026 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3027 proc->pid, thread->pid, tmp->debug_id,
3028 tmp->to_proc ? tmp->to_proc->pid : 0,
3030 tmp->to_thread->pid : 0);
3031 spin_unlock(&tmp->lock);
3032 binder_inner_proc_unlock(proc);
3033 return_error = BR_FAILED_REPLY;
3034 return_error_param = -EPROTO;
3035 return_error_line = __LINE__;
3036 goto err_bad_call_stack;
3037 }
3038 while (tmp) {
3039 struct binder_thread *from;
3041 spin_lock(&tmp->lock);
3042 from = tmp->from;
3043 if (from && from->proc == target_proc) {
3044 atomic_inc(&from->tmp_ref);
3045 target_thread = from;
3046 spin_unlock(&tmp->lock);
3049 spin_unlock(&tmp->lock);
3050 tmp = tmp->from_parent;
3053 binder_inner_proc_unlock(proc);
3055 if (target_thread)
3056 e->to_thread = target_thread->pid;
3057 e->to_proc = target_proc->pid;
3059 /* TODO: reuse incoming transaction for reply */
3060 t = kzalloc(sizeof(*t), GFP_KERNEL);
3062 return_error = BR_FAILED_REPLY;
3063 return_error_param = -ENOMEM;
3064 return_error_line = __LINE__;
3065 goto err_alloc_t_failed;
3067 INIT_LIST_HEAD(&t->fd_fixups);
3068 binder_stats_created(BINDER_STAT_TRANSACTION);
3069 spin_lock_init(&t->lock);
3071 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3072 if (tcomplete == NULL) {
3073 return_error = BR_FAILED_REPLY;
3074 return_error_param = -ENOMEM;
3075 return_error_line = __LINE__;
3076 goto err_alloc_tcomplete_failed;
3078 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3080 t->debug_id = t_debug_id;
3082 if (reply)
3083 binder_debug(BINDER_DEBUG_TRANSACTION,
3084 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3085 proc->pid, thread->pid, t->debug_id,
3086 target_proc->pid, target_thread->pid,
3087 (u64)tr->data.ptr.buffer,
3088 (u64)tr->data.ptr.offsets,
3089 (u64)tr->data_size, (u64)tr->offsets_size,
3090 (u64)extra_buffers_size);
3091 else
3092 binder_debug(BINDER_DEBUG_TRANSACTION,
3093 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3094 proc->pid, thread->pid, t->debug_id,
3095 target_proc->pid, target_node->debug_id,
3096 (u64)tr->data.ptr.buffer,
3097 (u64)tr->data.ptr.offsets,
3098 (u64)tr->data_size, (u64)tr->offsets_size,
3099 (u64)extra_buffers_size);
3101 if (!reply && !(tr->flags & TF_ONE_WAY))
3102 t->from = thread;
3103 else
3104 t->from = NULL;
3105 t->sender_euid = task_euid(proc->tsk);
3106 t->to_proc = target_proc;
3107 t->to_thread = target_thread;
3108 t->code = tr->code;
3109 t->flags = tr->flags;
3110 t->priority = task_nice(current);
3112 if (target_node && target_node->txn_security_ctx) {
3113 u32 secid;
3114 size_t added_size;
3116 security_task_getsecid(proc->tsk, &secid);
3117 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3118 if (ret) {
3119 return_error = BR_FAILED_REPLY;
3120 return_error_param = ret;
3121 return_error_line = __LINE__;
3122 goto err_get_secctx_failed;
3124 added_size = ALIGN(secctx_sz, sizeof(u64));
3125 extra_buffers_size += added_size;
3126 if (extra_buffers_size < added_size) {
3127 /* integer overflow of extra_buffers_size */
3128 return_error = BR_FAILED_REPLY;
3129 return_error_param = -EINVAL;
3130 return_error_line = __LINE__;
3131 goto err_bad_extra_size;
3135 trace_binder_transaction(reply, t, target_node);
3137 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3138 tr->offsets_size, extra_buffers_size,
3139 !reply && (t->flags & TF_ONE_WAY));
3140 if (IS_ERR(t->buffer)) {
3141 /*
3142 * -ESRCH indicates VMA cleared. The target is dying.
3143 */
3144 return_error_param = PTR_ERR(t->buffer);
3145 return_error = return_error_param == -ESRCH ?
3146 BR_DEAD_REPLY : BR_FAILED_REPLY;
3147 return_error_line = __LINE__;
3149 goto err_binder_alloc_buf_failed;
3150 }
3152 if (secctx) {
3153 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3154 ALIGN(tr->offsets_size, sizeof(void *)) +
3155 ALIGN(extra_buffers_size, sizeof(void *)) -
3156 ALIGN(secctx_sz, sizeof(u64));
3158 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3159 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3160 t->buffer, buf_offset,
3161 secctx, secctx_sz);
3162 if (err) {
3163 t->security_ctx = 0;
3164 WARN_ON(1);
3165 }
3166 security_release_secctx(secctx, secctx_sz);
3167 secctx = NULL;
3168 }
3169 t->buffer->debug_id = t->debug_id;
3170 t->buffer->transaction = t;
3171 t->buffer->target_node = target_node;
3172 trace_binder_transaction_alloc_buf(t->buffer);
3174 if (binder_alloc_copy_user_to_buffer(
3175 &target_proc->alloc,
3176 t->buffer, 0,
3177 (const void __user *)
3178 (uintptr_t)tr->data.ptr.buffer,
3179 tr->data_size)) {
3180 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3181 proc->pid, thread->pid);
3182 return_error = BR_FAILED_REPLY;
3183 return_error_param = -EFAULT;
3184 return_error_line = __LINE__;
3185 goto err_copy_data_failed;
3187 if (binder_alloc_copy_user_to_buffer(
3188 &target_proc->alloc,
3189 t->buffer,
3190 ALIGN(tr->data_size, sizeof(void *)),
3191 (const void __user *)
3192 (uintptr_t)tr->data.ptr.offsets,
3193 tr->offsets_size)) {
3194 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3195 proc->pid, thread->pid);
3196 return_error = BR_FAILED_REPLY;
3197 return_error_param = -EFAULT;
3198 return_error_line = __LINE__;
3199 goto err_copy_data_failed;
3201 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3202 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3203 proc->pid, thread->pid, (u64)tr->offsets_size);
3204 return_error = BR_FAILED_REPLY;
3205 return_error_param = -EINVAL;
3206 return_error_line = __LINE__;
3207 goto err_bad_offset;
3209 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3210 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3211 proc->pid, thread->pid,
3212 (u64)extra_buffers_size);
3213 return_error = BR_FAILED_REPLY;
3214 return_error_param = -EINVAL;
3215 return_error_line = __LINE__;
3216 goto err_bad_offset;
3218 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3219 buffer_offset = off_start_offset;
3220 off_end_offset = off_start_offset + tr->offsets_size;
3221 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3222 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3223 ALIGN(secctx_sz, sizeof(u64));
3224 off_min = 0;
3225 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3226 buffer_offset += sizeof(binder_size_t)) {
3227 struct binder_object_header *hdr;
3228 size_t object_size;
3229 struct binder_object object;
3230 binder_size_t object_offset;
3232 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3233 &object_offset,
3234 t->buffer,
3235 buffer_offset,
3236 sizeof(object_offset))) {
3237 return_error = BR_FAILED_REPLY;
3238 return_error_param = -EINVAL;
3239 return_error_line = __LINE__;
3240 goto err_bad_offset;
3242 object_size = binder_get_object(target_proc, t->buffer,
3243 object_offset, &object);
3244 if (object_size == 0 || object_offset < off_min) {
3245 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3246 proc->pid, thread->pid,
3247 (u64)object_offset,
3248 (u64)off_min,
3249 (u64)t->buffer->data_size);
3250 return_error = BR_FAILED_REPLY;
3251 return_error_param = -EINVAL;
3252 return_error_line = __LINE__;
3253 goto err_bad_offset;
3254 }
3256 hdr = &object.hdr;
3257 off_min = object_offset + object_size;
3258 switch (hdr->type) {
3259 case BINDER_TYPE_BINDER:
3260 case BINDER_TYPE_WEAK_BINDER: {
3261 struct flat_binder_object *fp;
3263 fp = to_flat_binder_object(hdr);
3264 ret = binder_translate_binder(fp, t, thread);
3266 if (ret < 0 ||
3267 binder_alloc_copy_to_buffer(&target_proc->alloc,
3268 t->buffer,
3269 object_offset,
3270 fp, sizeof(*fp))) {
3271 return_error = BR_FAILED_REPLY;
3272 return_error_param = ret;
3273 return_error_line = __LINE__;
3274 goto err_translate_failed;
3277 case BINDER_TYPE_HANDLE:
3278 case BINDER_TYPE_WEAK_HANDLE: {
3279 struct flat_binder_object *fp;
3281 fp = to_flat_binder_object(hdr);
3282 ret = binder_translate_handle(fp, t, thread);
3283 if (ret < 0 ||
3284 binder_alloc_copy_to_buffer(&target_proc->alloc,
3285 t->buffer,
3286 object_offset,
3287 fp, sizeof(*fp))) {
3288 return_error = BR_FAILED_REPLY;
3289 return_error_param = ret;
3290 return_error_line = __LINE__;
3291 goto err_translate_failed;
3295 case BINDER_TYPE_FD: {
3296 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3297 binder_size_t fd_offset = object_offset +
3298 (uintptr_t)&fp->fd - (uintptr_t)fp;
3299 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3300 thread, in_reply_to);
3302 fp->pad_binder = 0;
3303 if (ret < 0 ||
3304 binder_alloc_copy_to_buffer(&target_proc->alloc,
3305 t->buffer,
3306 fd_offset,
3307 &fp->fd, sizeof(fp->fd))) {
3308 return_error = BR_FAILED_REPLY;
3309 return_error_param = ret;
3310 return_error_line = __LINE__;
3311 goto err_translate_failed;
3314 case BINDER_TYPE_FDA: {
3315 struct binder_object ptr_object;
3316 binder_size_t parent_offset;
3317 struct binder_fd_array_object *fda =
3318 to_binder_fd_array_object(hdr);
3319 size_t num_valid = (buffer_offset - off_start_offset) /
3320 sizeof(binder_size_t);
3321 struct binder_buffer_object *parent =
3322 binder_validate_ptr(target_proc, t->buffer,
3323 &ptr_object, fda->parent,
3328 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3329 proc->pid, thread->pid);
3330 return_error = BR_FAILED_REPLY;
3331 return_error_param = -EINVAL;
3332 return_error_line = __LINE__;
3333 goto err_bad_parent;
3335 if (!binder_validate_fixup(target_proc, t->buffer,
3340 last_fixup_min_off)) {
3341 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3342 proc->pid, thread->pid);
3343 return_error = BR_FAILED_REPLY;
3344 return_error_param = -EINVAL;
3345 return_error_line = __LINE__;
3346 goto err_bad_parent;
3348 ret = binder_translate_fd_array(fda, parent, t, thread,
3351 return_error = BR_FAILED_REPLY;
3352 return_error_param = ret;
3353 return_error_line = __LINE__;
3354 goto err_translate_failed;
3356 last_fixup_obj_off = parent_offset;
3357 last_fixup_min_off =
3358 fda->parent_offset + sizeof(u32) * fda->num_fds;
3360 case BINDER_TYPE_PTR: {
3361 struct binder_buffer_object *bp =
3362 to_binder_buffer_object(hdr);
3363 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3366 if (bp->length > buf_left) {
3367 binder_user_error("%d:%d got transaction with too large buffer\n",
3368 proc->pid, thread->pid);
3369 return_error = BR_FAILED_REPLY;
3370 return_error_param = -EINVAL;
3371 return_error_line = __LINE__;
3372 goto err_bad_offset;
3374 if (binder_alloc_copy_user_to_buffer(
3375 &target_proc->alloc,
3378 (const void __user *)
3379 (uintptr_t)bp->buffer,
3381 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3382 proc->pid, thread->pid);
3383 return_error_param = -EFAULT;
3384 return_error = BR_FAILED_REPLY;
3385 return_error_line = __LINE__;
3386 goto err_copy_data_failed;
3388 /* Fixup buffer pointer to target proc address space */
3389 bp->buffer = (uintptr_t)
3390 t->buffer->user_data + sg_buf_offset;
3391 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3393 num_valid = (buffer_offset - off_start_offset) /
3394 sizeof(binder_size_t);
3395 ret = binder_fixup_parent(t, thread, bp,
3396 off_start_offset,
3397 num_valid,
3398 last_fixup_obj_off,
3399 last_fixup_min_off);
3400 if (ret < 0 ||
3401 binder_alloc_copy_to_buffer(&target_proc->alloc,
3402 t->buffer,
3403 object_offset,
3404 bp, sizeof(*bp))) {
3405 return_error = BR_FAILED_REPLY;
3406 return_error_param = ret;
3407 return_error_line = __LINE__;
3408 goto err_translate_failed;
3410 last_fixup_obj_off = object_offset;
3411 last_fixup_min_off = 0;
3412 } break;
3413 default:
3414 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3415 proc->pid, thread->pid, hdr->type);
3416 return_error = BR_FAILED_REPLY;
3417 return_error_param = -EINVAL;
3418 return_error_line = __LINE__;
3419 goto err_bad_object_type;
3422 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3423 t->work.type = BINDER_WORK_TRANSACTION;
3425 if (reply) {
3426 binder_enqueue_thread_work(thread, tcomplete);
3427 binder_inner_proc_lock(target_proc);
3428 if (target_thread->is_dead) {
3429 binder_inner_proc_unlock(target_proc);
3430 goto err_dead_proc_or_thread;
3432 BUG_ON(t->buffer->async_transaction != 0);
3433 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3434 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3435 binder_inner_proc_unlock(target_proc);
3436 wake_up_interruptible_sync(&target_thread->wait);
3437 binder_free_transaction(in_reply_to);
3438 } else if (!(t->flags & TF_ONE_WAY)) {
3439 BUG_ON(t->buffer->async_transaction != 0);
3440 binder_inner_proc_lock(proc);
3441 /*
3442 * Defer the TRANSACTION_COMPLETE, so we don't return to
3443 * userspace immediately; this allows the target process to
3444 * immediately start processing this transaction, reducing
3445 * latency. We will then return the TRANSACTION_COMPLETE when
3446 * the target replies (or there is an error).
3447 */
3448 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3450 t->from_parent = thread->transaction_stack;
3451 thread->transaction_stack = t;
3452 binder_inner_proc_unlock(proc);
3453 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3454 binder_inner_proc_lock(proc);
3455 binder_pop_transaction_ilocked(thread, t);
3456 binder_inner_proc_unlock(proc);
3457 goto err_dead_proc_or_thread;
3458 }
3459 } else {
3460 BUG_ON(target_node == NULL);
3461 BUG_ON(t->buffer->async_transaction != 1);
3462 binder_enqueue_thread_work(thread, tcomplete);
3463 if (!binder_proc_transaction(t, target_proc, NULL))
3464 goto err_dead_proc_or_thread;
3467 binder_thread_dec_tmpref(target_thread);
3468 binder_proc_dec_tmpref(target_proc);
3470 binder_dec_node_tmpref(target_node);
3471 /*
3472 * write barrier to synchronize with initialization
3473 * of log entry
3474 */
3475 smp_wmb();
3476 WRITE_ONCE(e->debug_id_done, t_debug_id);
3479 err_dead_proc_or_thread:
3480 return_error = BR_DEAD_REPLY;
3481 return_error_line = __LINE__;
3482 binder_dequeue_work(proc, tcomplete);
3483 err_translate_failed:
3484 err_bad_object_type:
3485 err_bad_offset:
3486 err_bad_parent:
3487 err_copy_data_failed:
3488 binder_free_txn_fixups(t);
3489 trace_binder_transaction_failed_buffer_release(t->buffer);
3490 binder_transaction_buffer_release(target_proc, t->buffer,
3491 buffer_offset, true);
3493 binder_dec_node_tmpref(target_node);
3495 t->buffer->transaction = NULL;
3496 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3497 err_binder_alloc_buf_failed:
3498 err_bad_extra_size:
3499 if (secctx)
3500 security_release_secctx(secctx, secctx_sz);
3501 err_get_secctx_failed:
3503 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3504 err_alloc_tcomplete_failed:
3506 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3507 err_alloc_t_failed:
3508 err_bad_todo_list:
3509 err_bad_call_stack:
3510 err_empty_call_stack:
3511 err_dead_binder:
3512 err_invalid_target_handle:
3514 binder_thread_dec_tmpref(target_thread);
3516 binder_proc_dec_tmpref(target_proc);
3518 binder_dec_node(target_node, 1, 0);
3519 binder_dec_node_tmpref(target_node);
3522 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3523 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3524 proc->pid, thread->pid, return_error, return_error_param,
3525 (u64)tr->data_size, (u64)tr->offsets_size,
3529 struct binder_transaction_log_entry *fe;
3531 e->return_error = return_error;
3532 e->return_error_param = return_error_param;
3533 e->return_error_line = return_error_line;
3534 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3536 /*
3537 * write barrier to synchronize with initialization
3538 * of log entry
3539 */
3540 smp_wmb();
3541 WRITE_ONCE(e->debug_id_done, t_debug_id);
3542 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3545 BUG_ON(thread->return_error.cmd != BR_OK);
3546 if (in_reply_to) {
3547 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3548 binder_enqueue_thread_work(thread, &thread->return_error.work);
3549 binder_send_failed_reply(in_reply_to, return_error);
3550 } else {
3551 thread->return_error.cmd = return_error;
3552 binder_enqueue_thread_work(thread, &thread->return_error.work);
3553 }
3554 }
3556 /**
3557 * binder_free_buf() - free the specified buffer
3558 * @proc: binder proc that owns buffer
3559 * @buffer: buffer to be freed
3561 * If buffer for an async transaction, enqueue the next async
3562 * transaction from the node.
3564 * Cleanup buffer and free it.
3565 */
3566 static void
3567 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3568 {
3569 binder_inner_proc_lock(proc);
3570 if (buffer->transaction) {
3571 buffer->transaction->buffer = NULL;
3572 buffer->transaction = NULL;
3573 }
3574 binder_inner_proc_unlock(proc);
3575 if (buffer->async_transaction && buffer->target_node) {
3576 struct binder_node *buf_node;
3577 struct binder_work *w;
3579 buf_node = buffer->target_node;
3580 binder_node_inner_lock(buf_node);
3581 BUG_ON(!buf_node->has_async_transaction);
3582 BUG_ON(buf_node->proc != proc);
3583 w = binder_dequeue_work_head_ilocked(
3584 &buf_node->async_todo);
3585 if (!w) {
3586 buf_node->has_async_transaction = false;
3587 } else {
3588 binder_enqueue_work_ilocked(
3589 w, &proc->todo);
3590 binder_wakeup_proc_ilocked(proc);
3591 }
3592 binder_node_inner_unlock(buf_node);
3594 trace_binder_transaction_buffer_release(buffer);
3595 binder_transaction_buffer_release(proc, buffer, 0, false);
3596 binder_alloc_free_buf(&proc->alloc, buffer);
3597 }
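/*
 * Caller sketch (matches the BC_FREE_BUFFER handler below): user-space
 * returns the buffer pointer it received with a transaction once it is
 * done with the payload:
 *
 *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *	if (!IS_ERR_OR_NULL(buffer))
 *		binder_free_buf(proc, buffer);
 */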
3599 static int binder_thread_write(struct binder_proc *proc,
3600 struct binder_thread *thread,
3601 binder_uintptr_t binder_buffer, size_t size,
3602 binder_size_t *consumed)
3603 {
3604 uint32_t cmd;
3605 struct binder_context *context = proc->context;
3606 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3607 void __user *ptr = buffer + *consumed;
3608 void __user *end = buffer + size;
3610 while (ptr < end && thread->return_error.cmd == BR_OK) {
3611 int ret;
3613 if (get_user(cmd, (uint32_t __user *)ptr))
3614 return -EFAULT;
3615 ptr += sizeof(uint32_t);
3616 trace_binder_command(cmd);
3617 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3618 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3619 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3620 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3621 }
3622 switch (cmd) {
3623 case BC_INCREFS:
3624 case BC_ACQUIRE:
3625 case BC_RELEASE:
3626 case BC_DECREFS: {
3627 uint32_t target;
3628 const char *debug_string;
3629 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3630 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3631 struct binder_ref_data rdata;
3633 if (get_user(target, (uint32_t __user *)ptr))
3634 return -EFAULT;
3636 ptr += sizeof(uint32_t);
3638 if (increment && !target) {
3639 struct binder_node *ctx_mgr_node;
3640 mutex_lock(&context->context_mgr_node_lock);
3641 ctx_mgr_node = context->binder_context_mgr_node;
3642 if (ctx_mgr_node)
3643 ret = binder_inc_ref_for_node(
3644 proc, ctx_mgr_node,
3645 strong, NULL, &rdata);
3646 mutex_unlock(&context->context_mgr_node_lock);
3649 ret = binder_update_ref_for_handle(
3650 proc, target, increment, strong,
3652 if (!ret && rdata.desc != target) {
3653 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3654 proc->pid, thread->pid,
3655 target, rdata.desc);
3656 }
3657 switch (cmd) {
3658 case BC_INCREFS:
3659 debug_string = "IncRefs";
3660 break;
3661 case BC_ACQUIRE:
3662 debug_string = "Acquire";
3663 break;
3664 case BC_RELEASE:
3665 debug_string = "Release";
3666 break;
3667 case BC_DECREFS:
3668 default:
3669 debug_string = "DecRefs";
3670 break;
3671 }
3672 if (ret) {
3673 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3674 proc->pid, thread->pid, debug_string,
3675 strong, target, ret);
3676 break;
3677 }
3678 binder_debug(BINDER_DEBUG_USER_REFS,
3679 "%d:%d %s ref %d desc %d s %d w %d\n",
3680 proc->pid, thread->pid, debug_string,
3681 rdata.debug_id, rdata.desc, rdata.strong,
3685 case BC_INCREFS_DONE:
3686 case BC_ACQUIRE_DONE: {
3687 binder_uintptr_t node_ptr;
3688 binder_uintptr_t cookie;
3689 struct binder_node *node;
3690 bool free_node;
3692 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3693 return -EFAULT;
3694 ptr += sizeof(binder_uintptr_t);
3695 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3696 return -EFAULT;
3697 ptr += sizeof(binder_uintptr_t);
3698 node = binder_get_node(proc, node_ptr);
3699 if (!node) {
3700 binder_user_error("%d:%d %s u%016llx no match\n",
3701 proc->pid, thread->pid,
3702 cmd == BC_INCREFS_DONE ?
3703 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3704 (u64)node_ptr);
3705 break;
3706 }
3708 if (cookie != node->cookie) {
3709 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3710 proc->pid, thread->pid,
3711 cmd == BC_INCREFS_DONE ?
3712 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3713 (u64)node_ptr, node->debug_id,
3714 (u64)cookie, (u64)node->cookie);
3715 binder_put_node(node);
3718 binder_node_inner_lock(node);
3719 if (cmd == BC_ACQUIRE_DONE) {
3720 if (node->pending_strong_ref == 0) {
3721 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3722 proc->pid, thread->pid,
3724 binder_node_inner_unlock(node);
3725 binder_put_node(node);
3728 node->pending_strong_ref = 0;
3729 } else {
3730 if (node->pending_weak_ref == 0) {
3731 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3732 proc->pid, thread->pid,
3734 binder_node_inner_unlock(node);
3735 binder_put_node(node);
3738 node->pending_weak_ref = 0;
3740 free_node = binder_dec_node_nilocked(node,
3741 cmd == BC_ACQUIRE_DONE, 0);
3743 binder_debug(BINDER_DEBUG_USER_REFS,
3744 "%d:%d %s node %d ls %d lw %d tr %d\n",
3745 proc->pid, thread->pid,
3746 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3747 node->debug_id, node->local_strong_refs,
3748 node->local_weak_refs, node->tmp_refs);
3749 binder_node_inner_unlock(node);
3750 binder_put_node(node);
3753 case BC_ATTEMPT_ACQUIRE:
3754 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3756 case BC_ACQUIRE_RESULT:
3757 pr_err("BC_ACQUIRE_RESULT not supported\n");
3760 case BC_FREE_BUFFER: {
3761 binder_uintptr_t data_ptr;
3762 struct binder_buffer *buffer;
3764 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3765 return -EFAULT;
3766 ptr += sizeof(binder_uintptr_t);
3768 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3769 data_ptr);
3770 if (IS_ERR_OR_NULL(buffer)) {
3771 if (PTR_ERR(buffer) == -EPERM) {
3772 binder_user_error(
3773 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3774 proc->pid, thread->pid,
3775 (u64)data_ptr);
3776 } else {
3777 binder_user_error(
3778 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3779 proc->pid, thread->pid,
3780 (u64)data_ptr);
3781 }
3782 break;
3783 }
3784 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3785 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3786 proc->pid, thread->pid, (u64)data_ptr,
3787 buffer->debug_id,
3788 buffer->transaction ? "active" : "finished");
3789 binder_free_buf(proc, buffer);
3790 break;
3791 }
3793 case BC_TRANSACTION_SG:
3794 case BC_REPLY_SG: {
3795 struct binder_transaction_data_sg tr;
3797 if (copy_from_user(&tr, ptr, sizeof(tr)))
3798 return -EFAULT;
3799 ptr += sizeof(tr);
3800 binder_transaction(proc, thread, &tr.transaction_data,
3801 cmd == BC_REPLY_SG, tr.buffers_size);
3802 break;
3803 }
3804 case BC_TRANSACTION:
3805 case BC_REPLY: {
3806 struct binder_transaction_data tr;
3808 if (copy_from_user(&tr, ptr, sizeof(tr)))
3809 return -EFAULT;
3810 ptr += sizeof(tr);
3811 binder_transaction(proc, thread, &tr,
3812 cmd == BC_REPLY, 0);
3813 break;
3814 }
3816 case BC_REGISTER_LOOPER:
3817 binder_debug(BINDER_DEBUG_THREADS,
3818 "%d:%d BC_REGISTER_LOOPER\n",
3819 proc->pid, thread->pid);
3820 binder_inner_proc_lock(proc);
3821 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3822 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3823 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3824 proc->pid, thread->pid);
3825 } else if (proc->requested_threads == 0) {
3826 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3827 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3828 proc->pid, thread->pid);
3830 proc->requested_threads--;
3831 proc->requested_threads_started++;
3833 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3834 binder_inner_proc_unlock(proc);
3836 case BC_ENTER_LOOPER:
3837 binder_debug(BINDER_DEBUG_THREADS,
3838 "%d:%d BC_ENTER_LOOPER\n",
3839 proc->pid, thread->pid);
3840 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3841 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3842 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3843 proc->pid, thread->pid);
3845 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3847 case BC_EXIT_LOOPER:
3848 binder_debug(BINDER_DEBUG_THREADS,
3849 "%d:%d BC_EXIT_LOOPER\n",
3850 proc->pid, thread->pid);
3851 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3854 case BC_REQUEST_DEATH_NOTIFICATION:
3855 case BC_CLEAR_DEATH_NOTIFICATION: {
3856 uint32_t target;
3857 binder_uintptr_t cookie;
3858 struct binder_ref *ref;
3859 struct binder_ref_death *death = NULL;
3861 if (get_user(target, (uint32_t __user *)ptr))
3862 return -EFAULT;
3863 ptr += sizeof(uint32_t);
3864 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3865 return -EFAULT;
3866 ptr += sizeof(binder_uintptr_t);
3867 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3869 * Allocate memory for death notification
3870 * before taking lock
3872 death = kzalloc(sizeof(*death), GFP_KERNEL);
3873 if (death == NULL) {
3874 WARN_ON(thread->return_error.cmd !=
3875 BR_OK);
3876 thread->return_error.cmd = BR_ERROR;
3877 binder_enqueue_thread_work(
3879 &thread->return_error.work);
3881 BINDER_DEBUG_FAILED_TRANSACTION,
3882 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3883 proc->pid, thread->pid);
3887 binder_proc_lock(proc);
3888 ref = binder_get_ref_olocked(proc, target, false);
3889 if (ref == NULL) {
3890 binder_user_error("%d:%d %s invalid ref %d\n",
3891 proc->pid, thread->pid,
3892 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3893 "BC_REQUEST_DEATH_NOTIFICATION" :
3894 "BC_CLEAR_DEATH_NOTIFICATION",
3896 binder_proc_unlock(proc);
3901 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3902 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3903 proc->pid, thread->pid,
3904 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3905 "BC_REQUEST_DEATH_NOTIFICATION" :
3906 "BC_CLEAR_DEATH_NOTIFICATION",
3907 (u64)cookie, ref->data.debug_id,
3908 ref->data.desc, ref->data.strong,
3909 ref->data.weak, ref->node->debug_id);
3911 binder_node_lock(ref->node);
3912 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3913 if (ref->death) {
3914 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3915 proc->pid, thread->pid);
3916 binder_node_unlock(ref->node);
3917 binder_proc_unlock(proc);
3921 binder_stats_created(BINDER_STAT_DEATH);
3922 INIT_LIST_HEAD(&death->work.entry);
3923 death->cookie = cookie;
3924 ref->death = death;
3925 if (ref->node->proc == NULL) {
3926 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3928 binder_inner_proc_lock(proc);
3929 binder_enqueue_work_ilocked(
3930 &ref->death->work, &proc->todo);
3931 binder_wakeup_proc_ilocked(proc);
3932 binder_inner_proc_unlock(proc);
3933 }
3934 } else {
3935 if (ref->death == NULL) {
3936 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3937 proc->pid, thread->pid);
3938 binder_node_unlock(ref->node);
3939 binder_proc_unlock(proc);
3940 break;
3941 }
3942 death = ref->death;
3943 if (death->cookie != cookie) {
3944 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3945 proc->pid, thread->pid,
3946 (u64)death->cookie,
3947 (u64)cookie);
3948 binder_node_unlock(ref->node);
3949 binder_proc_unlock(proc);
3950 break;
3951 }
3952 ref->death = NULL;
3953 binder_inner_proc_lock(proc);
3954 if (list_empty(&death->work.entry)) {
3955 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3956 if (thread->looper &
3957 (BINDER_LOOPER_STATE_REGISTERED |
3958 BINDER_LOOPER_STATE_ENTERED))
3959 binder_enqueue_thread_work_ilocked(
3960 thread,
3961 &death->work);
3962 else {
3963 binder_enqueue_work_ilocked(
3964 &death->work,
3965 &proc->todo);
3966 binder_wakeup_proc_ilocked(
3967 proc);
3968 }
3969 } else {
3970 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3971 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3972 }
3973 binder_inner_proc_unlock(proc);
3975 binder_node_unlock(ref->node);
3976 binder_proc_unlock(proc);
3978 case BC_DEAD_BINDER_DONE: {
3979 struct binder_work *w;
3980 binder_uintptr_t cookie;
3981 struct binder_ref_death *death = NULL;
3983 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3984 return -EFAULT;
3986 ptr += sizeof(cookie);
3987 binder_inner_proc_lock(proc);
3988 list_for_each_entry(w, &proc->delivered_death,
3989 entry) {
3990 struct binder_ref_death *tmp_death =
3991 container_of(w,
3992 struct binder_ref_death,
3993 work);
3995 if (tmp_death->cookie == cookie) {
3996 death = tmp_death;
3997 break;
3998 }
3999 }
4000 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4001 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4002 proc->pid, thread->pid, (u64)cookie,
4004 if (death == NULL) {
4005 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4006 proc->pid, thread->pid, (u64)cookie);
4007 binder_inner_proc_unlock(proc);
4010 binder_dequeue_work_ilocked(&death->work);
4011 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4012 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4013 if (thread->looper &
4014 (BINDER_LOOPER_STATE_REGISTERED |
4015 BINDER_LOOPER_STATE_ENTERED))
4016 binder_enqueue_thread_work_ilocked(
4017 thread, &death->work);
4018 else {
4019 binder_enqueue_work_ilocked(
4020 &death->work,
4021 &proc->todo);
4022 binder_wakeup_proc_ilocked(proc);
4023 }
4024 }
4025 binder_inner_proc_unlock(proc);
4026 } break;
4028 default:
4029 pr_err("%d:%d unknown command %d\n",
4030 proc->pid, thread->pid, cmd);
4031 return -EINVAL;
4032 }
4033 *consumed = ptr - buffer;
4034 }
4036 return 0;
4037 }
4038 static void binder_stat_br(struct binder_proc *proc,
4039 struct binder_thread *thread, uint32_t cmd)
4041 trace_binder_return(cmd);
4042 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4043 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4044 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4045 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4049 static int binder_put_node_cmd(struct binder_proc *proc,
4050 struct binder_thread *thread,
4051 void __user **ptrp,
4052 binder_uintptr_t node_ptr,
4053 binder_uintptr_t node_cookie,
4054 int node_debug_id,
4055 uint32_t cmd, const char *cmd_name)
4056 {
4057 void __user *ptr = *ptrp;
4059 if (put_user(cmd, (uint32_t __user *)ptr))
4061 ptr += sizeof(uint32_t);
4063 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4065 ptr += sizeof(binder_uintptr_t);
4067 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4069 ptr += sizeof(binder_uintptr_t);
4071 binder_stat_br(proc, thread, cmd);
4072 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4073 proc->pid, thread->pid, cmd_name, node_debug_id,
4074 (u64)node_ptr, (u64)node_cookie);
4076 *ptrp = ptr;
4078 return 0;
4079 }
4080 static int binder_wait_for_work(struct binder_thread *thread,
4081 bool do_proc_work)
4082 {
4083 DEFINE_WAIT(wait);
4084 struct binder_proc *proc = thread->proc;
4085 int ret = 0;
4087 freezer_do_not_count();
4088 binder_inner_proc_lock(proc);
4089 for (;;) {
4090 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4091 if (binder_has_work_ilocked(thread, do_proc_work))
4092 break;
4093 if (do_proc_work)
4094 list_add(&thread->waiting_thread_node,
4095 &proc->waiting_threads);
4096 binder_inner_proc_unlock(proc);
4097 schedule();
4098 binder_inner_proc_lock(proc);
4099 list_del_init(&thread->waiting_thread_node);
4100 if (signal_pending(current)) {
4101 ret = -ERESTARTSYS;
4102 break;
4103 }
4104 }
4105 finish_wait(&thread->wait, &wait);
4106 binder_inner_proc_unlock(proc);
4107 freezer_count();
4109 return ret;
4110 }
4112 /**
4113 * binder_apply_fd_fixups() - finish fd translation
4114 * @proc: binder_proc associated with @t->buffer
4115 * @t: binder transaction with list of fd fixups
4117 * Now that we are in the context of the transaction target
4118 * process, we can allocate and install fds. Process the
4119 * list of fds to translate and fixup the buffer with the
4120 * new fds.
4122 * If we fail to allocate an fd, then free the resources by
4123 * fput'ing files that have not been processed and ksys_close'ing
4124 * any fds that have already been allocated.
4125 */
4126 static int binder_apply_fd_fixups(struct binder_proc *proc,
4127 struct binder_transaction *t)
4129 struct binder_txn_fd_fixup *fixup, *tmp;
4132 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4133 int fd = get_unused_fd_flags(O_CLOEXEC);
4136 binder_debug(BINDER_DEBUG_TRANSACTION,
4137 "failed fd fixup txn %d fd %d\n",
4142 binder_debug(BINDER_DEBUG_TRANSACTION,
4143 "fd fixup txn %d fd %d\n",
4145 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4146 fd_install(fd, fixup->file);
4147 fixup->file = NULL;
4148 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4155 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4162 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4168 binder_deferred_fd_close(fd);
4170 list_del(&fixup->fixup_entry);
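/*
 * Illustrative sketch (not driver code): after the fixups above, the
 * receiving process finds live descriptors embedded in the buffer it reads
 * back. Assuming "buf" and "offset" locate a BINDER_TYPE_FD object:
 *
 *	struct binder_fd_object *fdo =
 *		(struct binder_fd_object *)(buf + offset);
 *	// fdo->fd is now a valid descriptor in this process's fd table
 *	read(fdo->fd, payload, sizeof(payload));
 */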
4177 static int binder_thread_read(struct binder_proc *proc,
4178 struct binder_thread *thread,
4179 binder_uintptr_t binder_buffer, size_t size,
4180 binder_size_t *consumed, int non_block)
4182 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4183 void __user *ptr = buffer + *consumed;
4184 void __user *end = buffer + size;
4187 int wait_for_proc_work;
4189 if (*consumed == 0) {
4190 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4192 ptr += sizeof(uint32_t);
4196 binder_inner_proc_lock(proc);
4197 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4198 binder_inner_proc_unlock(proc);
4200 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4202 trace_binder_wait_for_work(wait_for_proc_work,
4203 !!thread->transaction_stack,
4204 !binder_worklist_empty(proc, &thread->todo));
4205 if (wait_for_proc_work) {
4206 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4207 BINDER_LOOPER_STATE_ENTERED))) {
4208 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4209 proc->pid, thread->pid, thread->looper);
4210 wait_event_interruptible(binder_user_error_wait,
4211 binder_stop_on_user_error < 2);
4213 binder_set_nice(proc->default_priority);
4217 if (!binder_has_work(thread, wait_for_proc_work))
4220 ret = binder_wait_for_work(thread, wait_for_proc_work);
4223 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4230 struct binder_transaction_data_secctx tr;
4231 struct binder_transaction_data *trd = &tr.transaction_data;
4232 struct binder_work *w = NULL;
4233 struct list_head *list = NULL;
4234 struct binder_transaction *t = NULL;
4235 struct binder_thread *t_from;
4236 size_t trsize = sizeof(*trd);
4238 binder_inner_proc_lock(proc);
4239 if (!binder_worklist_empty_ilocked(&thread->todo))
4240 list = &thread->todo;
4241 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4245 binder_inner_proc_unlock(proc);
4248 if (ptr - buffer == 4 && !thread->looper_need_return)
4253 if (end - ptr < sizeof(tr) + 4) {
4254 binder_inner_proc_unlock(proc);
4257 w = binder_dequeue_work_head_ilocked(list);
4258 if (binder_worklist_empty_ilocked(&thread->todo))
4259 thread->process_todo = false;
4262 case BINDER_WORK_TRANSACTION: {
4263 binder_inner_proc_unlock(proc);
4264 t = container_of(w, struct binder_transaction, work);
4266 case BINDER_WORK_RETURN_ERROR: {
4267 struct binder_error *e = container_of(
4268 w, struct binder_error, work);
4270 WARN_ON(e->cmd == BR_OK);
4271 binder_inner_proc_unlock(proc);
4272 if (put_user(e->cmd, (uint32_t __user *)ptr))
4276 ptr += sizeof(uint32_t);
4278 binder_stat_br(proc, thread, cmd);
4280 case BINDER_WORK_TRANSACTION_COMPLETE: {
4281 binder_inner_proc_unlock(proc);
4282 cmd = BR_TRANSACTION_COMPLETE;
4284 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4285 if (put_user(cmd, (uint32_t __user *)ptr))
4287 ptr += sizeof(uint32_t);
4289 binder_stat_br(proc, thread, cmd);
4290 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4291 "%d:%d BR_TRANSACTION_COMPLETE\n",
4292 proc->pid, thread->pid);
4294 case BINDER_WORK_NODE: {
4295 struct binder_node *node = container_of(w, struct binder_node, work);
4297 binder_uintptr_t node_ptr = node->ptr;
4298 binder_uintptr_t node_cookie = node->cookie;
4299 int node_debug_id = node->debug_id;
4302 void __user *orig_ptr = ptr;
4304 BUG_ON(proc != node->proc);
4305 strong = node->internal_strong_refs ||
4306 node->local_strong_refs;
4307 weak = !hlist_empty(&node->refs) ||
4308 node->local_weak_refs ||
4309 node->tmp_refs || strong;
4310 has_strong_ref = node->has_strong_ref;
4311 has_weak_ref = node->has_weak_ref;
4313 if (weak && !has_weak_ref) {
4314 node->has_weak_ref = 1;
4315 node->pending_weak_ref = 1;
4316 node->local_weak_refs++;
4318 if (strong && !has_strong_ref) {
4319 node->has_strong_ref = 1;
4320 node->pending_strong_ref = 1;
4321 node->local_strong_refs++;
4323 if (!strong && has_strong_ref)
4324 node->has_strong_ref = 0;
4325 if (!weak && has_weak_ref)
4326 node->has_weak_ref = 0;
4327 if (!weak && !strong) {
4328 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4329 "%d:%d node %d u%016llx c%016llx deleted\n",
4330 proc->pid, thread->pid,
4334 rb_erase(&node->rb_node, &proc->nodes);
4335 binder_inner_proc_unlock(proc);
4336 binder_node_lock(node);
4338 * Acquire the node lock before freeing the
4339 * node to serialize with other threads that
4340 * may have been holding the node lock while
4341 * decrementing this node (avoids race where
4342 * this thread frees while the other thread
4343 * is unlocking the node after the final
4344 * decrement)
4346 binder_node_unlock(node);
4347 binder_free_node(node);
4349 binder_inner_proc_unlock(proc);
4351 if (weak && !has_weak_ref)
4352 ret = binder_put_node_cmd(
4353 proc, thread, &ptr, node_ptr,
4354 node_cookie, node_debug_id,
4355 BR_INCREFS, "BR_INCREFS");
4356 if (!ret && strong && !has_strong_ref)
4357 ret = binder_put_node_cmd(
4358 proc, thread, &ptr, node_ptr,
4359 node_cookie, node_debug_id,
4360 BR_ACQUIRE, "BR_ACQUIRE");
4361 if (!ret && !strong && has_strong_ref)
4362 ret = binder_put_node_cmd(
4363 proc, thread, &ptr, node_ptr,
4364 node_cookie, node_debug_id,
4365 BR_RELEASE, "BR_RELEASE");
4366 if (!ret && !weak && has_weak_ref)
4367 ret = binder_put_node_cmd(
4368 proc, thread, &ptr, node_ptr,
4369 node_cookie, node_debug_id,
4370 BR_DECREFS, "BR_DECREFS");
4371 if (orig_ptr == ptr)
4372 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4373 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4374 proc->pid, thread->pid,
4381 case BINDER_WORK_DEAD_BINDER:
4382 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4383 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4384 struct binder_ref_death *death;
4386 binder_uintptr_t cookie;
4388 death = container_of(w, struct binder_ref_death, work);
4389 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4390 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4392 cmd = BR_DEAD_BINDER;
4393 cookie = death->cookie;
4395 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4396 "%d:%d %s %016llx\n",
4397 proc->pid, thread->pid,
4398 cmd == BR_DEAD_BINDER ?
4400 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4402 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4403 binder_inner_proc_unlock(proc);
4405 binder_stats_deleted(BINDER_STAT_DEATH);
4407 binder_enqueue_work_ilocked(
4408 w, &proc->delivered_death);
4409 binder_inner_proc_unlock(proc);
4411 if (put_user(cmd, (uint32_t __user *)ptr))
4413 ptr += sizeof(uint32_t);
4414 if (put_user(cookie,
4415 (binder_uintptr_t __user *)ptr))
4417 ptr += sizeof(binder_uintptr_t);
4418 binder_stat_br(proc, thread, cmd);
4419 if (cmd == BR_DEAD_BINDER)
4420 goto done; /* DEAD_BINDER notifications can cause transactions */
4423 binder_inner_proc_unlock(proc);
4424 pr_err("%d:%d: bad work type %d\n",
4425 proc->pid, thread->pid, w->type);
4432 BUG_ON(t->buffer == NULL);
4433 if (t->buffer->target_node) {
4434 struct binder_node *target_node = t->buffer->target_node;
4436 trd->target.ptr = target_node->ptr;
4437 trd->cookie = target_node->cookie;
4438 t->saved_priority = task_nice(current);
4439 if (t->priority < target_node->min_priority &&
4440 !(t->flags & TF_ONE_WAY))
4441 binder_set_nice(t->priority);
4442 else if (!(t->flags & TF_ONE_WAY) ||
4443 t->saved_priority > target_node->min_priority)
4444 binder_set_nice(target_node->min_priority);
4445 cmd = BR_TRANSACTION;
4447 trd->target.ptr = 0;
4451 trd->code = t->code;
4452 trd->flags = t->flags;
4453 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4455 t_from = binder_get_txn_from(t);
4457 struct task_struct *sender = t_from->proc->tsk;
4460 task_tgid_nr_ns(sender,
4461 task_active_pid_ns(current));
4463 trd->sender_pid = 0;
4466 ret = binder_apply_fd_fixups(proc, t);
4468 struct binder_buffer *buffer = t->buffer;
4469 bool oneway = !!(t->flags & TF_ONE_WAY);
4470 int tid = t->debug_id;
4473 binder_thread_dec_tmpref(t_from);
4474 buffer->transaction = NULL;
4475 binder_cleanup_transaction(t, "fd fixups failed",
4477 binder_free_buf(proc, buffer);
4478 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4479 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4480 proc->pid, thread->pid,
4482 (cmd == BR_REPLY ? "reply " : ""),
4483 tid, BR_FAILED_REPLY, ret, __LINE__);
4484 if (cmd == BR_REPLY) {
4485 cmd = BR_FAILED_REPLY;
4486 if (put_user(cmd, (uint32_t __user *)ptr))
4488 ptr += sizeof(uint32_t);
4489 binder_stat_br(proc, thread, cmd);
4494 trd->data_size = t->buffer->data_size;
4495 trd->offsets_size = t->buffer->offsets_size;
4496 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4497 trd->data.ptr.offsets = trd->data.ptr.buffer +
4498 ALIGN(t->buffer->data_size,
4501 tr.secctx = t->security_ctx;
4502 if (t->security_ctx) {
4503 cmd = BR_TRANSACTION_SEC_CTX;
4504 trsize = sizeof(tr);
4506 if (put_user(cmd, (uint32_t __user *)ptr)) {
4508 binder_thread_dec_tmpref(t_from);
4510 binder_cleanup_transaction(t, "put_user failed",
4515 ptr += sizeof(uint32_t);
4516 if (copy_to_user(ptr, &tr, trsize)) {
4518 binder_thread_dec_tmpref(t_from);
4520 binder_cleanup_transaction(t, "copy_to_user failed",
4527 trace_binder_transaction_received(t);
4528 binder_stat_br(proc, thread, cmd);
4529 binder_debug(BINDER_DEBUG_TRANSACTION,
4530 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4531 proc->pid, thread->pid,
4532 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4533 (cmd == BR_TRANSACTION_SEC_CTX) ?
4534 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4535 t->debug_id, t_from ? t_from->proc->pid : 0,
4536 t_from ? t_from->pid : 0, cmd,
4537 t->buffer->data_size, t->buffer->offsets_size,
4538 (u64)trd->data.ptr.buffer,
4539 (u64)trd->data.ptr.offsets);
4542 binder_thread_dec_tmpref(t_from);
4543 t->buffer->allow_user_free = 1;
4544 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4545 binder_inner_proc_lock(thread->proc);
4546 t->to_parent = thread->transaction_stack;
4547 t->to_thread = thread;
4548 thread->transaction_stack = t;
4549 binder_inner_proc_unlock(thread->proc);
4551 binder_free_transaction(t);
4558 *consumed = ptr - buffer;
4559 binder_inner_proc_lock(proc);
4560 if (proc->requested_threads == 0 &&
4561 list_empty(&thread->proc->waiting_threads) &&
4562 proc->requested_threads_started < proc->max_threads &&
4563 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4564 BINDER_LOOPER_STATE_ENTERED))
4565 /* the user-space code fails to spawn a new thread if we leave this out */) {
4566 proc->requested_threads++;
4567 binder_inner_proc_unlock(proc);
4568 binder_debug(BINDER_DEBUG_THREADS,
4569 "%d:%d BR_SPAWN_LOOPER\n",
4570 proc->pid, thread->pid);
4571 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4573 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4575 binder_inner_proc_unlock(proc);
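/*
 * Illustrative sketch (not driver code): userspace answers the
 * BR_SPAWN_LOOPER hint above by starting a thread that registers itself
 * before entering its read loop, which is what lets the requested_threads
 * accounting converge. Assuming "binder_fd" is the open device:
 *
 *	uint32_t cmd = BC_REGISTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size   = sizeof(cmd),
 *		.write_buffer = (uintptr_t)&cmd,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */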
4579 static void binder_release_work(struct binder_proc *proc,
4580 struct list_head *list)
4582 struct binder_work *w;
4585 w = binder_dequeue_work_head(proc, list);
4590 case BINDER_WORK_TRANSACTION: {
4591 struct binder_transaction *t;
4593 t = container_of(w, struct binder_transaction, work);
4595 binder_cleanup_transaction(t, "process died.",
4598 case BINDER_WORK_RETURN_ERROR: {
4599 struct binder_error *e = container_of(
4600 w, struct binder_error, work);
4602 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4603 "undelivered TRANSACTION_ERROR: %u\n",
4606 case BINDER_WORK_TRANSACTION_COMPLETE: {
4607 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4608 "undelivered TRANSACTION_COMPLETE\n");
4610 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4612 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4613 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4614 struct binder_ref_death *death;
4616 death = container_of(w, struct binder_ref_death, work);
4617 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4618 "undelivered death notification, %016llx\n",
4619 (u64)death->cookie);
4621 binder_stats_deleted(BINDER_STAT_DEATH);
4624 pr_err("unexpected work type, %d, not freed\n",
4632 static struct binder_thread *binder_get_thread_ilocked(
4633 struct binder_proc *proc, struct binder_thread *new_thread)
4635 struct binder_thread *thread = NULL;
4636 struct rb_node *parent = NULL;
4637 struct rb_node **p = &proc->threads.rb_node;
4641 thread = rb_entry(parent, struct binder_thread, rb_node);
4643 if (current->pid < thread->pid)
4645 else if (current->pid > thread->pid)
4646 p = &(*p)->rb_right;
4652 thread = new_thread;
4653 binder_stats_created(BINDER_STAT_THREAD);
4654 thread->proc = proc;
4655 thread->pid = current->pid;
4656 atomic_set(&thread->tmp_ref, 0);
4657 init_waitqueue_head(&thread->wait);
4658 INIT_LIST_HEAD(&thread->todo);
4659 rb_link_node(&thread->rb_node, parent, p);
4660 rb_insert_color(&thread->rb_node, &proc->threads);
4661 thread->looper_need_return = true;
4662 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4663 thread->return_error.cmd = BR_OK;
4664 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4665 thread->reply_error.cmd = BR_OK;
4666 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4670 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4672 struct binder_thread *thread;
4673 struct binder_thread *new_thread;
4675 binder_inner_proc_lock(proc);
4676 thread = binder_get_thread_ilocked(proc, NULL);
4677 binder_inner_proc_unlock(proc);
4679 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4680 if (new_thread == NULL)
4682 binder_inner_proc_lock(proc);
4683 thread = binder_get_thread_ilocked(proc, new_thread);
4684 binder_inner_proc_unlock(proc);
4685 if (thread != new_thread)
4691 static void binder_free_proc(struct binder_proc *proc)
4693 BUG_ON(!list_empty(&proc->todo));
4694 BUG_ON(!list_empty(&proc->delivered_death));
4695 binder_alloc_deferred_release(&proc->alloc);
4696 put_task_struct(proc->tsk);
4697 binder_stats_deleted(BINDER_STAT_PROC);
4701 static void binder_free_thread(struct binder_thread *thread)
4703 BUG_ON(!list_empty(&thread->todo));
4704 binder_stats_deleted(BINDER_STAT_THREAD);
4705 binder_proc_dec_tmpref(thread->proc);
4709 static int binder_thread_release(struct binder_proc *proc,
4710 struct binder_thread *thread)
4712 struct binder_transaction *t;
4713 struct binder_transaction *send_reply = NULL;
4714 int active_transactions = 0;
4715 struct binder_transaction *last_t = NULL;
4717 binder_inner_proc_lock(thread->proc);
4719 * take a ref on the proc so it survives
4720 * after we remove this thread from proc->threads.
4721 * The corresponding dec is when we actually
4722 * free the thread in binder_free_thread()
4726 * take a ref on this thread to ensure it
4727 * survives while we are releasing it
4729 atomic_inc(&thread->tmp_ref);
4730 rb_erase(&thread->rb_node, &proc->threads);
4731 t = thread->transaction_stack;
4733 spin_lock(&t->lock);
4734 if (t->to_thread == thread)
4737 __acquire(&t->lock);
4739 thread->is_dead = true;
4743 active_transactions++;
4744 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4745 "release %d:%d transaction %d %s, still active\n",
4746 proc->pid, thread->pid,
4748 (t->to_thread == thread) ? "in" : "out");
4750 if (t->to_thread == thread) {
4752 t->to_thread = NULL;
4754 t->buffer->transaction = NULL;
4758 } else if (t->from == thread) {
4763 spin_unlock(&last_t->lock);
4765 spin_lock(&t->lock);
4767 __acquire(&t->lock);
4769 /* annotation for sparse, lock not acquired in last iteration above */
4770 __release(&t->lock);
4773 * If this thread used poll, make sure we remove the waitqueue
4774 * from any epoll data structures holding it with POLLFREE.
4775 * waitqueue_active() is safe to use here because we're holding
4776 * the inner lock.
4778 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4779 waitqueue_active(&thread->wait)) {
4780 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4783 binder_inner_proc_unlock(thread->proc);
4786 * This is needed to avoid races between wake_up_poll() above and
4787 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4788 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4789 * lock, so we can be sure it's done after calling synchronize_rcu().
4791 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4795 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4796 binder_release_work(proc, &thread->todo);
4797 binder_thread_dec_tmpref(thread);
4798 return active_transactions;
4801 static __poll_t binder_poll(struct file *filp,
4802 struct poll_table_struct *wait)
4804 struct binder_proc *proc = filp->private_data;
4805 struct binder_thread *thread = NULL;
4806 bool wait_for_proc_work;
4808 thread = binder_get_thread(proc);
4812 binder_inner_proc_lock(thread->proc);
4813 thread->looper |= BINDER_LOOPER_STATE_POLL;
4814 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4816 binder_inner_proc_unlock(thread->proc);
4818 poll_wait(filp, &thread->wait, wait);
4820 if (binder_has_work(thread, wait_for_proc_work))
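/*
 * Illustrative sketch (not driver code): binder_poll() arms the calling
 * thread's waitqueue, so a looper typically registers the device fd with
 * epoll and issues BINDER_WRITE_READ once it wakes:
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
 */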
4826 static int binder_ioctl_write_read(struct file *filp,
4827 unsigned int cmd, unsigned long arg,
4828 struct binder_thread *thread)
4831 struct binder_proc *proc = filp->private_data;
4832 unsigned int size = _IOC_SIZE(cmd);
4833 void __user *ubuf = (void __user *)arg;
4834 struct binder_write_read bwr;
4836 if (size != sizeof(struct binder_write_read)) {
4840 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4844 binder_debug(BINDER_DEBUG_READ_WRITE,
4845 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4846 proc->pid, thread->pid,
4847 (u64)bwr.write_size, (u64)bwr.write_buffer,
4848 (u64)bwr.read_size, (u64)bwr.read_buffer);
4850 if (bwr.write_size > 0) {
4851 ret = binder_thread_write(proc, thread,
4854 &bwr.write_consumed);
4855 trace_binder_write_done(ret);
4857 bwr.read_consumed = 0;
4858 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4863 if (bwr.read_size > 0) {
4864 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4867 filp->f_flags & O_NONBLOCK);
4868 trace_binder_read_done(ret);
4869 binder_inner_proc_lock(proc);
4870 if (!binder_worklist_empty_ilocked(&proc->todo))
4871 binder_wakeup_proc_ilocked(proc);
4872 binder_inner_proc_unlock(proc);
4874 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4879 binder_debug(BINDER_DEBUG_READ_WRITE,
4880 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4881 proc->pid, thread->pid,
4882 (u64)bwr.write_consumed, (u64)bwr.write_size,
4883 (u64)bwr.read_consumed, (u64)bwr.read_size);
4884 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
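/*
 * Illustrative sketch (not driver code) of the userspace call pattern this
 * handler serves: "out" carries BC_* commands, "in" receives BR_* returns,
 * and progress is reported back through the consumed fields.
 * parse_returns() is a hypothetical helper:
 *
 *	struct binder_write_read bwr = {
 *		.write_size   = out_len,
 *		.write_buffer = (uintptr_t)out,
 *		.read_size    = sizeof(in),
 *		.read_buffer  = (uintptr_t)in,
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0)
 *		parse_returns(in, bwr.read_consumed);
 */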
4892 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4893 struct flat_binder_object *fbo)
4896 struct binder_proc *proc = filp->private_data;
4897 struct binder_context *context = proc->context;
4898 struct binder_node *new_node;
4899 kuid_t curr_euid = current_euid();
4901 mutex_lock(&context->context_mgr_node_lock);
4902 if (context->binder_context_mgr_node) {
4903 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4907 ret = security_binder_set_context_mgr(proc->tsk);
4910 if (uid_valid(context->binder_context_mgr_uid)) {
4911 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4912 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4913 from_kuid(&init_user_ns, curr_euid),
4914 from_kuid(&init_user_ns,
4915 context->binder_context_mgr_uid));
4920 context->binder_context_mgr_uid = curr_euid;
4922 new_node = binder_new_node(proc, fbo);
4927 binder_node_lock(new_node);
4928 new_node->local_weak_refs++;
4929 new_node->local_strong_refs++;
4930 new_node->has_strong_ref = 1;
4931 new_node->has_weak_ref = 1;
4932 context->binder_context_mgr_node = new_node;
4933 binder_node_unlock(new_node);
4934 binder_put_node(new_node);
4936 mutex_unlock(&context->context_mgr_node_lock);
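/*
 * Illustrative sketch (not driver code): a service manager claims the
 * context-manager node at startup. The _EXT variant additionally passes a
 * flat_binder_object so it can request security contexts on incoming
 * transactions:
 *
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 *
 *	struct flat_binder_object fbo = {
 *		.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
 *	};
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
 */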
4940 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4941 struct binder_node_info_for_ref *info)
4943 struct binder_node *node;
4944 struct binder_context *context = proc->context;
4945 __u32 handle = info->handle;
4947 if (info->strong_count || info->weak_count || info->reserved1 ||
4948 info->reserved2 || info->reserved3) {
4949 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4954 /* This ioctl may only be used by the context manager */
4955 mutex_lock(&context->context_mgr_node_lock);
4956 if (!context->binder_context_mgr_node ||
4957 context->binder_context_mgr_node->proc != proc) {
4958 mutex_unlock(&context->context_mgr_node_lock);
4961 mutex_unlock(&context->context_mgr_node_lock);
4963 node = binder_get_node_from_ref(proc, handle, true, NULL);
4967 info->strong_count = node->local_strong_refs +
4968 node->internal_strong_refs;
4969 info->weak_count = node->local_weak_refs;
4971 binder_put_node(node);
4976 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4977 struct binder_node_debug_info *info)
4980 binder_uintptr_t ptr = info->ptr;
4982 memset(info, 0, sizeof(*info));
4984 binder_inner_proc_lock(proc);
4985 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4986 struct binder_node *node = rb_entry(n, struct binder_node,
4988 if (node->ptr > ptr) {
4989 info->ptr = node->ptr;
4990 info->cookie = node->cookie;
4991 info->has_strong_ref = node->has_strong_ref;
4992 info->has_weak_ref = node->has_weak_ref;
4996 binder_inner_proc_unlock(proc);
5001 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5004 struct binder_proc *proc = filp->private_data;
5005 struct binder_thread *thread;
5006 unsigned int size = _IOC_SIZE(cmd);
5007 void __user *ubuf = (void __user *)arg;
5009 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5010 proc->pid, current->pid, cmd, arg);*/
5012 binder_selftest_alloc(&proc->alloc);
5014 trace_binder_ioctl(cmd, arg);
5016 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5020 thread = binder_get_thread(proc);
5021 if (thread == NULL) {
5027 case BINDER_WRITE_READ:
5028 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5032 case BINDER_SET_MAX_THREADS: {
5035 if (copy_from_user(&max_threads, ubuf,
5036 sizeof(max_threads))) {
5040 binder_inner_proc_lock(proc);
5041 proc->max_threads = max_threads;
5042 binder_inner_proc_unlock(proc);
5045 case BINDER_SET_CONTEXT_MGR_EXT: {
5046 struct flat_binder_object fbo;
5048 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5052 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5057 case BINDER_SET_CONTEXT_MGR:
5058 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5062 case BINDER_THREAD_EXIT:
5063 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5064 proc->pid, thread->pid);
5065 binder_thread_release(proc, thread);
5068 case BINDER_VERSION: {
5069 struct binder_version __user *ver = ubuf;
5071 if (size != sizeof(struct binder_version)) {
5075 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5076 &ver->protocol_version)) {
5082 case BINDER_GET_NODE_INFO_FOR_REF: {
5083 struct binder_node_info_for_ref info;
5085 if (copy_from_user(&info, ubuf, sizeof(info))) {
5090 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5094 if (copy_to_user(ubuf, &info, sizeof(info))) {
5101 case BINDER_GET_NODE_DEBUG_INFO: {
5102 struct binder_node_debug_info info;
5104 if (copy_from_user(&info, ubuf, sizeof(info))) {
5109 ret = binder_ioctl_get_node_debug_info(proc, &info);
5113 if (copy_to_user(ubuf, &info, sizeof(info))) {
5126 thread->looper_need_return = false;
5127 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5128 if (ret && ret != -ERESTARTSYS)
5129 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5131 trace_binder_ioctl_done(ret);
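/*
 * Illustrative sketch (not driver code): the simplest of the ioctls above,
 * used by clients at startup to verify the protocol matches:
 *
 *	struct binder_version vers;
 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		abort();  // refuse to talk to a mismatched driver
 */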
5135 static void binder_vma_open(struct vm_area_struct *vma)
5137 struct binder_proc *proc = vma->vm_private_data;
5139 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5140 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5141 proc->pid, vma->vm_start, vma->vm_end,
5142 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5143 (unsigned long)pgprot_val(vma->vm_page_prot));
5146 static void binder_vma_close(struct vm_area_struct *vma)
5148 struct binder_proc *proc = vma->vm_private_data;
5150 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5151 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5152 proc->pid, vma->vm_start, vma->vm_end,
5153 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5154 (unsigned long)pgprot_val(vma->vm_page_prot));
5155 binder_alloc_vma_close(&proc->alloc);
5158 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5160 return VM_FAULT_SIGBUS;
5163 static const struct vm_operations_struct binder_vm_ops = {
5164 .open = binder_vma_open,
5165 .close = binder_vma_close,
5166 .fault = binder_vm_fault,
5169 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5172 struct binder_proc *proc = filp->private_data;
5173 const char *failure_string;
5175 if (proc->tsk != current->group_leader)
5178 if ((vma->vm_end - vma->vm_start) > SZ_4M)
5179 vma->vm_end = vma->vm_start + SZ_4M;
5181 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5182 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5183 __func__, proc->pid, vma->vm_start, vma->vm_end,
5184 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5185 (unsigned long)pgprot_val(vma->vm_page_prot));
5187 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5189 failure_string = "bad vm_flags";
5192 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5193 vma->vm_flags &= ~VM_MAYWRITE;
5195 vma->vm_ops = &binder_vm_ops;
5196 vma->vm_private_data = proc;
5198 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5204 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5205 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
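/*
 * Illustrative sketch (not driver code): userspace maps the transaction
 * buffer space read-only and private; FORBIDDEN_MMAP_FLAGS above rejects
 * writable mappings and VM_MAYWRITE is cleared so the mapping cannot be
 * made writable later:
 *
 *	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 */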
5209 static int binder_open(struct inode *nodp, struct file *filp)
5211 struct binder_proc *proc;
5212 struct binder_device *binder_dev;
5213 struct binderfs_info *info;
5214 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5216 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5217 current->group_leader->pid, current->pid);
5219 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5222 spin_lock_init(&proc->inner_lock);
5223 spin_lock_init(&proc->outer_lock);
5224 get_task_struct(current->group_leader);
5225 proc->tsk = current->group_leader;
5226 INIT_LIST_HEAD(&proc->todo);
5227 proc->default_priority = task_nice(current);
5228 /* binderfs stashes devices in i_private */
5229 if (is_binderfs_device(nodp)) {
5230 binder_dev = nodp->i_private;
5231 info = nodp->i_sb->s_fs_info;
5232 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5234 binder_dev = container_of(filp->private_data,
5235 struct binder_device, miscdev);
5237 proc->context = &binder_dev->context;
5238 binder_alloc_init(&proc->alloc);
5240 binder_stats_created(BINDER_STAT_PROC);
5241 proc->pid = current->group_leader->pid;
5242 INIT_LIST_HEAD(&proc->delivered_death);
5243 INIT_LIST_HEAD(&proc->waiting_threads);
5244 filp->private_data = proc;
5246 mutex_lock(&binder_procs_lock);
5247 hlist_add_head(&proc->proc_node, &binder_procs);
5248 mutex_unlock(&binder_procs_lock);
5250 if (binder_debugfs_dir_entry_proc) {
5253 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5255 * proc debug entries are shared between contexts, so
5256 * this will fail if the process tries to open the driver
5257 * again with a different context. The printing code will
5258 * print all contexts that a given PID has anyway, so this
5259 * is not a problem.
5261 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5262 binder_debugfs_dir_entry_proc,
5263 (void *)(unsigned long)proc->pid,
5267 if (binder_binderfs_dir_entry_proc) {
5269 struct dentry *binderfs_entry;
5271 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5273 * Similar to debugfs, the process specific log file is shared
5274 * between contexts. If the file has already been created for a
5275 * process, the following binderfs_create_file() call will
5276 * fail with error code EEXIST if another context of the same
5277 * process invoked binder_open(). This is OK since, as with
5278 * debugfs, the log file will contain information on all
5279 * contexts of a given PID.
5281 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5282 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5283 if (!IS_ERR(binderfs_entry)) {
5284 proc->binderfs_entry = binderfs_entry;
5288 error = PTR_ERR(binderfs_entry);
5289 if (error != -EEXIST) {
5290 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5299 static int binder_flush(struct file *filp, fl_owner_t id)
5301 struct binder_proc *proc = filp->private_data;
5303 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5308 static void binder_deferred_flush(struct binder_proc *proc)
5313 binder_inner_proc_lock(proc);
5314 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5315 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5317 thread->looper_need_return = true;
5318 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5319 wake_up_interruptible(&thread->wait);
5323 binder_inner_proc_unlock(proc);
5325 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5326 "binder_flush: %d woke %d threads\n", proc->pid,
5330 static int binder_release(struct inode *nodp, struct file *filp)
5332 struct binder_proc *proc = filp->private_data;
5334 debugfs_remove(proc->debugfs_entry);
5336 if (proc->binderfs_entry) {
5337 binderfs_remove_file(proc->binderfs_entry);
5338 proc->binderfs_entry = NULL;
5341 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5346 static int binder_node_release(struct binder_node *node, int refs)
5348 struct binder_ref *ref;
5350 struct binder_proc *proc = node->proc;
5352 binder_release_work(proc, &node->async_todo);
5354 binder_node_lock(node);
5355 binder_inner_proc_lock(proc);
5356 binder_dequeue_work_ilocked(&node->work);
5358 * The caller must have taken a temporary ref on the node.
5360 BUG_ON(!node->tmp_refs);
5361 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5362 binder_inner_proc_unlock(proc);
5363 binder_node_unlock(node);
5364 binder_free_node(node);
5370 node->local_strong_refs = 0;
5371 node->local_weak_refs = 0;
5372 binder_inner_proc_unlock(proc);
5374 spin_lock(&binder_dead_nodes_lock);
5375 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5376 spin_unlock(&binder_dead_nodes_lock);
5378 hlist_for_each_entry(ref, &node->refs, node_entry) {
5381 * Need the node lock to synchronize
5382 * with new notification requests and the
5383 * inner lock to synchronize with queued
5384 * death notifications.
5386 binder_inner_proc_lock(ref->proc);
5388 binder_inner_proc_unlock(ref->proc);
5394 BUG_ON(!list_empty(&ref->death->work.entry));
5395 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5396 binder_enqueue_work_ilocked(&ref->death->work,
5398 binder_wakeup_proc_ilocked(ref->proc);
5399 binder_inner_proc_unlock(ref->proc);
5402 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5403 "node %d now dead, refs %d, death %d\n",
5404 node->debug_id, refs, death);
5405 binder_node_unlock(node);
5406 binder_put_node(node);
5411 static void binder_deferred_release(struct binder_proc *proc)
5413 struct binder_context *context = proc->context;
5415 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5417 mutex_lock(&binder_procs_lock);
5418 hlist_del(&proc->proc_node);
5419 mutex_unlock(&binder_procs_lock);
5421 mutex_lock(&context->context_mgr_node_lock);
5422 if (context->binder_context_mgr_node &&
5423 context->binder_context_mgr_node->proc == proc) {
5424 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5425 "%s: %d context_mgr_node gone\n",
5426 __func__, proc->pid);
5427 context->binder_context_mgr_node = NULL;
5429 mutex_unlock(&context->context_mgr_node_lock);
5430 binder_inner_proc_lock(proc);
5432 * Make sure proc stays alive after we
5433 * remove all the threads
5437 proc->is_dead = true;
5439 active_transactions = 0;
5440 while ((n = rb_first(&proc->threads))) {
5441 struct binder_thread *thread;
5443 thread = rb_entry(n, struct binder_thread, rb_node);
5444 binder_inner_proc_unlock(proc);
5446 active_transactions += binder_thread_release(proc, thread);
5447 binder_inner_proc_lock(proc);
5452 while ((n = rb_first(&proc->nodes))) {
5453 struct binder_node *node;
5455 node = rb_entry(n, struct binder_node, rb_node);
5458 * take a temporary ref on the node before
5459 * calling binder_node_release() which will either
5460 * kfree() the node or call binder_put_node()
5462 binder_inc_node_tmpref_ilocked(node);
5463 rb_erase(&node->rb_node, &proc->nodes);
5464 binder_inner_proc_unlock(proc);
5465 incoming_refs = binder_node_release(node, incoming_refs);
5466 binder_inner_proc_lock(proc);
5468 binder_inner_proc_unlock(proc);
5471 binder_proc_lock(proc);
5472 while ((n = rb_first(&proc->refs_by_desc))) {
5473 struct binder_ref *ref;
5475 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5477 binder_cleanup_ref_olocked(ref);
5478 binder_proc_unlock(proc);
5479 binder_free_ref(ref);
5480 binder_proc_lock(proc);
5482 binder_proc_unlock(proc);
5484 binder_release_work(proc, &proc->todo);
5485 binder_release_work(proc, &proc->delivered_death);
5487 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5488 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5489 __func__, proc->pid, threads, nodes, incoming_refs,
5490 outgoing_refs, active_transactions);
5492 binder_proc_dec_tmpref(proc);
5495 static void binder_deferred_func(struct work_struct *work)
5497 struct binder_proc *proc;
5502 mutex_lock(&binder_deferred_lock);
5503 if (!hlist_empty(&binder_deferred_list)) {
5504 proc = hlist_entry(binder_deferred_list.first,
5505 struct binder_proc, deferred_work_node);
5506 hlist_del_init(&proc->deferred_work_node);
5507 defer = proc->deferred_work;
5508 proc->deferred_work = 0;
5513 mutex_unlock(&binder_deferred_lock);
5515 if (defer & BINDER_DEFERRED_FLUSH)
5516 binder_deferred_flush(proc);
5518 if (defer & BINDER_DEFERRED_RELEASE)
5519 binder_deferred_release(proc); /* frees proc */
5522 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5525 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5527 mutex_lock(&binder_deferred_lock);
5528 proc->deferred_work |= defer;
5529 if (hlist_unhashed(&proc->deferred_work_node)) {
5530 hlist_add_head(&proc->deferred_work_node,
5531 &binder_deferred_list);
5532 schedule_work(&binder_deferred_work);
5534 mutex_unlock(&binder_deferred_lock);
5537 static void print_binder_transaction_ilocked(struct seq_file *m,
5538 struct binder_proc *proc,
5540 struct binder_transaction *t)
5542 struct binder_proc *to_proc;
5543 struct binder_buffer *buffer = t->buffer;
5545 spin_lock(&t->lock);
5546 to_proc = t->to_proc;
5548 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5549 prefix, t->debug_id, t,
5550 t->from ? t->from->proc->pid : 0,
5551 t->from ? t->from->pid : 0,
5552 to_proc ? to_proc->pid : 0,
5553 t->to_thread ? t->to_thread->pid : 0,
5554 t->code, t->flags, t->priority, t->need_reply);
5555 spin_unlock(&t->lock);
5557 if (proc != to_proc) {
5559 * Can only safely deref buffer if we are holding the
5560 * correct proc inner lock for this node
5566 if (buffer == NULL) {
5567 seq_puts(m, " buffer free\n");
5570 if (buffer->target_node)
5571 seq_printf(m, " node %d", buffer->target_node->debug_id);
5572 seq_printf(m, " size %zd:%zd data %pK\n",
5573 buffer->data_size, buffer->offsets_size,
5577 static void print_binder_work_ilocked(struct seq_file *m,
5578 struct binder_proc *proc,
5580 const char *transaction_prefix,
5581 struct binder_work *w)
5583 struct binder_node *node;
5584 struct binder_transaction *t;
5587 case BINDER_WORK_TRANSACTION:
5588 t = container_of(w, struct binder_transaction, work);
5589 print_binder_transaction_ilocked(
5590 m, proc, transaction_prefix, t);
5592 case BINDER_WORK_RETURN_ERROR: {
5593 struct binder_error *e = container_of(
5594 w, struct binder_error, work);
5596 seq_printf(m, "%stransaction error: %u\n",
5599 case BINDER_WORK_TRANSACTION_COMPLETE:
5600 seq_printf(m, "%stransaction complete\n", prefix);
5602 case BINDER_WORK_NODE:
5603 node = container_of(w, struct binder_node, work);
5604 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5605 prefix, node->debug_id,
5606 (u64)node->ptr, (u64)node->cookie);
5608 case BINDER_WORK_DEAD_BINDER:
5609 seq_printf(m, "%shas dead binder\n", prefix);
5611 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5612 seq_printf(m, "%shas cleared dead binder\n", prefix);
5614 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5615 seq_printf(m, "%shas cleared death notification\n", prefix);
5618 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5623 static void print_binder_thread_ilocked(struct seq_file *m,
5624 struct binder_thread *thread,
5627 struct binder_transaction *t;
5628 struct binder_work *w;
5629 size_t start_pos = m->count;
5632 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5633 thread->pid, thread->looper,
5634 thread->looper_need_return,
5635 atomic_read(&thread->tmp_ref));
5636 header_pos = m->count;
5637 t = thread->transaction_stack;
5639 if (t->from == thread) {
5640 print_binder_transaction_ilocked(m, thread->proc,
5641 " outgoing transaction", t);
5643 } else if (t->to_thread == thread) {
5644 print_binder_transaction_ilocked(m, thread->proc,
5645 " incoming transaction", t);
5648 print_binder_transaction_ilocked(m, thread->proc,
5649 " bad transaction", t);
5653 list_for_each_entry(w, &thread->todo, entry) {
5654 print_binder_work_ilocked(m, thread->proc, " ",
5655 " pending transaction", w);
5657 if (!print_always && m->count == header_pos)
5658 m->count = start_pos;
5661 static void print_binder_node_nilocked(struct seq_file *m,
5662 struct binder_node *node)
5664 struct binder_ref *ref;
5665 struct binder_work *w;
5669 hlist_for_each_entry(ref, &node->refs, node_entry)
5672 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5673 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5674 node->has_strong_ref, node->has_weak_ref,
5675 node->local_strong_refs, node->local_weak_refs,
5676 node->internal_strong_refs, count, node->tmp_refs);
5678 seq_puts(m, " proc");
5679 hlist_for_each_entry(ref, &node->refs, node_entry)
5680 seq_printf(m, " %d", ref->proc->pid);
5684 list_for_each_entry(w, &node->async_todo, entry)
5685 print_binder_work_ilocked(m, node->proc, " ",
5686 " pending async transaction", w);
5690 static void print_binder_ref_olocked(struct seq_file *m,
5691 struct binder_ref *ref)
5693 binder_node_lock(ref->node);
5694 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5695 ref->data.debug_id, ref->data.desc,
5696 ref->node->proc ? "" : "dead ",
5697 ref->node->debug_id, ref->data.strong,
5698 ref->data.weak, ref->death);
5699 binder_node_unlock(ref->node);
5702 static void print_binder_proc(struct seq_file *m,
5703 struct binder_proc *proc, int print_all)
5705 struct binder_work *w;
5707 size_t start_pos = m->count;
5709 struct binder_node *last_node = NULL;
5711 seq_printf(m, "proc %d\n", proc->pid);
5712 seq_printf(m, "context %s\n", proc->context->name);
5713 header_pos = m->count;
5715 binder_inner_proc_lock(proc);
5716 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5717 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5718 rb_node), print_all);
5720 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5721 struct binder_node *node = rb_entry(n, struct binder_node,
5723 if (!print_all && !node->has_async_transaction)
5727 * take a temporary reference on the node so it
5728 * survives and isn't removed from the tree
5729 * while we print it.
5731 binder_inc_node_tmpref_ilocked(node);
5732 /* Need to drop inner lock to take node lock */
5733 binder_inner_proc_unlock(proc);
5735 binder_put_node(last_node);
5736 binder_node_inner_lock(node);
5737 print_binder_node_nilocked(m, node);
5738 binder_node_inner_unlock(node);
5740 binder_inner_proc_lock(proc);
5742 binder_inner_proc_unlock(proc);
5744 binder_put_node(last_node);
5747 binder_proc_lock(proc);
5748 for (n = rb_first(&proc->refs_by_desc);
5751 print_binder_ref_olocked(m, rb_entry(n,
5754 binder_proc_unlock(proc);
5756 binder_alloc_print_allocated(m, &proc->alloc);
5757 binder_inner_proc_lock(proc);
5758 list_for_each_entry(w, &proc->todo, entry)
5759 print_binder_work_ilocked(m, proc, " ",
5760 " pending transaction", w);
5761 list_for_each_entry(w, &proc->delivered_death, entry) {
5762 seq_puts(m, " has delivered dead binder\n");
5765 binder_inner_proc_unlock(proc);
5766 if (!print_all && m->count == header_pos)
5767 m->count = start_pos;
5770 static const char * const binder_return_strings[] = {
5775 "BR_ACQUIRE_RESULT",
5777 "BR_TRANSACTION_COMPLETE",
5782 "BR_ATTEMPT_ACQUIRE",
5787 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5791 static const char * const binder_command_strings[] = {
5794 "BC_ACQUIRE_RESULT",
5802 "BC_ATTEMPT_ACQUIRE",
5803 "BC_REGISTER_LOOPER",
5806 "BC_REQUEST_DEATH_NOTIFICATION",
5807 "BC_CLEAR_DEATH_NOTIFICATION",
5808 "BC_DEAD_BINDER_DONE",
5809 "BC_TRANSACTION_SG",
5813 static const char * const binder_objstat_strings[] = {
5820 "transaction_complete"
5823 static void print_binder_stats(struct seq_file *m, const char *prefix,
5824 struct binder_stats *stats)
5828 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5829 ARRAY_SIZE(binder_command_strings));
5830 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5831 int temp = atomic_read(&stats->bc[i]);
5834 seq_printf(m, "%s%s: %d\n", prefix,
5835 binder_command_strings[i], temp);
5838 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5839 ARRAY_SIZE(binder_return_strings));
5840 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5841 int temp = atomic_read(&stats->br[i]);
5844 seq_printf(m, "%s%s: %d\n", prefix,
5845 binder_return_strings[i], temp);
5848 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5849 ARRAY_SIZE(binder_objstat_strings));
5850 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5851 ARRAY_SIZE(stats->obj_deleted));
5852 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5853 int created = atomic_read(&stats->obj_created[i]);
5854 int deleted = atomic_read(&stats->obj_deleted[i]);
5856 if (created || deleted)
5857 seq_printf(m, "%s%s: active %d total %d\n",
5859 binder_objstat_strings[i],
5865 static void print_binder_proc_stats(struct seq_file *m,
5866 struct binder_proc *proc)
5868 struct binder_work *w;
5869 struct binder_thread *thread;
5871 int count, strong, weak, ready_threads;
5872 size_t free_async_space =
5873 binder_alloc_get_free_async_space(&proc->alloc);
5875 seq_printf(m, "proc %d\n", proc->pid);
5876 seq_printf(m, "context %s\n", proc->context->name);
5879 binder_inner_proc_lock(proc);
5880 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5883 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5886 seq_printf(m, " threads: %d\n", count);
5887 seq_printf(m, " requested threads: %d+%d/%d\n"
5888 " ready threads %d\n"
5889 " free async space %zd\n", proc->requested_threads,
5890 proc->requested_threads_started, proc->max_threads,
5894 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5896 binder_inner_proc_unlock(proc);
5897 seq_printf(m, " nodes: %d\n", count);
5901 binder_proc_lock(proc);
5902 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5903 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5906 strong += ref->data.strong;
5907 weak += ref->data.weak;
5909 binder_proc_unlock(proc);
5910 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5912 count = binder_alloc_get_allocated_count(&proc->alloc);
5913 seq_printf(m, " buffers: %d\n", count);
5915 binder_alloc_print_pages(m, &proc->alloc);
5918 binder_inner_proc_lock(proc);
5919 list_for_each_entry(w, &proc->todo, entry) {
5920 if (w->type == BINDER_WORK_TRANSACTION)
5923 binder_inner_proc_unlock(proc);
5924 seq_printf(m, " pending transactions: %d\n", count);
5926 print_binder_stats(m, " ", &proc->stats);
5930 int binder_state_show(struct seq_file *m, void *unused)
5932 struct binder_proc *proc;
5933 struct binder_node *node;
5934 struct binder_node *last_node = NULL;
5936 seq_puts(m, "binder state:\n");
5938 spin_lock(&binder_dead_nodes_lock);
5939 if (!hlist_empty(&binder_dead_nodes))
5940 seq_puts(m, "dead nodes:\n");
5941 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5943 * take a temporary reference on the node so it
5944 * survives and isn't removed from the list
5945 * while we print it.
5948 spin_unlock(&binder_dead_nodes_lock);
5950 binder_put_node(last_node);
5951 binder_node_lock(node);
5952 print_binder_node_nilocked(m, node);
5953 binder_node_unlock(node);
5955 spin_lock(&binder_dead_nodes_lock);
5957 spin_unlock(&binder_dead_nodes_lock);
5959 binder_put_node(last_node);
5961 mutex_lock(&binder_procs_lock);
5962 hlist_for_each_entry(proc, &binder_procs, proc_node)
5963 print_binder_proc(m, proc, 1);
5964 mutex_unlock(&binder_procs_lock);
5969 int binder_stats_show(struct seq_file *m, void *unused)
5971 struct binder_proc *proc;
5973 seq_puts(m, "binder stats:\n");
5975 print_binder_stats(m, "", &binder_stats);
5977 mutex_lock(&binder_procs_lock);
5978 hlist_for_each_entry(proc, &binder_procs, proc_node)
5979 print_binder_proc_stats(m, proc);
5980 mutex_unlock(&binder_procs_lock);
5985 int binder_transactions_show(struct seq_file *m, void *unused)
5987 struct binder_proc *proc;
5989 seq_puts(m, "binder transactions:\n");
5990 mutex_lock(&binder_procs_lock);
5991 hlist_for_each_entry(proc, &binder_procs, proc_node)
5992 print_binder_proc(m, proc, 0);
5993 mutex_unlock(&binder_procs_lock);
5998 static int proc_show(struct seq_file *m, void *unused)
6000 struct binder_proc *itr;
6001 int pid = (unsigned long)m->private;
6003 mutex_lock(&binder_procs_lock);
6004 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6005 if (itr->pid == pid) {
6006 seq_puts(m, "binder proc state:\n");
6007 print_binder_proc(m, itr, 1);
6010 mutex_unlock(&binder_procs_lock);
6015 static void print_binder_transaction_log_entry(struct seq_file *m,
6016 struct binder_transaction_log_entry *e)
6018 int debug_id = READ_ONCE(e->debug_id_done);
6020 * read barrier to guarantee debug_id_done read before
6021 * we print the log values
6025 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6026 e->debug_id, (e->call_type == 2) ? "reply" :
6027 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6028 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6029 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6030 e->return_error, e->return_error_param,
6031 e->return_error_line);
6033 * read barrier to guarantee debug_id_done is read after
6034 * we are done printing the fields of the entry
6037 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6038 "\n" : " (incomplete)\n");
6041 int binder_transaction_log_show(struct seq_file *m, void *unused)
6043 struct binder_transaction_log *log = m->private;
6044 unsigned int log_cur = atomic_read(&log->cur);
6049 count = log_cur + 1;
6050 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6051 0 : count % ARRAY_SIZE(log->entry);
6052 if (count > ARRAY_SIZE(log->entry) || log->full)
6053 count = ARRAY_SIZE(log->entry);
6054 for (i = 0; i < count; i++) {
6055 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6057 print_binder_transaction_log_entry(m, &log->entry[index]);
6062 const struct file_operations binder_fops = {
6063 .owner = THIS_MODULE,
6064 .poll = binder_poll,
6065 .unlocked_ioctl = binder_ioctl,
6066 .compat_ioctl = binder_ioctl,
6067 .mmap = binder_mmap,
6068 .open = binder_open,
6069 .flush = binder_flush,
6070 .release = binder_release,
6073 static int __init init_binder_device(const char *name)
6076 struct binder_device *binder_device;
6078 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6082 binder_device->miscdev.fops = &binder_fops;
6083 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6084 binder_device->miscdev.name = name;
6086 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6087 binder_device->context.name = name;
6088 mutex_init(&binder_device->context.context_mgr_node_lock);
6090 ret = misc_register(&binder_device->miscdev);
6092 kfree(binder_device);
6096 hlist_add_head(&binder_device->hlist, &binder_devices);
6101 static int __init binder_init(void)
6104 char *device_name, *device_tmp;
6105 struct binder_device *device;
6106 struct hlist_node *tmp;
6107 char *device_names = NULL;
6109 ret = binder_alloc_shrinker_init();
6113 atomic_set(&binder_transaction_log.cur, ~0U);
6114 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6116 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6117 if (binder_debugfs_dir_entry_root)
6118 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6119 binder_debugfs_dir_entry_root);
6121 if (binder_debugfs_dir_entry_root) {
6122 debugfs_create_file("state",
6124 binder_debugfs_dir_entry_root,
6126 &binder_state_fops);
6127 debugfs_create_file("stats",
6129 binder_debugfs_dir_entry_root,
6131 &binder_stats_fops);
6132 debugfs_create_file("transactions",
6134 binder_debugfs_dir_entry_root,
6136 &binder_transactions_fops);
6137 debugfs_create_file("transaction_log",
6139 binder_debugfs_dir_entry_root,
6140 &binder_transaction_log,
6141 &binder_transaction_log_fops);
6142 debugfs_create_file("failed_transaction_log",
6144 binder_debugfs_dir_entry_root,
6145 &binder_transaction_log_failed,
6146 &binder_transaction_log_fops);
6149 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6150 strcmp(binder_devices_param, "") != 0) {
6152 * Copy the module_parameter string, because we don't want to
6153 * tokenize it in-place.
6155 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6156 if (!device_names) {
6158 goto err_alloc_device_names_failed;
6161 device_tmp = device_names;
6162 while ((device_name = strsep(&device_tmp, ","))) {
6163 ret = init_binder_device(device_name);
6165 goto err_init_binder_device_failed;
6169 ret = init_binderfs();
6171 goto err_init_binder_device_failed;
6175 err_init_binder_device_failed:
6176 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6177 misc_deregister(&device->miscdev);
6178 hlist_del(&device->hlist);
6182 kfree(device_names);
6184 err_alloc_device_names_failed:
6185 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6190 device_initcall(binder_init);
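/*
 * Illustrative note (assumption about a typical build): when
 * CONFIG_ANDROID_BINDERFS is disabled, the devices created in
 * binder_init() come from the comma-separated CONFIG_ANDROID_BINDER_DEVICES
 * string, e.g. "binder,hwbinder,vndbinder"; the strsep() loop above hands
 * each name to init_binder_device() in turn.
 */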
6192 #define CREATE_TRACE_POINTS
6193 #include "binder_trace.h"
6195 MODULE_LICENSE("GPL v2");