===== The offending(?) kernel code =====

From ''kernel/futex.c'':

<code c>
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
        __acquires(&hb->lock)
{
        struct futex_hash_bucket *hb;

        hb = hash_futex(&q->key);

        /*
         * Increment the counter before taking the lock so that
         * a potential waker won't miss a to-be-slept task that is
         * waiting for the spinlock. This is safe as all queue_lock()
         * users end up calling queue_me(). Similarly, for housekeeping,
         * decrement the counter at queue_unlock() when some error has
         * occurred and we don't end up adding the task to the list.
         */
        hb_waiters_inc(hb);

        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}
</code>
| + | |||
| + | < | ||
| + | /** | ||
| + | * futex_wait_setup() - Prepare to wait on a futex | ||
| + | * @uaddr: | ||
| + | * @val: the expected value | ||
| + | * @flags: | ||
| + | * @q: the associated futex_q | ||
| + | * @hb: | ||
| + | * | ||
| + | * Setup the futex_q and locate the hash_bucket. | ||
| + | * compare it with the expected value. | ||
| + | * Return with the hb lock held and a q.key reference on success, and unlocked | ||
| + | * with no q.key reference on failure. | ||
| + | * | ||
| + | * Return: | ||
| + | | ||
| + | * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked | ||
| + | */ | ||
| + | static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, | ||
| + | | ||
| + | { | ||
| + | u32 uval; | ||
| + | int ret; | ||
| + | |||
| + | /* | ||
| + | * Access the page AFTER the hash-bucket is locked. | ||
| + | * Order is important: | ||
| + | * | ||
| + | | ||
| + | | ||
| + | * | ||
| + | * The basic logical guarantee of a futex is that it blocks ONLY | ||
| + | * if cond(var) is known to be true at the time of blocking, for | ||
| + | * any cond. If we locked the hash-bucket after testing *uaddr, that | ||
| + | * would open a race condition where we could block indefinitely with | ||
| + | * cond(var) false, which would violate the guarantee. | ||
| + | | ||
| + | * On the other hand, we insert q and release the hash-bucket only | ||
| + | * after testing *uaddr. | ||
| + | * absorb a wakeup if *uaddr does not match the desired values | ||
| + | * while the syscall executes. | ||
| + | */ | ||
| + | retry: | ||
| + | ret = get_futex_key(uaddr, | ||
| + | if (unlikely(ret != 0)) | ||
| + | return ret; | ||
| + | |||
| + | retry_private: | ||
| + | *hb = queue_lock(q); | ||
| + | |||
| + | ret = get_futex_value_locked(& | ||
| + | |||
| + | if (ret) { | ||
| + | queue_unlock(*hb); | ||
| + | |||
| + | ret = get_user(uval, | ||
| + | if (ret) | ||
| + | goto out; | ||
| + | |||
| + | if (!(flags & FLAGS_SHARED)) | ||
| + | goto retry_private; | ||
| + | |||
| + | put_futex_key(& | ||
| + | goto retry; | ||
| + | } | ||
| + | |||
| + | if (uval != val) { | ||
| + | queue_unlock(*hb); | ||
| + | ret = -EWOULDBLOCK; | ||
| + | } | ||
| + | |||
| + | out: | ||
| + | if (ret) | ||
| + | put_futex_key(& | ||
| + | return ret; | ||
| + | } | ||
| + | </ | ||

===== Disassembling ''futex_wait_setup'' =====