userfaultfd: use fault_wqh lock
author     Matthew Wilcox <mawilcox@microsoft.com>
           Wed, 22 Aug 2018 04:56:30 +0000 (21:56 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 22 Aug 2018 17:52:47 +0000 (10:52 -0700)
The userfaultfd code currently uses the unlocked waitqueue helpers for
managing fault_wqh, but instead of holding the waitqueue lock for this
waitqueue around these calls, it takes the waitqueue lock of
fault_pending_wqh, which is a different waitqueue instance.  Given that
the fault_wqh waitqueue is not exposed to the rest of the kernel this
actually works OK at the moment, but it prevents the userfaultfd locking
rules from being enforced using lockdep.
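
To make the mismatch concrete, here is a minimal sketch of the
pre-patch pattern (simplified from userfaultfd_release() in
fs/userfaultfd.c, per the diff below): the second
__wake_up_locked_key() call expects the caller to hold fault_wqh.lock,
but the only lock held is fault_pending_wqh.lock:

	spin_lock(&ctx->fault_pending_wqh.lock);
	/* Correct: the lock held matches the waitqueue being woken. */
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	/* Wrong lock: fault_wqh.lock is not held here; this only works
	 * because nothing else ever takes fault_wqh.lock. */
	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
	spin_unlock(&ctx->fault_pending_wqh.lock);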

Switch to the internally locked waitqueue helpers instead.  This means
that the lock inside fault_wqh now nests inside the fault_pending_wqh
lock, but that's not a problem since the fault_wqh lock was entirely
unused before.
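
After the switch the same path looks roughly like this (a sketch,
assuming the helpers behave as in kernel/sched/wait.c, where
__wake_up() takes the waitqueue's own lock internally):

	spin_lock(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	/* __wake_up() acquires and releases fault_wqh.lock itself, so
	 * fault_wqh.lock now nests inside fault_pending_wqh.lock and
	 * lockdep can observe and validate that ordering. */
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock(&ctx->fault_pending_wqh.lock);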

[hch@lst.de: slight changelog updates]
[rppt@linux.vnet.ibm.com: spotted changelog spellos]
Link: http://lkml.kernel.org/r/20171214152344.6880-3-hch@lst.de
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jason Baron <jbaron@akamai.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/userfaultfd.c

index 15c265d450bf897568aed207c8fccdb6a00f40f9..f649023b19b5cec6a44c57189ddd754ffa6b2bce 100644 (file)
@@ -910,7 +910,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         */
        spin_lock(&ctx->fault_pending_wqh.lock);
        __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
-       __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
+       __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 
        /* Flush pending events that may still wait on event_wqh */
@@ -1066,7 +1066,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                         * anyway.
                         */
                        list_del(&uwq->wq.entry);
-                       __add_wait_queue(&ctx->fault_wqh, &uwq->wq);
+                       add_wait_queue(&ctx->fault_wqh, &uwq->wq);
 
                        write_seqcount_end(&ctx->refile_seq);
 
@@ -1215,7 +1215,7 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
                __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
                                     range);
        if (waitqueue_active(&ctx->fault_wqh))
-               __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
+               __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 }
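
For reference, the internally locked helpers the patch switches to are
roughly shaped like this (a simplified sketch of kernel/sched/wait.c
from this era; the bookmark handling inside __wake_up_common() and
other details are elided):

	void add_wait_queue(struct wait_queue_head *wq_head,
			    struct wait_queue_entry *wq_entry)
	{
		unsigned long flags;

		wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
		/* Takes the queue's own lock, so lockdep sees it. */
		spin_lock_irqsave(&wq_head->lock, flags);
		__add_wait_queue(wq_head, wq_entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}

	void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
		       int nr_exclusive, void *key)
	{
		unsigned long flags;

		/* Acquires wq_head->lock before walking the queue. */
		spin_lock_irqsave(&wq_head->lock, flags);
		__wake_up_common(wq_head, mode, nr_exclusive, 0, key);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}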