block: add bio_set_polled() helper
author    Jens Axboe <axboe@kernel.dk>
          Fri, 21 Dec 2018 16:10:46 +0000 (09:10 -0700)
committer Jens Axboe <axboe@kernel.dk>
          Sun, 24 Feb 2019 15:20:17 +0000 (08:20 -0700)
For the upcoming async polled IO, we can't sleep allocating requests.
If we do, then we introduce a deadlock where the submitter already
has async polled IO in-flight, but can't wait for them to complete
since polled requests must be actively found and reaped.

Utilize the helper in the blockdev DIRECT_IO code.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
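
To make the deadlock concrete, here is a minimal sketch (not part of this
commit) of the rule the message implies for an async submitter. The names
my_submit_polled() and my_reap_completions() are hypothetical stand-ins for
the caller's own submission and poll/reap paths, not kernel APIs:

#include <linux/bio.h>
#include <linux/fs.h>

/*
 * With REQ_NOWAIT set on an async polled bio, a failed request
 * allocation surfaces as -EAGAIN (== -EWOULDBLOCK on Linux) instead
 * of sleeping in the allocator.
 */
static int my_submit_with_reap(struct kiocb *iocb, struct bio *bio)
{
	int ret;

	for (;;) {
		ret = my_submit_polled(iocb, bio);	/* hypothetical submit */
		if (ret != -EAGAIN)
			return ret;
		/*
		 * Sleeping here would deadlock: the requests we need are
		 * held by polled IO that only completes once we poll for
		 * it. Reap our own completions, then retry the submit.
		 */
		my_reap_completions(iocb);		/* hypothetical reap */
	}
}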
fs/block_dev.c
include/linux/bio.h

diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1fe498b08f1bc4303b5f8e206f149ed08a6eae4c..e9faa52bb489c424775af69ccf9362c1c4b07c2f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -248,7 +248,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_HIPRI)
-               bio.bi_opf |= REQ_HIPRI;
+               bio_set_polled(&bio, iocb);
 
        qc = submit_bio(&bio);
        for (;;) {
@@ -419,7 +419,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                        bool polled = false;
 
                        if (iocb->ki_flags & IOCB_HIPRI) {
-                               bio->bi_opf |= REQ_HIPRI;
+                               bio_set_polled(bio, iocb);
                                polled = true;
                        }
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index bdd11d4c2f057aa50d2748b0d36742026659da5e..bb6090aa165d362ae399194fdca8d58b7cb8f5bf 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -826,5 +826,19 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+/*
+ * Mark a bio as polled. Note that for async polled IO, the caller must
+ * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
+ * We cannot block waiting for requests on polled IO, as those completions
+ * must be found by the caller. This is different than IRQ driven IO, where
+ * it's safe to wait for IO to complete.
+ */
+static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
+{
+       bio->bi_opf |= REQ_HIPRI;
+       if (!is_sync_kiocb(kiocb))
+               bio->bi_opf |= REQ_NOWAIT;
+}
+
 #endif /* CONFIG_BLOCK */
 #endif /* __LINUX_BIO_H */
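
For completeness, a usage sketch under stated assumptions: it relies on the
block-layer interfaces as they stood around this commit (submit_bio()
returning a blk_qc_t cookie, blk_poll() taking queue, cookie, and a spin
flag), and my_bio_done() is a hypothetical completion check, e.g. a flag set
from the bio's ->bi_end_io handler:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch only: synchronous polled completion of a single bio. */
static void my_polled_wait(struct block_device *bdev, struct bio *bio,
			   struct kiocb *iocb)
{
	blk_qc_t qc;

	bio_set_polled(bio, iocb);	/* REQ_HIPRI; REQ_NOWAIT too if async */
	qc = submit_bio(bio);

	/* Busy-poll the queue until our completion is found and reaped. */
	while (!my_bio_done(bio))		/* hypothetical done check */
		blk_poll(bdev_get_queue(bdev), qc, true);
}

For a sync kiocb the helper leaves REQ_NOWAIT clear, so submission may still
sleep waiting for a request; only the async case needs the -EAGAIN handling
sketched above.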