staging: erofs: decompress asynchronously if PG_readahead page at first
author Gao Xiang <gaoxiang25@huawei.com>
Thu, 22 Nov 2018 17:21:48 +0000 (01:21 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 23 Nov 2018 09:53:08 +0000 (10:53 +0100)
For the case of nr_to_read == lookahead_size, i.e. a pure readahead
request with no reader waiting on any page, it is better to decompress
asynchronously as well, since no page will be needed immediately.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
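
For background, the marker placement this relies on can be sketched as
follows (a simplified, hypothetical reading of __do_page_cache_readahead()
in mm/readahead.c of this era; page-cache lookups and error handling are
omitted): the page sitting lookahead_size slots before the end of the
window gets PG_readahead, so the marker lands on the very first page
exactly when nr_to_read == lookahead_size.

	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		struct page *page = __page_cache_alloc(gfp_mask);

		if (!page)
			break;
		page->index = offset + page_idx;
		list_add(&page->lru, &page_pool);
		/*
		 * The marker hits the first page iff
		 * nr_to_read == lookahead_size, i.e. a pure
		 * readahead request with nobody waiting on it.
		 */
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
	}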
drivers/staging/erofs/unzip_vle.c

index 6aa3c989dd4e1ff77b12f3cfd3255207ca4af58e..fab907e0fe06c06f9c0473f5952b8031571b854e 100644 (file)
@@ -1345,8 +1345,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 {
        struct inode *const inode = mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-       const bool sync = __should_decompress_synchronously(sbi, nr_pages);
 
+       bool sync = __should_decompress_synchronously(sbi, nr_pages);
        struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
        struct page *head = NULL;
@@ -1364,6 +1364,13 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
                prefetchw(&page->flags);
                list_del(&page->lru);
 
+               /*
+                * A pure asynchronous readahead is indicated if
+                * a PG_readahead marked page is hit first.
+                * Let's also do asynchronous decompression for this case.
+                */
+               sync &= !(PageReadahead(page) && !head);
+
                if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
                        list_add(&page->lru, &pagepool);
                        continue;
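
Restated as a hypothetical standalone helper (illustrative only, not
in-tree; the name is made up), the hunk above clears "sync" only while
no page of the batch has been queued yet:

	/*
	 * Equivalent of "sync &= !(PageReadahead(page) && !head)".
	 * "head" chains the pages already queued for decompression via
	 * page->private, so it is still NULL when the first page of the
	 * batch is examined.
	 */
	static bool keep_sync_decompression(bool sync, struct page *page,
					    struct page *head)
	{
		if (!head && PageReadahead(page))
			return false;	/* pure readahead: no reader waits */
		return sync;
	}

Once a page has been queued (head != NULL), a PG_readahead marker deeper
in the batch no longer demotes the request, matching the original
expression.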