ext4: decrypt only the needed blocks in ext4_block_write_begin()
author	Chandan Rajendra <chandan@linux.ibm.com>
	Mon, 20 May 2019 16:29:50 +0000 (09:29 -0700)
committer	Eric Biggers <ebiggers@google.com>
	Tue, 28 May 2019 17:27:53 +0000 (10:27 -0700)
In ext4_block_write_begin(), only decrypt the blocks that actually need
to be decrypted (up to two blocks which intersect the boundaries of the
region that will be written to), rather than assuming blocksize ==
PAGE_SIZE and decrypting the whole page.

This is in preparation for allowing encryption on ext4 filesystems with
blocksize != PAGE_SIZE.

Signed-off-by: Chandan Rajendra <chandan@linux.ibm.com>
(EB: rebase onto previous changes, improve the commit message,
 and move the check for encrypted inode)
Signed-off-by: Eric Biggers <ebiggers@google.com>
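
To make the "up to two blocks" rule above concrete, here is a minimal user-space
sketch (not kernel code, purely an illustration): PAGE_SIZE, the function name
blocks_needing_decrypt() and the example numbers are assumptions for the demo;
in the kernel the equivalent test runs over the buffer heads of the locked page.

#include <stdio.h>

#define PAGE_SIZE 4096u

static void blocks_needing_decrypt(unsigned from, unsigned to,
				   unsigned blocksize)
{
	unsigned block_start, block_end;

	for (block_start = 0; block_start < PAGE_SIZE; block_start = block_end) {
		block_end = block_start + blocksize;
		/*
		 * A block has to be read (and, for an encrypted regular file,
		 * decrypted) only if the write overlaps it but does not cover
		 * it completely, i.e. it straddles 'from' or 'to'.  At most
		 * two blocks per page can satisfy this.
		 */
		if (block_start < to && block_end > from &&
		    (block_start < from || block_end > to))
			printf("decrypt block at page offset %u\n", block_start);
	}
}

int main(void)
{
	/* 1 KiB blocks on a 4 KiB page, writing bytes [500, 2600). */
	blocks_needing_decrypt(500, 2600, 1024);
	return 0;
}

For these inputs only the blocks at page offsets 0 and 2048 are reported: the
block at 1024 is fully overwritten and the block at 3072 is untouched by the
write, so neither has to be read or decrypted.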
fs/ext4/inode.c

index 92776d0ff9b96f1a588f5a4e2e2894b7b45b6716..8e48feddad83edb0504914d3537e27f8dc1da354 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1164,8 +1164,9 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
        int err = 0;
        unsigned blocksize = inode->i_sb->s_blocksize;
        unsigned bbits;
-       struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
-       bool decrypt = false;
+       struct buffer_head *bh, *head, *wait[2];
+       int nr_wait = 0;
+       int i;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(from > PAGE_SIZE);
@@ -1217,24 +1218,30 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                    !buffer_unwritten(bh) &&
                    (block_start < from || block_end > to)) {
                        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
-                       *wait_bh++ = bh;
-                       decrypt = IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+                       wait[nr_wait++] = bh;
                }
        }
        /*
         * If we issued read requests, let them complete.
         */
-       while (wait_bh > wait) {
-               wait_on_buffer(*--wait_bh);
-               if (!buffer_uptodate(*wait_bh))
+       for (i = 0; i < nr_wait; i++) {
+               wait_on_buffer(wait[i]);
+               if (!buffer_uptodate(wait[i]))
                        err = -EIO;
        }
        if (unlikely(err)) {
                page_zero_new_buffers(page, from, to);
-       } else if (decrypt) {
-               err = fscrypt_decrypt_pagecache_blocks(page, PAGE_SIZE, 0);
-               if (err)
-                       clear_buffer_uptodate(*wait_bh);
+       } else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
+               for (i = 0; i < nr_wait; i++) {
+                       int err2;
+
+                       err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+                                                               bh_offset(wait[i]));
+                       if (err2) {
+                               clear_buffer_uptodate(wait[i]);
+                               err = err2;
+                       }
+               }
        }
 
        return err;
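
For comparison, a rough user-space sketch of the shape of the new loop above,
assuming decrypt_block(), offsets[] and decrypt_waited_blocks() as stand-ins
for fscrypt_decrypt_pagecache_blocks(), bh_offset(wait[i]) and the per-block
loop; unlike the real code it does not clear the buffer's uptodate flag on
failure.

#include <stdio.h>

static int decrypt_block(unsigned blocksize, unsigned offset)
{
	/* Stand-in for fscrypt_decrypt_pagecache_blocks(); pretend success. */
	printf("decrypt %u bytes at page offset %u\n", blocksize, offset);
	return 0;
}

static int decrypt_waited_blocks(const unsigned *offsets, int nr_wait,
				 unsigned blocksize)
{
	int err = 0;
	int i;

	for (i = 0; i < nr_wait; i++) {
		int err2 = decrypt_block(blocksize, offsets[i]);

		if (err2)
			err = err2;	/* remember the failure but keep going */
	}
	return err;
}

int main(void)
{
	/* Offsets of the two boundary blocks from the example above. */
	unsigned offsets[] = { 0, 2048 };

	return decrypt_waited_blocks(offsets, 2, 1024);
}

The err/err2 split means one failed block does not stop the remaining blocks
from being attempted, while a failure is still propagated to the caller.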