asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/staging/erofs/unzip_vle.c
staging: erofs: fix mis-acted TAIL merging behavior
[linux.git] / drivers / staging / erofs / unzip_vle.c
index 74c13b0a3d33e95293631f45e9fa67e814f6cbeb..02f34a83147d21874121a8eee5a39068cfb812d2 100644 (file)
@@ -107,15 +107,30 @@ enum z_erofs_vle_work_role {
        Z_EROFS_VLE_WORK_SECONDARY,
        Z_EROFS_VLE_WORK_PRIMARY,
        /*
-        * The current work has at least been linked with the following
-        * processed chained works, which means if the processing page
-        * is the tail partial page of the work, the current work can
-        * safely use the whole page, as illustrated below:
-        * +--------------+-------------------------------------------+
-        * |  tail page   |      head page (of the previous work)     |
-        * +--------------+-------------------------------------------+
-        *   /\  which belongs to the current work
-        * [  (*) this page can be used for the current work itself.  ]
+        * The current work was the tail of an existing chain, and the previous
+        * processed chained works are all decided to be hooked up to it.
+        * A new chain should be created for the remaining unprocessed works,
+        * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+        * the next work cannot reuse the whole page in the following scenario:
+        *  ________________________________________________________________
+        * |      tail (partial) page     |       head (partial) page       |
+        * |  (belongs to the next work)  |  (belongs to the current work)  |
+        * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+        */
+       Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
+       /*
+        * The current work has been linked with the processed chained works,
+        * and could also be linked with the potential remaining works, which
+        * means if the processing page is the tail partial page of the work,
+        * the current work can safely use the whole page (since the next work
+        * is under control) for in-place decompression, as illustrated below:
+        *  ________________________________________________________________
+        * |  tail (partial) page  |          head (partial) page           |
+        * | (of the current work) |         (of the previous work)         |
+        * |  PRIMARY_FOLLOWED or  |                                        |
+        * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
+        *
+        * [  (*) the above page can be used for the current work itself.  ]
         */
        Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
        Z_EROFS_VLE_WORK_MAX
@@ -309,10 +324,10 @@ static int z_erofs_vle_work_add_page(
        return ret ? 0 : -EAGAIN;
 }
 
-static inline bool try_to_claim_workgroup(
-       struct z_erofs_vle_workgroup *grp,
-       z_erofs_vle_owned_workgrp_t *owned_head,
-       bool *hosted)
+static enum z_erofs_vle_work_role
+try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
+                      z_erofs_vle_owned_workgrp_t *owned_head,
+                      bool *hosted)
 {
        DBG_BUGON(*hosted == true);
 
@@ -326,6 +341,9 @@ static inline bool try_to_claim_workgroup(
 
                *owned_head = &grp->next;
                *hosted = true;
+               /* lucky, I am the followee :) */
+               return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+
        } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
                /*
                 * type 2, link to the end of an existing open chain,
@@ -335,12 +353,11 @@ static inline bool try_to_claim_workgroup(
                if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
                            *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
                        goto retry;
-
                *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
-       } else
-               return false;   /* :( better luck next time */
+               return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
+       }
 
-       return true;    /* lucky, I am the followee :) */
+       return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
 }
 
 struct z_erofs_vle_work_finder {
@@ -418,12 +435,9 @@ z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
        *f->hosted = false;
        if (!primary)
                *f->role = Z_EROFS_VLE_WORK_SECONDARY;
-       /* claim the workgroup if possible */
-       else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
-               *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
-       else
-               *f->role = Z_EROFS_VLE_WORK_PRIMARY;
-
+       else    /* claim the workgroup if possible */
+               *f->role = try_to_claim_workgroup(grp, f->owned_head,
+                                                 f->hosted);
        return work;
 }
 
@@ -487,6 +501,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
        return work;
 }
 
+#define builder_is_hooked(builder) \
+       ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
+
 #define builder_is_followed(builder) \
        ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
 
@@ -680,7 +697,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
        struct z_erofs_vle_work_builder *const builder = &fe->builder;
        const loff_t offset = page_offset(page);
 
-       bool tight = builder_is_followed(builder);
+       bool tight = builder_is_hooked(builder);
        struct z_erofs_vle_work *work = builder->work;
 
        enum z_erofs_cache_alloctype cache_strategy;
@@ -739,7 +756,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
                                 map->m_plen / PAGE_SIZE,
                                 cache_strategy, page_pool, GFP_KERNEL);
 
-       tight &= builder_is_followed(builder);
+       tight &= builder_is_hooked(builder);
        work = builder->work;
 hitted:
        cur = end - min_t(unsigned int, offset + end - map->m_la, end);
@@ -754,6 +771,9 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
                        (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
                                Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
 
+       if (cur)
+               tight &= builder_is_followed(builder);
+
 retry:
        err = z_erofs_vle_work_add_page(builder, page, page_type);
        /* should allocate an additional staging page for pagevec */