This patch introduces MNGD_MAPPING to wrap up
sbi->managed_cache->i_mapping, which will be used
to cut down the number of #ifdefs in a single function.
No logic changes.
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
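
For illustration only (not part of the patch itself): a minimal,
self-contained sketch of the pattern, using hypothetical stand-in
names (struct sb_info, page_is_cache_managed) instead of the real
erofs types. Because the macro degrades to NULL when the feature is
compiled out, a comparison against it stays valid in both
configurations, so the per-call-site #ifdef blocks disappear:

	#include <stdio.h>

	/* set to 0 to emulate a build without the managed cache */
	#define HAS_MANAGED_CACHE 1

	struct mapping { const char *name; };
	struct sb_info { struct mapping *managed_cache_mapping; };

	#if HAS_MANAGED_CACHE
	#define MNGD_MAPPING(sbi)	((sbi)->managed_cache_mapping)
	#else
	#define MNGD_MAPPING(sbi)	((struct mapping *)NULL)
	#endif

	/*
	 * One definition serves both configurations: with the feature
	 * compiled out, MNGD_MAPPING() is NULL, the comparison below is
	 * always false, and the caller needs no #ifdef of its own.
	 */
	static int page_is_cache_managed(const struct mapping *page_mapping,
					 struct sb_info *sbi)
	{
		return page_mapping == MNGD_MAPPING(sbi);
	}

	int main(void)
	{
		struct mapping mc = { "managed" };
		struct sb_info sbi = { &mc };

		printf("%d\n", page_is_cache_managed(&mc, &sbi));  /* 1 */
		printf("%d\n", page_is_cache_managed(NULL, &sbi)); /* 0 */
		return 0;
	}

This mirrors how call sites below, e.g. "page->mapping == mngda"
guarded by EROFS_FS_HAS_MANAGED_CACHE, can become a plain
"page->mapping == MNGD_MAPPING(sbi)".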
extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
	struct erofs_workgroup *egrp);
extern int erofs_try_to_free_cached_page(struct address_space *mapping,
struct page *page);
+
+#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
+#else
+#define MNGD_MAPPING(sbi) (NULL)
#endif
#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES 3
{
struct z_erofs_vle_workgroup *const grp =
container_of(egrp, struct z_erofs_vle_workgroup, obj);
- struct address_space *const mapping = sbi->managed_cache->i_mapping;
+ struct address_space *const mapping = MNGD_MAPPING(sbi);
const int clusterpages = erofs_clusterpages(sbi);
int i;
struct z_erofs_vle_work *work = builder->work;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct address_space *const mngda = sbi->managed_cache->i_mapping;
+ struct address_space *const mc = MNGD_MAPPING(sbi);
struct z_erofs_vle_workgroup *grp;
bool noio_outoforder;
#endif
grp = fe->builder.grp;
/* let's do out-of-order decompression for noio */
- noio_outoforder = grab_managed_cache_pages(mngda,
+ noio_outoforder = grab_managed_cache_pages(mc,
erofs_blknr(map->m_pa),
grp->compressed_pages, erofs_blknr(map->m_plen),
/* compressed page caching selection strategy */
unsigned int i;
struct bio_vec *bvec;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct address_space *mngda = NULL;
+ struct address_space *mc = NULL;
#endif
bio_for_each_segment_all(bvec, bio, i) {
BUG_ON(!page->mapping);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (unlikely(!mngda && !z_erofs_is_stagingpage(page))) {
+ if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
struct inode *const inode = page->mapping->host;
struct super_block *const sb = inode->i_sb;
- mngda = EROFS_SB(sb)->managed_cache->i_mapping;
+ mc = MNGD_MAPPING(EROFS_SB(sb));
- * If mngda has not gotten, it equals NULL,
+ * If mc has not been obtained yet, it equals NULL,
* however, page->mapping is never NULL if everything works properly.
*/
- cachemngd = (page->mapping == mngda);
+ cachemngd = (page->mapping == mc);
#endif
if (unlikely(err))
struct list_head *page_pool)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct address_space *const mngda = sbi->managed_cache->i_mapping;
-#endif
const unsigned int clusterpages = erofs_clusterpages(sbi);
struct z_erofs_pagevec_ctor ctor;
if (z_erofs_is_stagingpage(page))
continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- else if (page->mapping == mngda) {
+ if (page->mapping == MNGD_MAPPING(sbi)) {
BUG_ON(PageLocked(page));
BUG_ON(!PageUptodate(page));
continue;
page = compressed_pages[i];
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (page->mapping == mngda)
+ if (page->mapping == MNGD_MAPPING(sbi))
continue;
#endif
/* recycle all individual staging pages */
const unsigned int clusterpages = erofs_clusterpages(sbi);
const gfp_t gfp = GFP_NOFS;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct address_space *const mngda = sbi->managed_cache->i_mapping;
+ struct address_space *const mc = MNGD_MAPPING(sbi);
struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
#endif
struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
cachemngd = true;
goto do_allocpage;
} else if (page) {
- if (page->mapping != mngda)
+ if (page->mapping != mc)
BUG_ON(PageUptodate(page));
else if (recover_managed_page(grp, page)) {
/* page is uptodate, skip io submission */
goto repeat;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
} else if (cachemngd && !add_to_page_cache_lru(page,
- mngda, first_index + i, gfp)) {
+ mc, first_index + i, gfp)) {
set_page_private(page, (unsigned long)grp);
SetPagePrivate(page);
#endif