Remove all redundant BUG_ONs, and turn the rest of the
useful ones into DBG_BUGONs.
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
erofs_vtptr_t t;
if (unlikely(ctor->index >= ctor->nr)) {
erofs_vtptr_t t;
if (unlikely(ctor->index >= ctor->nr)) {
- BUG_ON(ctor->next == NULL);
+ DBG_BUGON(!ctor->next);
z_erofs_pagevec_ctor_pagedown(ctor, true);
}
z_erofs_pagevec_ctor_pagedown(ctor, true);
}
void z_erofs_exit_zip_subsystem(void)
{
void z_erofs_exit_zip_subsystem(void)
{
- BUG_ON(!z_erofs_workqueue);
- BUG_ON(!z_erofs_workgroup_cachep);
-
destroy_workqueue(z_erofs_workqueue);
kmem_cache_destroy(z_erofs_workgroup_cachep);
}
destroy_workqueue(z_erofs_workqueue);
kmem_cache_destroy(z_erofs_workgroup_cachep);
}
struct z_erofs_vle_work *work;
/* if multiref is disabled, grp should never be nullptr */
struct z_erofs_vle_work *work;
/* if multiref is disabled, grp should never be nullptr */
+ if (unlikely(grp)) {
+ DBG_BUGON(1);
+ return ERR_PTR(-EINVAL);
+ }
/* no available workgroup, let's allocate one */
grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
/* no available workgroup, let's allocate one */
grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
bool cachemngd = false;
DBG_BUGON(PageUptodate(page));
bool cachemngd = false;
DBG_BUGON(PageUptodate(page));
- BUG_ON(!page->mapping);
+ DBG_BUGON(!page->mapping);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
if (unlikely(!mc && !z_erofs_is_stagingpage(page))) {
might_sleep();
work = z_erofs_vle_grab_primary_work(grp);
might_sleep();
work = z_erofs_vle_grab_primary_work(grp);
- BUG_ON(!READ_ONCE(work->nr_pages));
+ DBG_BUGON(!READ_ONCE(work->nr_pages));
mutex_lock(&work->lock);
nr_pages = work->nr_pages;
mutex_lock(&work->lock);
nr_pages = work->nr_pages;
else
pagenr = z_erofs_onlinepage_index(page);
else
pagenr = z_erofs_onlinepage_index(page);
- BUG_ON(pagenr >= nr_pages);
- BUG_ON(pages[pagenr]);
+ DBG_BUGON(pagenr >= nr_pages);
+ DBG_BUGON(pages[pagenr]);
continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
if (page->mapping == MNGD_MAPPING(sbi)) {
continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
if (page->mapping == MNGD_MAPPING(sbi)) {
- BUG_ON(PageLocked(page));
- BUG_ON(!PageUptodate(page));
+ DBG_BUGON(!PageUptodate(page));
/* only non-head page could be reused as a compressed page */
pagenr = z_erofs_onlinepage_index(page);
/* only non-head page could be reused as a compressed page */
pagenr = z_erofs_onlinepage_index(page);
- BUG_ON(pagenr >= nr_pages);
- BUG_ON(pages[pagenr]);
+ DBG_BUGON(pagenr >= nr_pages);
+ DBG_BUGON(pages[pagenr]);
++sparsemem_pages;
pages[pagenr] = page;
++sparsemem_pages;
pages[pagenr] = page;
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
- /* FIXME! this should be fixed in the future */
- BUG_ON(grp->llen != llen);
-
err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
pages, nr_pages, work->pageofs);
goto out;
err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
pages, nr_pages, work->pageofs);
goto out;
if (err != -ENOTSUPP)
goto out_percpu;
if (err != -ENOTSUPP)
goto out_percpu;
- if (sparsemem_pages >= nr_pages) {
- BUG_ON(sparsemem_pages > nr_pages);
+ if (sparsemem_pages >= nr_pages)
for (i = 0; i < nr_pages; ++i) {
if (pages[i])
for (i = 0; i < nr_pages; ++i) {
if (pages[i])
struct z_erofs_vle_unzip_io_sb, io.u.work);
LIST_HEAD(page_pool);
struct z_erofs_vle_unzip_io_sb, io.u.work);
LIST_HEAD(page_pool);
- BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+ DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
put_pages_list(&page_pool);
z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
put_pages_list(&page_pool);
- BUG_ON(PagePrivate(page));
set_page_private(page, (unsigned long)head);
head = page;
}
set_page_private(page, (unsigned long)head);
head = page;
}