asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/ttm: roundup the shrink request to prevent skip huge pool
authorRoger He <Hongbo.He@amd.com>
Tue, 21 Nov 2017 08:47:16 +0000 (16:47 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 6 Dec 2017 17:48:15 +0000 (12:48 -0500)
e.g. if the shrink request is less than 512 pages, the logic will skip the huge pool

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/ttm/ttm_page_alloc.c

index 337c228b44ad27551c6ed29339cb8e588db63737..116897a205141156af7498b8c0708431ea49d489 100644 (file)
@@ -442,17 +442,19 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
+               unsigned page_nr;
+
                if (shrink_pages == 0)
                        break;
 
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+               page_nr = (1 << pool->order);
                /* OK to use static buffer since global mutex is held. */
-               nr_free_pool = (nr_free >> pool->order);
-               if (nr_free_pool == 0)
-                       continue;
-
+               nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
                shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
-               freed += ((nr_free_pool - shrink_pages) << pool->order);
+               freed += (nr_free_pool - shrink_pages) << pool->order;
+               if (freed >= sc->nr_to_scan)
+                       break;
        }
        mutex_unlock(&lock);
        return freed;