/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for the XDP mode that
 * uses one-frame-per-page, but it also has fallbacks that act like
 * the regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  In most cases drivers should use
 * page_pool_dev_alloc_pages(), which replaces dev_alloc_pages().
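 *
 * An RX refill path might then look like this (an illustrative
 * sketch; the rxq structure and the buffer-posting helper are
 * hypothetical driver code, not part of this API):
 *
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_pages(rxq->page_pool);
 *	if (!page)
 *		return -ENOMEM;
 *	my_drv_post_rx_buffer(rxq, page, page_pool_get_dma_addr(page));
 *
 * The page_pool_get_dma_addr() call above assumes the pool was
 * created with PP_FLAG_DMA_MAP, so the pool has already DMA-mapped
 * the page.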
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to deallocate the page_pool object.  Thus, API
 * users must call page_pool_release_page() when a page is "leaving"
 * the page_pool, or call page_pool_put_page() where appropriate, in
 * order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once per page, as it
 * will either recycle the page, or in the case of an elevated refcnt,
 * it will release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
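 *
 * A minimal sketch of the two return paths (illustrative only; the
 * branch condition and the skb construction are simplified driver
 * logic):
 *
 *	if (pass_to_network_stack) {
 *		page_pool_release_page(pool, page); // page leaves the pool
 *		struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);
 *	} else {
 *		page_pool_put_page(pool, page, false); // recycle or free
 *	}
 */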

#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP	1 /* Should page_pool do the DMA map/unmap */
#define PP_FLAG_ALL	PP_FLAG_DMA_MAP

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Room is kept for more objects due to the XDP_DROP use-case, as
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection.  If
 * the cache is already full (or partly full), the XDP_DROP recycles
 * would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count; /* number of pages currently held in the cache array */
	void *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags; /* PP_FLAG_* flags, e.g. PP_FLAG_DMA_MAP */
	unsigned int	order; /* page allocation order (2^order pages) */
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
};
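
/* Example: creating one pool per RX-queue (an illustrative sketch;
 * the rxq/pdev layout, RX_RING_SIZE and error handling are
 * hypothetical driver code):
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP,
 *		.order		= 0,
 *		.pool_size	= RX_RING_SIZE,
 *		.nid		= dev_to_node(&pdev->dev),
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *
 *	rxq->page_pool = page_pool_create(&pp_params);
 *	if (IS_ERR(rxq->page_pool))
 *		return PTR_ERR(rxq->page_pool);
 */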

struct page_pool {
	struct page_pool_params p;

	u32 pages_state_hold_cnt; /* pages handed out; paired with
				   * pages_state_release_cnt for in-flight
				   * accounting
				   */

	/*
	 * Data structure for allocation side
	 *
	 * The driver's allocation side usually already performs some
	 * kind of resource protection.  Piggyback on this protection,
	 * and require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  NAPI scheduling
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt; /* pages returned/released */
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}
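
/* page_pool_dev_alloc_pages() hardcodes GFP_ATOMIC and is intended for
 * the NAPI/softirq RX path.  From process context, e.g. when filling
 * the RX ring at ndo_open time, a driver can instead pass a sleeping
 * gfp mask explicitly (illustrative sketch):
 *
 *	page = page_pool_alloc_pages(pool, GFP_KERNEL | __GFP_NOWARN);
 */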

/* Get the stored DMA direction.  A driver might decide to store this
 * locally and thus avoid touching the extra cache line in struct
 * page_pool just to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
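
/* Illustrative sketch (the rxq field is hypothetical): cache the
 * direction once at ring-setup time instead of re-reading it from the
 * pool for every packet:
 *
 *	rxq->dma_dir = page_pool_get_dma_dir(rxq->page_pool);
 */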

struct page_pool *page_pool_create(const struct page_pool_params *params);

void __page_pool_free(struct page_pool *pool);
static inline void page_pool_free(struct page_pool *pool)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
	 */
#ifdef CONFIG_PAGE_POOL
	__page_pool_free(pool);
#endif
}

/* Never call this directly, use the helpers below */
void __page_pool_put_page(struct page_pool *pool,
			  struct page *page, bool allow_direct);

static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
	 */
#ifdef CONFIG_PAGE_POOL
	__page_pool_put_page(pool, page, allow_direct);
#endif
}

/* Very limited use-cases allow direct recycling (softirq/NAPI context only) */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	__page_pool_put_page(pool, page, true);
}
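
/* Illustrative sketch: recycling from the same softirq/NAPI context
 * that allocated the page, e.g. on an XDP_DROP verdict (the switch on
 * the XDP action is simplified driver code):
 *
 *	case XDP_DROP:
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *		break;
 */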

/* API users MUST have disconnected the alloc-side (i.e. they are no
 * longer allowed to call page_pool_alloc_pages()) before calling this.
 * The free-side can still run concurrently, to handle in-flight
 * packet-pages.
 *
 * A request to shutdown can fail (with false) if there are still
 * in-flight packet-pages.
 */
bool __page_pool_request_shutdown(struct page_pool *pool);
static inline bool page_pool_request_shutdown(struct page_pool *pool)
{
	bool safe_to_remove = false;

#ifdef CONFIG_PAGE_POOL
	safe_to_remove = __page_pool_request_shutdown(pool);
#endif
	return safe_to_remove;
}
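
/* Illustrative teardown sketch following the rules above (the retry
 * policy and the rx-stop helper are hypothetical driver code):
 *
 *	my_drv_stop_rx(rxq);	// no more page_pool_alloc_pages() calls
 *	while (!page_pool_request_shutdown(rxq->page_pool))
 *		msleep(20);	// in-flight packet-pages still outstanding
 *	page_pool_free(rxq->page_pool);
 */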

/* Disconnects a page from a page_pool.  API users may need to
 * disconnect a page from a page_pool, to allow it to be used as a
 * regular page (that will eventually be returned to the normal
 * page-allocator via put_page()).
 */
void page_pool_unmap_page(struct page_pool *pool, struct page *page);
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
#ifdef CONFIG_PAGE_POOL
	page_pool_unmap_page(pool, page);
#endif
}

/* Returns the DMA address the page was mapped at; only meaningful when
 * the pool was created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	return page->dma_addr;
}
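
/* Illustrative sketch (pkt_len and the rxq fields are hypothetical):
 * with PP_FLAG_DMA_MAP the stored address can be used to sync the
 * received data for the CPU before parsing it:
 *
 *	dma_sync_single_for_cpu(rxq->dev, page_pool_get_dma_addr(page),
 *				pkt_len,
 *				page_pool_get_dma_dir(rxq->page_pool));
 */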

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

#endif /* _NET_PAGE_POOL_H */