/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA
	 * transmit, which is the XDP_TX use-case.
	 */
	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
		return -EINVAL;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

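/* Example (illustrative sketch, not part of the upstream file): a driver
 * would typically fill in struct page_pool_params for its RX ring and
 * create one pool per ring.  The ring size, device pointer and error
 * handling below are assumptions:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP,
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= rxq->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
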
static void __page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
						 bool refill)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable.
	 * This assumes the CPU refilling the driver RX-ring will also run
	 * the RX-NAPI of that ring.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Slower-path: Get pages from locked ring queue */
	spin_lock(&r->consumer_lock);

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			__page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL &&
		 refill);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	spin_unlock(&r->consumer_lock);
	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		refill = true;
	}

	page = page_pool_refill_alloc_cache(pool, refill);
	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * The current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance.  This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
	page = alloc_pages(gfp, pool->p.order);
#endif
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use the 'struct page' area for storing the
	 * DMA address, since dma_addr_t can be either 32 or 64 bits and
	 * does not always fit into the page private data (i.e. a 32bit
	 * CPU with 64bit DMA capabilities).
	 * This mapping is kept for the lifetime of the page, until it
	 * leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A page that was just allocated should/must have refcnt 1. */
	return page;
}

/* Use page_pool to replace alloc_pages() API calls, but with a
 * synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

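/* Example (illustrative sketch, not part of the upstream file): in a
 * driver RX-refill path, running in NAPI/softirq context, allocation
 * typically looks like the following; rxq and the descriptor helper are
 * assumptions:
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 *	dma = page->dma_addr + rxq->page_pool->p.offset;
 *	rx_desc_set_addr(rxq, dma);
 *
 * With PP_FLAG_DMA_MAP set, the DMA address is stored in page->dma_addr
 * by __page_pool_alloc_pages_slow() above.
 */
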
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b) (s32)((a) - (b))

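/* Example: serial number arithmetic keeps the in-flight count correct
 * across u32 wrap-around.  With hold_cnt = 3 and release_cnt = 0xFFFFFFFE,
 * _distance(3, 0xFFFFFFFE) = (s32)(3 - 0xFFFFFFFE) = 5, i.e. five pages
 * are still outstanding even though hold_cnt has numerically wrapped past
 * release_cnt.
 */
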
static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
				   struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_unmap;

	dma = page->dma_addr;
	/* DMA unmap */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page->dma_addr = 0;
skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}

/* unmap the page and clean our state */
void page_pool_unmap_page(struct page_pool *pool, struct page *page)
{
	/* When the page is unmapped, this implies the page will not be
	 * returned to the page_pool.
	 */
	__page_pool_clean_page(pool, page);
}
EXPORT_SYMBOL(page_pool_unmap_page);

/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
	__page_pool_clean_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool __page_pool_recycle_into_ring(struct page_pool *pool,
					  struct page *page)
{
	int ret;
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page);
}

void __page_pool_put_page(struct page_pool *pool, struct page *page,
			  unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one frame per page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq())
			if (__page_pool_recycle_direct(page, pool))
				return;

		if (!__page_pool_recycle_into_ring(pool, page)) {
			/* Cache full, fallback to free pages */
			__page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case the page_pool maintains the DMA mapping, the API user
	 * must call page_pool_put_page once.  In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	__page_pool_clean_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);

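/* Example (illustrative sketch, not part of the upstream file): how a
 * caller is expected to return pages.  Passing -1 as dma_sync_size means
 * "sync up to pool->p.max_len" (see page_pool_dma_sync_for_device()
 * above); the calling contexts are assumptions:
 *
 *	__page_pool_put_page(pool, page, -1, true);
 *		(RX-NAPI / XDP_DROP: direct recycle into the alloc cache)
 *
 *	__page_pool_put_page(pool, page, -1, false);
 *		(any other context: recycle via the ptr_ring, or free)
 */
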
static void __page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		__page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty the alloc cache, assuming the caller made sure it is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		__page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	__page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

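/* Example (illustrative sketch, not part of the upstream file): after the
 * driver has stopped its RX ring and returned the pages it still holds,
 * shutdown is a single call:
 *
 *	page_pool_destroy(rxq->page_pool);
 *
 * If an XDP memory model was registered, it holds an extra user reference
 * (see page_pool_use_xdp_mem() above), so the pool is only freed once that
 * user disconnects as well; pages still in-flight defer the final free to
 * page_pool_release_retry().
 */
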
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		__page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);