// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

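/* Sockets bound to a umem are tracked on umem->xsk_list. Updaters serialize
 * on xsk_list_lock while readers traverse the list under rcu_read_lock(),
 * which is why the helpers below use the _rcu list variants.
 */
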
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
			       u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = umem;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = umem;

	return 0;
}

struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
				       u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].umem;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].umem;

	return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);

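/* The symbol is exported for drivers. Purely as an illustration, a driver's
 * queue-setup path could look up the umem bound to one of its rings along
 * these lines (foo_ring and its members are hypothetical):
 *
 *	static int foo_xsk_setup_ring(struct foo_ring *ring)
 *	{
 *		struct xdp_umem *umem;
 *
 *		umem = xdp_get_umem_from_qid(ring->netdev, ring->queue_index);
 *		if (!umem)
 *			return -EINVAL; // no AF_XDP socket on this queue
 *		ring->umem = umem;
 *		return 0;
 *	}
 */
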
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = NULL;
}

int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xdp_get_umem_from_qid(dev, queue_id))
		return -EBUSY;

	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
	if (err)
		return err;

	umem->dev = dev;
	umem->queue_id = queue_id;

	if (flags & XDP_USE_NEED_WAKEUP) {
		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
		/* Tx needs to be explicitly woken up the first time.
		 * Also for supporting drivers that do not implement this
		 * feature. They will always have to call sendto().
		 */
		xsk_set_tx_need_wakeup(umem);
	}

	dev_hold(dev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_umem;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_unreg_umem;

	umem->zc = true;
	return 0;

err_unreg_umem:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err)
		xdp_clear_umem_at_qid(dev, queue_id);
	return err;
}

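/* For reference, the flags checked above come from user space via bind(2) on
 * an AF_XDP socket. A minimal user-space sketch (not part of this file,
 * error handling omitted):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP, // or XDP_ZEROCOPY/XDP_COPY
 *	};
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * Requesting neither XDP_ZEROCOPY nor XDP_COPY lets the kernel attempt
 * zero-copy and silently fall back to copy mode, as implemented above.
 */
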
void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (!umem->dev)
		return;

	if (umem->zc) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		if (err)
			WARN(1, "failed to disable umem!\n");
	}

	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);

	dev_put(umem->dev);
	umem->dev = NULL;
	umem->zc = false;
}

static void xdp_umem_unmap_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++)
		if (PageHighMem(umem->pgs[i]))
			vunmap(umem->pages[i].addr);
}

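/* The pinned pages are also mapped into the kernel's address space so that
 * the descriptor and copy paths can address the umem directly. Lowmem pages
 * already have a linear mapping (page_address()); highmem pages get a
 * kernel mapping via vmap(), undone by vunmap() above.
 */
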
static int xdp_umem_map_pages(struct xdp_umem *umem)
{
	unsigned int i;
	void *addr;

	for (i = 0; i < umem->npgs; i++) {
		if (PageHighMem(umem->pgs[i]))
			addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
		else
			addr = page_address(umem->pgs[i]);

		if (!addr) {
			xdp_umem_unmap_pages(umem);
			return -ENOMEM;
		}

		umem->pages[i].addr = addr;
	}

	return 0;
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

static void xdp_umem_release(struct xdp_umem *umem)
{
	rtnl_lock();
	xdp_umem_clear_dev(umem);
	rtnl_unlock();

	ida_simple_remove(&umem_ida, umem->id);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xsk_reuseq_destroy(umem);

	xdp_umem_unmap_pages(umem);
	xdp_umem_unpin_pages(umem);

	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

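/* Note the teardown order: detach from the device first (under the rtnl
 * lock), then destroy the fill and completion queues, and only unmap, unpin
 * and unaccount the user pages once nothing can reference the buffers.
 */
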
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

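/* The final put defers the actual release to a workqueue: xdp_umem_release()
 * takes the rtnl lock and unpins pages, both of which may sleep, while the
 * last reference may be dropped from a context that cannot block.
 */
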
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	up_read(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			/* Partial pin: release only the pages we did get */
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

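/* FOLL_WRITE | FOLL_LONGTERM above tells get_user_pages() that the pages
 * will be written to and remain pinned for the lifetime of the umem rather
 * than just for the duration of a single operation.
 */
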
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

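/* Worked example of the accounting above: a 16 MiB umem with 4 KiB pages
 * charges 4096 pages against the user's locked_vm. With RLIMIT_MEMLOCK at
 * a typical 64 KiB default (16 pages), the loop fails with -ENOBUFS, so
 * callers normally raise the memlock limit or hold CAP_IPC_LOCK, which
 * skips accounting entirely.
 */
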
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
			XDP_UMEM_USES_NEED_WAKEUP))
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks) {
		chunks_per_page = PAGE_SIZE / chunk_size;
		if (chunks < chunks_per_page || chunks % chunks_per_page)
			return -EINVAL;
	}

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->address = (unsigned long)addr;
	umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
					    : ~((u64)chunk_size - 1);
	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	err = xdp_umem_map_pages(umem);
	if (!err)
		return 0;

	kfree(umem->pages);

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

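/* The xdp_umem_reg layout validated by xdp_umem_reg() is filled in by user
 * space and handed over with setsockopt(2). A minimal user-space sketch
 * (not part of this file, error handling omitted; buf is a page-aligned
 * allocation, e.g. from mmap()):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buf,
 *		.len = 16 * 1024 * 1024,	// 16 MiB
 *		.chunk_size = 4096,		// power of 2, >= 2048
 *		.headroom = 0,
 *	};
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */
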
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}