net/xdp/xdp_umem.c
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_del_rcu(&xs->list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
                               u16 queue_id)
{
        if (queue_id >= max_t(unsigned int,
                              dev->real_num_rx_queues,
                              dev->real_num_tx_queues))
                return -EINVAL;

        if (queue_id < dev->real_num_rx_queues)
                dev->_rx[queue_id].umem = umem;
        if (queue_id < dev->real_num_tx_queues)
                dev->_tx[queue_id].umem = umem;

        return 0;
}

struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
                                       u16 queue_id)
{
        if (queue_id < dev->real_num_rx_queues)
                return dev->_rx[queue_id].umem;
        if (queue_id < dev->real_num_tx_queues)
                return dev->_tx[queue_id].umem;

        return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);

static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
        if (queue_id < dev->real_num_rx_queues)
                dev->_rx[queue_id].umem = NULL;
        if (queue_id < dev->real_num_tx_queues)
                dev->_tx[queue_id].umem = NULL;
}

int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                        u16 queue_id, u16 flags)
{
        bool force_zc, force_copy;
        struct netdev_bpf bpf;
        int err = 0;

        ASSERT_RTNL();

        force_zc = flags & XDP_ZEROCOPY;
        force_copy = flags & XDP_COPY;

        if (force_zc && force_copy)
                return -EINVAL;

        if (xdp_get_umem_from_qid(dev, queue_id))
                return -EBUSY;

        err = xdp_reg_umem_at_qid(dev, umem, queue_id);
        if (err)
                return err;

        umem->dev = dev;
        umem->queue_id = queue_id;

        if (flags & XDP_USE_NEED_WAKEUP) {
                umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
                /* Tx needs to be explicitly woken up the first time.
                 * Also for supporting drivers that do not implement this
                 * feature. They will always have to call sendto().
                 */
                xsk_set_tx_need_wakeup(umem);
        }

        dev_hold(dev);

        if (force_copy)
                /* For copy-mode, we are done. */
                return 0;

        if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
                err = -EOPNOTSUPP;
                goto err_unreg_umem;
        }

        bpf.command = XDP_SETUP_XSK_UMEM;
        bpf.xsk.umem = umem;
        bpf.xsk.queue_id = queue_id;

        err = dev->netdev_ops->ndo_bpf(dev, &bpf);
        if (err)
                goto err_unreg_umem;

        umem->zc = true;
        return 0;

err_unreg_umem:
        if (!force_zc)
                err = 0; /* fallback to copy mode */
        if (err)
                xdp_clear_umem_at_qid(dev, queue_id);
        return err;
}
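
/* Sketch of how the flags above are typically driven from userspace; this is
 * illustrative only and not part of this file. An AF_XDP application selects
 * copy vs. zero-copy mode (and the need-wakeup optimization) through the
 * sxdp_flags field of struct sockaddr_xdp passed to bind(); those flags are
 * what eventually reach the flags argument of xdp_umem_assign_dev(). The
 * names xsk_fd, ifindex and queue_id below are placeholders for the
 * application's socket and target queue:
 *
 *      struct sockaddr_xdp sxdp = {
 *              .sxdp_family = AF_XDP,
 *              .sxdp_ifindex = ifindex,
 *              .sxdp_queue_id = queue_id,
 *              .sxdp_flags = XDP_USE_NEED_WAKEUP,
 *      };
 *      bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * With neither XDP_ZEROCOPY nor XDP_COPY set, the code above attempts
 * zero-copy and falls back to copy mode if the driver lacks ndo_bpf or
 * ndo_xsk_wakeup, or rejects the XDP_SETUP_XSK_UMEM command.
 */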

void xdp_umem_clear_dev(struct xdp_umem *umem)
{
        struct netdev_bpf bpf;
        int err;

        ASSERT_RTNL();

        if (!umem->dev)
                return;

        if (umem->zc) {
                bpf.command = XDP_SETUP_XSK_UMEM;
                bpf.xsk.umem = NULL;
                bpf.xsk.queue_id = umem->queue_id;

                err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);

                if (err)
                        WARN(1, "failed to disable umem!\n");
        }

        xdp_clear_umem_at_qid(umem->dev, umem->queue_id);

        dev_put(umem->dev);
        umem->dev = NULL;
        umem->zc = false;
}

static void xdp_umem_unmap_pages(struct xdp_umem *umem)
{
        unsigned int i;

        for (i = 0; i < umem->npgs; i++)
                if (PageHighMem(umem->pgs[i]))
                        vunmap(umem->pages[i].addr);
}

static int xdp_umem_map_pages(struct xdp_umem *umem)
{
        unsigned int i;
        void *addr;

        for (i = 0; i < umem->npgs; i++) {
                if (PageHighMem(umem->pgs[i]))
                        addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
                else
                        addr = page_address(umem->pgs[i]);

                if (!addr) {
                        xdp_umem_unmap_pages(umem);
                        return -ENOMEM;
                }

                umem->pages[i].addr = addr;
        }

        return 0;
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                struct page *page = umem->pgs[i];

                set_page_dirty_lock(page);
                put_page(page);
        }

        kfree(umem->pgs);
        umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
        if (umem->user) {
                atomic_long_sub(umem->npgs, &umem->user->locked_vm);
                free_uid(umem->user);
        }
}

static void xdp_umem_release(struct xdp_umem *umem)
{
        rtnl_lock();
        xdp_umem_clear_dev(umem);
        rtnl_unlock();

        ida_simple_remove(&umem_ida, umem->id);

        if (umem->fq) {
                xskq_destroy(umem->fq);
                umem->fq = NULL;
        }

        if (umem->cq) {
                xskq_destroy(umem->cq);
                umem->cq = NULL;
        }

        xsk_reuseq_destroy(umem);

        xdp_umem_unmap_pages(umem);
        xdp_umem_unpin_pages(umem);

        kfree(umem->pages);
        umem->pages = NULL;

        xdp_umem_unaccount_pages(umem);
        kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
        struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

        xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
        refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
        if (!umem)
                return;

        if (refcount_dec_and_test(&umem->users)) {
                INIT_WORK(&umem->work, xdp_umem_release_deferred);
                schedule_work(&umem->work);
        }
}
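
/* Note that the last reference is dropped via a work item rather than by
 * calling xdp_umem_release() directly: the release path takes rtnl_lock()
 * and can sleep while unmapping and unpinning pages, so deferring it to a
 * workqueue keeps xdp_put_umem() usable from contexts that cannot sleep.
 */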

static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
        unsigned int gup_flags = FOLL_WRITE;
        long npgs;
        int err;

        umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
                            GFP_KERNEL | __GFP_NOWARN);
        if (!umem->pgs)
                return -ENOMEM;

        down_read(&current->mm->mmap_sem);
        npgs = get_user_pages(umem->address, umem->npgs,
                              gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
        up_read(&current->mm->mmap_sem);

        if (npgs != umem->npgs) {
                if (npgs >= 0) {
                        umem->npgs = npgs;
                        err = -ENOMEM;
                        goto out_pin;
                }
                err = npgs;
                goto out_pgs;
        }
        return 0;

out_pin:
        xdp_umem_unpin_pages(umem);
out_pgs:
        kfree(umem->pgs);
        umem->pgs = NULL;
        return err;
}

static int xdp_umem_account_pages(struct xdp_umem *umem)
{
        unsigned long lock_limit, new_npgs, old_npgs;

        if (capable(CAP_IPC_LOCK))
                return 0;

        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        umem->user = get_uid(current_user());

        do {
                old_npgs = atomic_long_read(&umem->user->locked_vm);
                new_npgs = old_npgs + umem->npgs;
                if (new_npgs > lock_limit) {
                        free_uid(umem->user);
                        umem->user = NULL;
                        return -ENOBUFS;
                }
        } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
                                     new_npgs) != old_npgs);
        return 0;
}
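
/* The accounting above charges the pinned pages against the caller's
 * RLIMIT_MEMLOCK, expressed in pages. As a rough, illustrative calculation:
 * with a 64 MiB RLIMIT_MEMLOCK and 4 KiB pages, lock_limit is 16384 pages,
 * so a umem larger than 64 MiB is rejected with -ENOBUFS unless the caller
 * has CAP_IPC_LOCK. The cmpxchg loop makes the check-and-charge atomic
 * against concurrent registrations by the same user.
 */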

static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
        bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
        u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
        unsigned int chunks, chunks_per_page;
        u64 addr = mr->addr, size = mr->len;
        int size_chk, err;

        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
                /* Strictly speaking we could support this, if:
                 * - huge pages, or
                 * - using an IOMMU, or
                 * - making sure the memory area is consecutive
                 * but for now, we simply say "computer says no".
                 */
                return -EINVAL;
        }

        if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
                        XDP_UMEM_USES_NEED_WAKEUP))
                return -EINVAL;

        if (!unaligned_chunks && !is_power_of_2(chunk_size))
                return -EINVAL;

        if (!PAGE_ALIGNED(addr)) {
                /* Memory area has to be page size aligned. For
                 * simplicity, this might change.
                 */
                return -EINVAL;
        }

        if ((addr + size) < addr)
                return -EINVAL;

        chunks = (unsigned int)div_u64(size, chunk_size);
        if (chunks == 0)
                return -EINVAL;

        if (!unaligned_chunks) {
                chunks_per_page = PAGE_SIZE / chunk_size;
                if (chunks < chunks_per_page || chunks % chunks_per_page)
                        return -EINVAL;
        }

        headroom = ALIGN(headroom, 64);

        size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
        if (size_chk < 0)
                return -EINVAL;

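        /* A worked example of the checks above (aligned mode, 4 KiB pages,
         * purely illustrative): for a 2 MiB area with chunk_size = 2048 and
         * headroom = 256, chunks = 1024, chunks_per_page = 2 with
         * 1024 % 2 == 0, headroom stays 256 after ALIGN(headroom, 64), and
         * size_chk = 2048 - 256 - XDP_PACKET_HEADROOM (256) = 1536 >= 0,
         * so the registration parameters are accepted.
         */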
        umem->address = (unsigned long)addr;
        umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
                                            : ~((u64)chunk_size - 1);
        umem->size = size;
        umem->headroom = headroom;
        umem->chunk_size_nohr = chunk_size - headroom;
        umem->npgs = size / PAGE_SIZE;
        umem->pgs = NULL;
        umem->user = NULL;
        umem->flags = mr->flags;
        INIT_LIST_HEAD(&umem->xsk_list);
        spin_lock_init(&umem->xsk_list_lock);

        refcount_set(&umem->users, 1);

        err = xdp_umem_account_pages(umem);
        if (err)
                return err;

        err = xdp_umem_pin_pages(umem);
        if (err)
                goto out_account;

        umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages) {
                err = -ENOMEM;
                goto out_pin;
        }

        err = xdp_umem_map_pages(umem);
        if (!err)
                return 0;

        kfree(umem->pages);

out_pin:
        xdp_umem_unpin_pages(umem);
out_account:
        xdp_umem_unaccount_pages(umem);
        return err;
}

struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
        struct xdp_umem *umem;
        int err;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
        if (err < 0) {
                kfree(umem);
                return ERR_PTR(err);
        }
        umem->id = err;

        err = xdp_umem_reg(umem, mr);
        if (err) {
                ida_simple_remove(&umem_ida, umem->id);
                kfree(umem);
                return ERR_PTR(err);
        }

        return umem;
}

bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
        return umem->fq && umem->cq;
}
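
/* For context, a minimal userspace sketch of how a UMEM registration reaches
 * xdp_umem_reg() above (via the XDP_UMEM_REG setsockopt handled in
 * net/xdp/xsk.c, which calls xdp_umem_create()). This is illustrative only
 * and not part of this file: the buffer size, chunk size and headroom are
 * arbitrary example values, and error handling is omitted for brevity.
 *
 *      #include <sys/socket.h>
 *      #include <sys/mman.h>
 *      #include <linux/if_xdp.h>
 *
 *      int main(void)
 *      {
 *              int fd = socket(AF_XDP, SOCK_RAW, 0);
 *              size_t len = 2 * 1024 * 1024;
 *              void *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *              struct xdp_umem_reg mr = {
 *                      .addr = (unsigned long long)area,
 *                      .len = len,
 *                      .chunk_size = 2048,
 *                      .headroom = 0,
 *                      .flags = 0,
 *              };
 *
 *              setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *              return 0;
 *      }
 */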