// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

struct xsk_map {
	struct bpf_map map;
	struct xdp_sock **xsk_map;
	struct list_head __percpu *flush_list;
	spinlock_t lock; /* Synchronize map updates */
};

int xsk_map_inc(struct xsk_map *map)
{
	struct bpf_map *m = &map->map;

	m = bpf_map_inc(m, false);
	return PTR_ERR_OR_ZERO(m);
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}

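/* Each socket keeps a list of xsk_map_node entries, one per map slot that
 * points to it, so the slot can be cleared when the socket is released.
 * A node holds a reference on the map (xsk_map_inc()) while the slot is set.
 */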
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}

static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

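/* Allocate the map itself, the per-CPU flush lists used by XDP_REDIRECT and
 * the array of socket pointers. The whole cost is charged against the
 * memlock limit up front.
 */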
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct xsk_map *m;
	int cpu, err;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	m = kzalloc(sizeof(*m), GFP_USER);
	if (!m)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&m->map, attr);
	spin_lock_init(&m->lock);

	cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
	cost += sizeof(struct list_head) * num_possible_cpus();

	/* Notice that this returns -EPERM if the map size is larger than
	 * the memlock limit.
	 */
	err = bpf_map_charge_init(&m->map.memory, cost);
	if (err)
		goto free_m;

	err = -ENOMEM;

	m->flush_list = alloc_percpu(struct list_head);
	if (!m->flush_list)
		goto free_charge;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));

	m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
					sizeof(struct xdp_sock *),
					m->map.numa_node);
	if (!m->xsk_map)
		goto free_percpu;
	return &m->map;

free_percpu:
	free_percpu(m->flush_list);
free_charge:
	bpf_map_charge_finish(&m->map.memory);
free_m:
	kfree(m);
	return ERR_PTR(err);
}

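/* By the time ->map_free runs the map is no longer reachable from the
 * syscall side; clear the redirect cache and wait for in-flight datapath
 * users before freeing the backing memory.
 */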
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	synchronize_net();
	free_percpu(m->flush_list);
	bpf_map_area_free(m->xsk_map);
	kfree(m);
}

static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

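/* Datapath lookup, called from bpf_redirect_map() under RCU. The entry is
 * read with READ_ONCE() since it may be cleared concurrently.
 */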
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

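/* Receive one frame into the socket's Rx ring via xsk_rcv() and queue the
 * socket on this CPU's flush list; the ring is flushed and the reader woken
 * later in __xsk_map_flush().
 */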
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

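/* Called at the end of the NAPI poll cycle: flush every socket that was
 * queued on this CPU's flush list during the preceding redirects.
 */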
void __xsk_map_flush(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

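/* Syscall-side update: the value is an AF_XDP socket fd. Resolve it to a
 * socket, then install it under m->lock so concurrent updates, deletes and
 * socket release (xsk_map_try_sock_delete()) stay consistent.
 */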
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	if (!xsk_is_setup_for_bpf_map(xs)) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

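/* Syscall-side delete: clear the entry under m->lock and unhook the old
 * socket's node, which drops its reference on the map.
 */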
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

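/* Called when an AF_XDP socket is released: clear the entry only if it
 * still points at this socket, so a concurrent update is not overwritten.
 */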
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

const struct bpf_map_ops xsk_map_ops = {
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};