// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

struct xsk_map {
	struct bpf_map map;
	struct xdp_sock **xsk_map;
	struct list_head __percpu *flush_list;
};
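
/* An XSKMAP holds one AF_XDP socket per slot, and an XDP program picks a
 * slot with the bpf_redirect_map() helper to steer received frames into
 * that socket. A minimal sketch of the BPF-program side, along the lines
 * of samples/bpf/xdpsock_kern.c (map and function names illustrative):
 *
 *	struct bpf_map_def SEC("maps") xsks_map = {
 *		.type = BPF_MAP_TYPE_XSKMAP,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(int),
 *		.max_entries = 4,
 *	};
 *
 *	SEC("xdp_sock")
 *	int xdp_sock_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
 *	}
 *
 * User space then stores each socket's fd in the slot matching its bound
 * Rx queue via bpf_map_update_elem().
 */

/* Map creation requires CAP_NET_ADMIN; the array and the per-cpu flush
 * lists are sized up front and checked against the caller's
 * RLIMIT_MEMLOCK budget before any memory is committed.
 */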
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct xsk_map *m;
	int cpu, err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	m = kzalloc(sizeof(*m), GFP_USER);
	if (!m)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&m->map, attr);

	cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
	cost += sizeof(struct list_head) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_m;

	m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* Notice this returns -EPERM if map size is larger than memlock limit */
	err = bpf_map_precharge_memlock(m->map.pages);
	if (err)
		goto free_m;

	err = -ENOMEM;

	m->flush_list = alloc_percpu(struct list_head);
	if (!m->flush_list)
		goto free_m;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));

	m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
					sizeof(struct xdp_sock *),
					m->map.numa_node);
	if (!m->xsk_map)
		goto free_percpu;
	return &m->map;

free_percpu:
	free_percpu(m->flush_list);
free_m:
	kfree(m);
	return ERR_PTR(err);
}
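
/* Map teardown, run once the last map reference is gone. The
 * bpf_clear_redirect_map() plus synchronize_net() pair makes sure no
 * in-flight XDP program still holds a per-cpu redirect pointer into this
 * map before the socket references are dropped.
 */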
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	int i;

	bpf_clear_redirect_map(map);
	synchronize_net();

	for (i = 0; i < map->max_entries; i++) {
		struct xdp_sock *xs;

		xs = m->xsk_map[i];
		if (!xs)
			continue;

		sock_put((struct sock *)xs);
	}

	free_percpu(m->flush_list);
	bpf_map_area_free(m->xsk_map);
	kfree(m);
}
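
/* bpf(BPF_MAP_GET_NEXT_KEY): a NULL or out-of-range key restarts the walk
 * at index 0; asking for the successor of the last slot ends the walk
 * with -ENOENT.
 */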
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
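
/* Fast-path lookup used by the XDP redirect machinery; runs under the RCU
 * read lock held for the duration of the NAPI poll.
 */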
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}
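
/* Receives one frame into the socket's Rx ring and, on success, queues the
 * socket on this CPU's flush list so __xsk_map_flush() can notify it once
 * the whole batch has been processed. A NULL flush_node.prev serves as the
 * "not yet queued" marker and prevents double-adds.
 */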
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
		       struct xdp_sock *xs)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}
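
/* Called at the end of a NAPI poll cycle: flushes every socket queued on
 * this CPU by __xsk_map_redirect(), unlinks it, and resets
 * flush_node.prev so the socket can be queued again next round.
 */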
void __xsk_map_flush(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del(xs->flush_node.prev, xs->flush_node.next);
		xs->flush_node.prev = NULL;
	}
}
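
/* Lookups from the syscall side are not supported: the stored values are
 * kernel socket pointers and must not be exposed to user space.
 */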
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}
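
/* Updates take an AF_XDP socket fd as the value. The fd is resolved to
 * its struct xdp_sock, a socket reference is taken, and the new socket is
 * swapped into the slot with xchg(); the previous occupant, if any, has
 * its reference dropped. Since the update is a lockless xchg(),
 * BPF_NOEXIST could not be checked atomically and is rejected up front.
 */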
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xdp_sock *xs, *old_xs;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	if (!xsk_is_setup_for_bpf_map(xs)) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	sock_hold(sock->sk);

	old_xs = xchg(&m->xsk_map[i], xs);
	if (old_xs)
		sock_put((struct sock *)old_xs);

	sockfd_put(sock);
	return 0;
}
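
/* Deletion clears the slot with xchg() and, if the slot was occupied,
 * drops the reference the map held on the evicted socket.
 */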
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_xs = xchg(&m->xsk_map[k], NULL);
	if (old_xs)
		sock_put((struct sock *)old_xs);

	return 0;
}
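
/* Registered as the handler for BPF_MAP_TYPE_XSKMAP via the
 * BPF_MAP_TYPE() entry in include/linux/bpf_types.h.
 */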
const struct bpf_map_ops xsk_map_ops = {
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};