// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

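/* Refcount helpers: each tracking node allocated below pins the underlying
 * bpf_map, so the map cannot disappear while a socket still points into it.
 */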
int xsk_map_inc(struct xsk_map *map)
{
	bpf_map_inc(&map->map);
	return 0;
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}

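/* An xsk_map_node records which map and which slot (map_entry) a socket
 * occupies, so the socket can unlink itself from every map on teardown.
 * The GFP_ATOMIC allocation never sleeps, which keeps the helper safe to
 * call from atomic context.
 */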
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}

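/* Per-socket bookkeeping: every xdp_sock keeps the list of map slots it
 * occupies on xs->map_list, guarded by xs->map_list_lock.
 */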
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

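/* Map creation: keys and values are both 4 bytes (an array index and an
 * AF_XDP socket file descriptor). The memory charge is taken before the
 * area is allocated so a failed allocation can be uncharged cleanly.
 */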
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	int err, numa_node;
	struct xsk_map *m;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	err = bpf_map_charge_init(&mem, size);
	if (err < 0)
		return ERR_PTR(err);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&m->map, attr);
	bpf_map_charge_move(&m->map.memory, &mem);
	spin_lock_init(&m->lock);

	return &m->map;
}

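/* Teardown: detach the map from the redirect path first, then wait for
 * in-flight readers before the backing area is freed.
 */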
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	synchronize_net();
	bpf_map_area_free(m);
}

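/* Iteration helper for the bpf(BPF_MAP_GET_NEXT_KEY) syscall: a key at or
 * past the end (including the U32_MAX sentinel used when key is NULL)
 * restarts iteration at index 0.
 */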
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

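/* Inline the lookup into the calling BPF program: bounds-check the index,
 * scale it to a pointer-sized slot, add the offset of the xsk_map array,
 * and load the socket pointer; out-of-range lookups yield NULL (R0 = 0).
 */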
static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

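/* Syscall-side update: the 4-byte value is an AF_XDP socket fd. The fd is
 * resolved and validated, a tracking node is preallocated, and the slot is
 * swapped under m->lock so BPF_NOEXIST/BPF_EXIST semantics stay atomic
 * with respect to concurrent updates and socket teardown.
 */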
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	if (!xsk_is_setup_for_bpf_map(xs)) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

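/* Syscall-side delete: clear the slot under m->lock and drop the old
 * socket's tracking node, if any.
 */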
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

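/* Called on socket teardown: clear the entry only if it still points at
 * this socket, since the slot may have been updated concurrently.
 */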
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

const struct bpf_map_ops xsk_map_ops = {
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};
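
/* Usage sketch (user space, not part of this file): an XSKMAP is created
 * with 4-byte keys and values, e.g. with libbpf's bpf_create_map(), and an
 * XDP program redirects into it with the bpf_redirect_map() helper. The
 * map size and fd names below are illustrative only:
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_XSKMAP, sizeof(__u32),
 *				    sizeof(__u32), 64, 0);
 *
 * Binding slot 0 to an AF_XDP socket fd then goes through
 * xsk_map_update_elem() above:
 *
 *	__u32 key = 0, val = xsk_fd;
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */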