/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *      Jay Schulist <jschlst@samba.org>
 *      Alexei Starovoitov <ast@plumgrid.com>
 *      Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>

/**
 *      sk_filter_trim_cap - run a packet through a socket filter
 *      @sk: sock associated with &sk_buff
 *      @skb: buffer to filter
 *      @cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
        int err;
        struct sk_filter *filter;

        /*
         * If the skb was allocated from pfmemalloc reserves, only
         * allow SOCK_MEMALLOC sockets to use it as this socket is
         * helping free memory
         */
        if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
                return -ENOMEM;
        }
        err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
        if (err)
                return err;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                struct sock *save_sk = skb->sk;
                unsigned int pkt_len;

                skb->sk = sk;
                pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
                skb->sk = save_sk;
                err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
        }
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
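
/* A minimal userspace sketch (illustrative only, guarded by #if 0 so it is
 * not built as part of this file): attaching a classic BPF program with
 * SO_ATTACH_FILTER is what ultimately makes sk_filter_trim_cap() run on
 * every packet queued to the socket. The single-instruction filter below
 * returns 64, so longer packets are trimmed to 64 bytes on ingress.
 */
#if 0
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_trim_filter(int sock_fd)
{
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 64),  /* accept first 64 bytes */
        };
        struct sock_fprog fprog = {
                .len    = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_FILTER,
                          &fprog, sizeof(fprog));
}
#endif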

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
        return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = (struct nlattr *) &skb->data[a];
        if (nla->nla_len > skb->len - a)
                return 0;

        nla = nla_find_nested(nla, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}
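
/* Illustrative only (guarded by #if 0): how classic BPF reaches the two
 * nlattr helpers above. Per the ancillary-load convention, A holds the
 * offset to start searching at and X holds the attribute type; the load
 * leaves the attribute's offset in A, or 0 if it was not found. The
 * attribute type 1 below is an arbitrary example value.
 */
#if 0
struct sock_filter nlattr_example[] = {
        BPF_STMT(BPF_LDX | BPF_IMM, 1),         /* X = attribute type */
        BPF_STMT(BPF_LD | BPF_IMM, 0),          /* A = search offset */
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_NLATTR),
        BPF_STMT(BPF_RET | BPF_A, 0),           /* return attribute offset */
};
#endif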

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u8 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return *(u8 *)(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return tmp;
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return *(u8 *)ptr;
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
                                         offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u16 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return get_unaligned_be16(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be16_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be16(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u32 tmp, *ptr;
        const int len = sizeof(tmp);

        if (likely(offset >= 0)) {
                if (headlen - offset >= len)
                        return get_unaligned_be32(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be32_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be32(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}
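
/* The load helpers above also accept negative offsets, which classic BPF
 * uses for its special address ranges: SKF_NET_OFF-relative loads read from
 * the network header and SKF_LL_OFF-relative loads from the link-layer
 * header, both resolved via bpf_internal_load_pointer_neg_helper(). An
 * illustrative sketch (#if 0, not built): reading the IPv4 protocol byte
 * independent of the link-layer header length.
 */
#if 0
struct sock_filter neg_offset_example[] = {
        /* A = byte at network header + 9 (IPv4 protocol field) */
        BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
        BPF_STMT(BPF_RET | BPF_A, 0),
};
#endif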

BPF_CALL_0(bpf_get_raw_cpu_id)
{
        return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
        .func           = bpf_get_raw_cpu_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
                              struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;

        switch (skb_field) {
        case SKF_AD_MARK:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

                *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
                                      offsetof(struct sk_buff, mark));
                break;

        case SKF_AD_PKTTYPE:
                *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
                *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
                *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
                break;

        case SKF_AD_QUEUE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, queue_mapping));
                break;

        case SKF_AD_VLAN_TAG:
        case SKF_AD_VLAN_TAG_PRESENT:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

                /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, vlan_tci));
                if (skb_field == SKF_AD_VLAN_TAG) {
                        *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
                                                ~VLAN_TAG_PRESENT);
                } else {
                        /* dst_reg >>= 12 */
                        *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
                        /* dst_reg &= 1 */
                        *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
                }
                break;
        }

        return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct bpf_insn **insnp)
{
        struct bpf_insn *insn = *insnp;
        u32 cnt;

        switch (fp->k) {
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

                /* A = *(u16 *) (CTX + offsetof(protocol)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, protocol));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PKTTYPE:
                cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      BPF_REG_TMP, BPF_REG_CTX,
                                      offsetof(struct sk_buff, dev));
                /* if (tmp != 0) goto pc + 1 */
                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
                *insn++ = BPF_EXIT_INSN();
                if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, ifindex));
                else
                        *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, type));
                break;

        case SKF_AD_OFF + SKF_AD_MARK:
                cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_RXHASH:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

                *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
                                    offsetof(struct sk_buff, hash));
                break;

        case SKF_AD_OFF + SKF_AD_QUEUE:
                cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TPID:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

                /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, vlan_proto));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
        case SKF_AD_OFF + SKF_AD_NLATTR:
        case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
        case SKF_AD_OFF + SKF_AD_CPU:
        case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = CTX */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
                /* arg2 = A */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
                /* arg3 = X */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
                /* Emit call(arg1=CTX, arg2=A, arg3=X) */
                switch (fp->k) {
                case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
                        break;
                case SKF_AD_OFF + SKF_AD_CPU:
                        *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
                        break;
                case SKF_AD_OFF + SKF_AD_RANDOM:
                        *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
                        bpf_user_rnd_init_once();
                        break;
                }
                break;

        case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
                /* A ^= X */
                *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
                break;

        default:
                /* This is just a dummy call to avoid letting the compiler
                 * evict __bpf_call_base() as an optimization. Placed here
                 * where no-one bothers.
                 */
                BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
                return false;
        }

        *insnp = insn;
        return true;
}
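
/* To illustrate what convert_bpf_extensions() emits, the classic MARK
 * ancillary load below maps to a single eBPF context load (a sketch of the
 * mapping, guarded by #if 0 so it is not built):
 */
#if 0
/* cBPF: A = skb->mark via the ancillary MARK load ... */
struct sock_filter cbpf_mark[] = {
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),
};

/* ... which convert_skb_access(SKF_AD_MARK, ...) turns into: */
struct bpf_insn ebpf_mark[] = {
        BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
                    offsetof(struct sk_buff, mark)),
};
#endif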

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
        const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
        int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
        bool endian = BPF_SIZE(fp->code) == BPF_H ||
                      BPF_SIZE(fp->code) == BPF_W;
        bool indirect = BPF_MODE(fp->code) == BPF_IND;
        const int ip_align = NET_IP_ALIGN;
        struct bpf_insn *insn = *insnp;
        int offset = fp->k;

        if (!indirect &&
            ((unaligned_ok && offset >= 0) ||
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              offset + ip_align % size == 0))) {
                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
                *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
                *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
                                      offset);
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
        }

        *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
        if (!indirect) {
                *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
        } else {
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
                if (fp->k)
                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
        }

        switch (BPF_SIZE(fp->code)) {
        case BPF_B:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
                break;
        case BPF_H:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
                break;
        case BPF_W:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
                break;
        default:
                return false;
        }

        *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
        *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
        *insn   = BPF_EXIT_INSN();

        *insnp = insn;
        return true;
}
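
/* For example, a classic "ldh [12]" (load the EtherType) is expanded by
 * convert_bpf_ld_abs() into roughly the following, where BPF_REG_D (R8)
 * and BPF_REG_H (R9) hold skb->data and the linear headlen cached by the
 * prologue (an illustrative C rendering, not the literal emitted insns):
 *
 *      if (headlen - 12 >= 2) {                // fast path, linear data
 *              A = get_unaligned_be16(data + 12);
 *      } else {                                // slow path, helper call
 *              A = bpf_skb_load_helper_16(skb, data, headlen, 12);
 *              if (A < 0)
 *                      return 0;               // out of bounds: drop
 *      }
 */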

/**
 *      bpf_convert_filter - convert filter program
 *      @prog: the user passed filter program
 *      @len: the length of the user passed filter program
 *      @new_prog: allocated 'struct bpf_prog' or NULL
 *      @new_len: pointer to store length of converted program
 *      @seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap, itself done in two passes: the 1st pass finds the
 *    new jump offsets, the 2nd pass does the remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
                              struct bpf_prog *new_prog, int *new_len,
                              bool *seen_ld_abs)
{
        int new_flen = 0, pass = 0, target, i, stack_off;
        struct bpf_insn *new_insn, *first_insn = NULL;
        struct sock_filter *fp;
        int *addrs = NULL;
        u8 bpf_src;

        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

        if (len <= 0 || len > BPF_MAXINSNS)
                return -EINVAL;

        if (new_prog) {
                first_insn = new_prog->insnsi;
                addrs = kcalloc(len, sizeof(*addrs),
                                GFP_KERNEL | __GFP_NOWARN);
                if (!addrs)
                        return -ENOMEM;
        }

do_pass:
        new_insn = first_insn;
        fp = prog;

        /* Classic BPF related prologue emission. */
        if (new_prog) {
                /* Classic BPF expects A and X to be reset first. These need
                 * to be guaranteed to be the first two instructions.
                 */
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

                /* All programs must keep CTX in callee saved BPF_REG_CTX.
                 * In the eBPF case it's done by the compiler; here we need
                 * to do this ourselves. Initial CTX is present in BPF_REG_ARG1.
                 */
                *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
                if (*seen_ld_abs) {
                        /* For packet access in classic BPF, cache skb->data
                         * in callee-saved BPF R8 and skb->len - skb->data_len
                         * (headlen) in BPF R9. Since classic BPF is read-only
                         * on CTX, we only need to cache it once.
                         */
                        *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
                                                  BPF_REG_D, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, len));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data_len));
                        *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
                }
        } else {
                new_insn += 3;
        }

        for (i = 0; i < len; fp++, i++) {
                struct bpf_insn tmp_insns[32] = { };
                struct bpf_insn *insn = tmp_insns;

                if (addrs)
                        addrs[i] = new_insn - first_insn;

                switch (fp->code) {
                /* All arithmetic insns and skb loads map as-is. */
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_X:
                case BPF_ALU | BPF_MOD | BPF_K:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_ABS | BPF_W:
                case BPF_LD | BPF_ABS | BPF_H:
                case BPF_LD | BPF_ABS | BPF_B:
                case BPF_LD | BPF_IND | BPF_W:
                case BPF_LD | BPF_IND | BPF_H:
                case BPF_LD | BPF_IND | BPF_B:
                        /* Check for overloaded BPF extension and
                         * directly convert it if found, otherwise
                         * just move on with mapping.
                         */
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            BPF_MODE(fp->code) == BPF_ABS &&
                            convert_bpf_extensions(fp, &insn))
                                break;
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            convert_bpf_ld_abs(fp, &insn)) {
                                *seen_ld_abs = true;
                                break;
                        }

                        if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
                            fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
                                *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
                                /* Error with exception code on div/mod by 0.
                                 * For cBPF programs, this was always return 0.
                                 */
                                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
                                *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                                *insn++ = BPF_EXIT_INSN();
                        }

                        *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;

                /* Jump transformation cannot use BPF block macros
                 * everywhere as offset calculation and target updates
                 * require a bit more work than the rest, i.e. jump
                 * opcodes map as-is, but offsets need adjustment.
                 */

#define BPF_EMIT_JMP                                                    \
        do {                                                            \
                const s32 off_min = S16_MIN, off_max = S16_MAX;         \
                s32 off;                                                \
                                                                        \
                if (target >= len || target < 0)                        \
                        goto err;                                       \
                off = addrs ? addrs[target] - addrs[i] - 1 : 0;         \
                /* Adjust pc relative offset for 2nd or 3rd insn. */    \
                off -= insn - tmp_insns;                                \
                /* Reject anything not fitting into insn->off. */       \
                if (off < off_min || off > off_max)                     \
                        goto err;                                       \
                insn->off = off;                                        \
        } while (0)

                case BPF_JMP | BPF_JA:
                        target = i + fp->k + 1;
                        insn->code = fp->code;
                        BPF_EMIT_JMP;
                        break;

                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
                                /* BPF immediates are signed, zero extend
                                 * immediate into tmp register and use it
                                 * in compare insn.
                                 */
                                *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

                                insn->dst_reg = BPF_REG_A;
                                insn->src_reg = BPF_REG_TMP;
                                bpf_src = BPF_X;
                        } else {
                                insn->dst_reg = BPF_REG_A;
                                insn->imm = fp->k;
                                bpf_src = BPF_SRC(fp->code);
                                insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
                        }

                        /* Common case where 'jump_false' is next insn. */
                        if (fp->jf == 0) {
                                insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                                target = i + fp->jt + 1;
                                BPF_EMIT_JMP;
                                break;
                        }

                        /* Convert some jumps when 'jump_true' is next insn. */
                        if (fp->jt == 0) {
                                switch (BPF_OP(fp->code)) {
                                case BPF_JEQ:
                                        insn->code = BPF_JMP | BPF_JNE | bpf_src;
                                        break;
                                case BPF_JGT:
                                        insn->code = BPF_JMP | BPF_JLE | bpf_src;
                                        break;
                                case BPF_JGE:
                                        insn->code = BPF_JMP | BPF_JLT | bpf_src;
                                        break;
                                default:
                                        goto jmp_rest;
                                }

                                target = i + fp->jf + 1;
                                BPF_EMIT_JMP;
                                break;
                        }
jmp_rest:
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                        BPF_EMIT_JMP;
                        insn++;

                        insn->code = BPF_JMP | BPF_JA;
                        target = i + fp->jf + 1;
                        BPF_EMIT_JMP;
                        break;

                /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B: {
                        struct sock_filter tmp = {
                                .code   = BPF_LD | BPF_ABS | BPF_B,
                                .k      = fp->k,
                        };

                        *seen_ld_abs = true;

                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = BPF_R0 = *(u8 *) (skb->data + K) */
                        convert_bpf_ld_abs(&tmp, &insn);
                        insn++;
                        /* A &= 0xf */
                        *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
                        /* A <<= 2 */
                        *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
                        /* tmp = X */
                        *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = tmp */
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
                }
                /* RET_K is remapped into 2 insns. RET_A case doesn't need an
                 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
                 */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
                        if (BPF_RVAL(fp->code) == BPF_K)
                                *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
                                                        0, fp->k);
                        *insn = BPF_EXIT_INSN();
                        break;

                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
                        stack_off = fp->k * 4  + 4;
                        *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
                                            BPF_ST ? BPF_REG_A : BPF_REG_X,
                                            -stack_off);
                        /* check_load_and_stores() verifies that classic BPF can
                         * load from stack only after write, so tracking
                         * stack_depth for ST|STX insns is enough
                         */
                        if (new_prog && new_prog->aux->stack_depth < stack_off)
                                new_prog->aux->stack_depth = stack_off;
                        break;

                /* Load from stack. */
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        stack_off = fp->k * 4  + 4;
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_FP,
                                            -stack_off);
                        break;

                /* A = K or X = K */
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                        *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
                                              BPF_REG_A : BPF_REG_X, fp->k);
                        break;

                /* X = A */
                case BPF_MISC | BPF_TAX:
                        *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        break;

                /* A = X */
                case BPF_MISC | BPF_TXA:
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
                        break;

                /* A = skb->len or X = skb->len */
                case BPF_LD | BPF_W | BPF_LEN:
                case BPF_LDX | BPF_W | BPF_LEN:
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
                                            offsetof(struct sk_buff, len));
                        break;

                /* Access seccomp_data fields. */
                case BPF_LDX | BPF_ABS | BPF_W:
                        /* A = *(u32 *) (ctx + K) */
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;

                /* Unknown instruction. */
                default:
                        goto err;
                }

                insn++;
                if (new_prog)
                        memcpy(new_insn, tmp_insns,
                               sizeof(*insn) * (insn - tmp_insns));
                new_insn += insn - tmp_insns;
        }

        if (!new_prog) {
                /* Only calculating new length. */
                *new_len = new_insn - first_insn;
                if (*seen_ld_abs)
                        *new_len += 4; /* Prologue bits. */
                return 0;
        }

        pass++;
        if (new_flen != new_insn - first_insn) {
                new_flen = new_insn - first_insn;
                if (pass > 2)
                        goto err;
                goto do_pass;
        }

        kfree(addrs);
        BUG_ON(*new_len != new_flen);
        return 0;
err:
        kfree(addrs);
        return -EINVAL;
}
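
/* A sketch of the two-pass calling convention described in the comment
 * above (bpf_migrate_filter() further below is the real in-tree caller;
 * guarded by #if 0 as it is a fragment that assumes old_prog/old_len are
 * already set up):
 */
#if 0
        bool seen_ld_abs = false;
        int new_len;

        /* 1st call: only compute the resulting eBPF program length. */
        if (bpf_convert_filter(old_prog, old_len, NULL, &new_len,
                               &seen_ld_abs))
                goto err;

        new_prog = bpf_prog_alloc(bpf_prog_size(new_len), 0);
        if (!new_prog)
                goto err;
        new_prog->len = new_len;

        /* 2nd call: emit instructions and resolve the jump offsets. */
        if (bpf_convert_filter(old_prog, old_len, new_prog, &new_len,
                               &seen_ld_abs))
                goto err;
#endif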

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell unless it was previously written to, and we check all
 * branches to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
        u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;

        BUILD_BUG_ON(BPF_MEMWORDS > 16);

        masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;

        memset(masks, 0xff, flen * sizeof(*masks));

        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];

                switch (filter[pc].code) {
                case BPF_ST:
                case BPF_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
                case BPF_JMP | BPF_JA:
                        /* A jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* A jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
                        break;
                }
        }
error:
        kfree(masks);
        return ret;
}
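
/* For example, check_load_and_stores() rejects this filter, which loads
 * scratch cell M[0] before anything was ever stored there (illustrative
 * only, guarded by #if 0):
 */
#if 0
struct sock_filter reads_uninit_mem[] = {
        BPF_STMT(BPF_LD | BPF_MEM, 0),  /* A = M[0], never written: -EINVAL */
        BPF_STMT(BPF_RET | BPF_A, 0),
};
#endif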

static bool chk_code_allowed(u16 code_to_probe)
{
        static const bool codes[] = {
                /* 32 bit ALU operations */
                [BPF_ALU | BPF_ADD | BPF_K] = true,
                [BPF_ALU | BPF_ADD | BPF_X] = true,
                [BPF_ALU | BPF_SUB | BPF_K] = true,
                [BPF_ALU | BPF_SUB | BPF_X] = true,
                [BPF_ALU | BPF_MUL | BPF_K] = true,
                [BPF_ALU | BPF_MUL | BPF_X] = true,
                [BPF_ALU | BPF_DIV | BPF_K] = true,
                [BPF_ALU | BPF_DIV | BPF_X] = true,
                [BPF_ALU | BPF_MOD | BPF_K] = true,
                [BPF_ALU | BPF_MOD | BPF_X] = true,
                [BPF_ALU | BPF_AND | BPF_K] = true,
                [BPF_ALU | BPF_AND | BPF_X] = true,
                [BPF_ALU | BPF_OR | BPF_K] = true,
                [BPF_ALU | BPF_OR | BPF_X] = true,
                [BPF_ALU | BPF_XOR | BPF_K] = true,
                [BPF_ALU | BPF_XOR | BPF_X] = true,
                [BPF_ALU | BPF_LSH | BPF_K] = true,
                [BPF_ALU | BPF_LSH | BPF_X] = true,
                [BPF_ALU | BPF_RSH | BPF_K] = true,
                [BPF_ALU | BPF_RSH | BPF_X] = true,
                [BPF_ALU | BPF_NEG] = true,
                /* Load instructions */
                [BPF_LD | BPF_W | BPF_ABS] = true,
                [BPF_LD | BPF_H | BPF_ABS] = true,
                [BPF_LD | BPF_B | BPF_ABS] = true,
                [BPF_LD | BPF_W | BPF_LEN] = true,
                [BPF_LD | BPF_W | BPF_IND] = true,
                [BPF_LD | BPF_H | BPF_IND] = true,
                [BPF_LD | BPF_B | BPF_IND] = true,
                [BPF_LD | BPF_IMM] = true,
                [BPF_LD | BPF_MEM] = true,
                [BPF_LDX | BPF_W | BPF_LEN] = true,
                [BPF_LDX | BPF_B | BPF_MSH] = true,
                [BPF_LDX | BPF_IMM] = true,
                [BPF_LDX | BPF_MEM] = true,
                /* Store instructions */
                [BPF_ST] = true,
                [BPF_STX] = true,
                /* Misc instructions */
                [BPF_MISC | BPF_TAX] = true,
                [BPF_MISC | BPF_TXA] = true,
                /* Return instructions */
                [BPF_RET | BPF_K] = true,
                [BPF_RET | BPF_A] = true,
                /* Jump instructions */
                [BPF_JMP | BPF_JA] = true,
                [BPF_JMP | BPF_JEQ | BPF_K] = true,
                [BPF_JMP | BPF_JEQ | BPF_X] = true,
                [BPF_JMP | BPF_JGE | BPF_K] = true,
                [BPF_JMP | BPF_JGE | BPF_X] = true,
                [BPF_JMP | BPF_JGT | BPF_K] = true,
                [BPF_JMP | BPF_JGT | BPF_X] = true,
                [BPF_JMP | BPF_JSET | BPF_K] = true,
                [BPF_JMP | BPF_JSET | BPF_X] = true,
        };

        if (code_to_probe >= ARRAY_SIZE(codes))
                return false;

        return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
                                unsigned int flen)
{
        if (filter == NULL)
                return false;
        if (flen == 0 || flen > BPF_MAXINSNS)
                return false;

        return true;
}

/**
 *      bpf_check_classic - verify socket filter code
 *      @filter: filter to verify
 *      @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
                             unsigned int flen)
{
        bool anc_found;
        int pc;

        /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                const struct sock_filter *ftest = &filter[pc];

                /* May we actually operate on this code? */
                if (!chk_code_allowed(ftest->code))
                        return -EINVAL;

                /* Some instructions need special checks */
                switch (ftest->code) {
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_K:
                        /* Check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_K:
                        if (ftest->k >= 32)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                        /* Check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JA:
                        /* Note, the large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
                        if (ftest->k >= (unsigned int)(flen - pc - 1))
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* Both conditionals must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_W | BPF_ABS:
                case BPF_LD | BPF_H | BPF_ABS:
                case BPF_LD | BPF_B | BPF_ABS:
                        anc_found = false;
                        if (bpf_anc_helper(ftest) & BPF_ANC)
                                anc_found = true;
                        /* Ancillary operation unknown or unsupported */
                        if (anc_found == false && ftest->k >= SKF_AD_OFF)
                                return -EINVAL;
                }
        }

        /* Last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_RET | BPF_K:
        case BPF_RET | BPF_A:
                return check_load_and_stores(filter, flen);
        }

        return -EINVAL;
}
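
/* Two illustrative programs that bpf_check_classic() rejects (guarded by
 * #if 0, not built): a division by the constant zero, and a program that
 * does not end in a RET instruction.
 */
#if 0
struct sock_filter div_by_zero[] = {
        BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0), /* A /= 0: -EINVAL */
        BPF_STMT(BPF_RET | BPF_A, 0),
};

struct sock_filter no_ret[] = {
        BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),  /* A = skb->len */
};                                              /* no trailing RET: -EINVAL */
#endif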

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
                                      const struct sock_fprog *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct sock_fprog_kern *fkprog;

        fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
        if (!fp->orig_prog)
                return -ENOMEM;

        fkprog = fp->orig_prog;
        fkprog->len = fprog->len;

        fkprog->filter = kmemdup(fp->insns, fsize,
                                 GFP_KERNEL | __GFP_NOWARN);
        if (!fkprog->filter) {
                kfree(fp->orig_prog);
                return -ENOMEM;
        }

        return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
        struct sock_fprog_kern *fprog = fp->orig_prog;

        if (fprog) {
                kfree(fprog->filter);
                kfree(fprog);
        }
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
        if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(prog);
        } else {
                bpf_release_orig_filter(prog);
                bpf_prog_free(prog);
        }
}

static void __sk_filter_release(struct sk_filter *fp)
{
        __bpf_prog_release(fp->prog);
        kfree(fp);
}

/**
 *      sk_filter_release_rcu - Release a socket filter by rcu_head
 *      @rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

        __sk_filter_release(fp);
}

/**
 *      sk_filter_release - release a socket filter
 *      @fp: filter to remove
 *
 *      Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
        if (refcount_dec_and_test(&fp->refcnt))
                call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        atomic_sub(filter_size, &sk->sk_omem_alloc);
        sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        /* same check as in sock_kmalloc() */
        if (filter_size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
                atomic_add(filter_size, &sk->sk_omem_alloc);
                return true;
        }
        return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        if (!refcount_inc_not_zero(&fp->refcnt))
                return false;

        if (!__sk_filter_charge(sk, fp)) {
                sk_filter_release(fp);
                return false;
        }
        return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
        struct sock_filter *old_prog;
        struct bpf_prog *old_fp;
        int err, new_len, old_len = fp->len;
        bool seen_ld_abs = false;

        /* We are free to overwrite insns et al right here as it
         * won't be used at this point in time anymore internally
         * after the migration to the internal BPF instruction
         * representation.
         */
        BUILD_BUG_ON(sizeof(struct sock_filter) !=
                     sizeof(struct bpf_insn));

        /* Conversion cannot happen on overlapping memory areas,
         * so we need to keep the user BPF around until the 2nd
         * pass. At this time, the user BPF is stored in fp->insns.
         */
        old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
                           GFP_KERNEL | __GFP_NOWARN);
        if (!old_prog) {
                err = -ENOMEM;
                goto out_err;
        }

        /* 1st pass: calculate the new program length. */
        err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
                                 &seen_ld_abs);
        if (err)
                goto out_err_free;

        /* Expand fp for appending the new filter representation. */
        old_fp = fp;
        fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
        if (!fp) {
                /* The old_fp is still around in case we couldn't
                 * allocate new memory, so uncharge on that one.
                 */
                fp = old_fp;
                err = -ENOMEM;
                goto out_err_free;
        }

        fp->len = new_len;

        /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
        err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
                                 &seen_ld_abs);
        if (err)
                /* 2nd bpf_convert_filter() can fail only if it fails
                 * to allocate memory, remapping must succeed. Note,
                 * that at this time old_fp has already been released
                 * by krealloc().
                 */
                goto out_err_free;

        fp = bpf_prog_select_runtime(fp, &err);
        if (err)
                goto out_err_free;

        kfree(old_prog);
        return fp;

out_err_free:
        kfree(old_prog);
out_err:
        __bpf_prog_release(fp);
        return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
                                           bpf_aux_classic_check_t trans)
{
        int err;

        fp->bpf_func = NULL;
        fp->jited = 0;

        err = bpf_check_classic(fp->insns, fp->len);
        if (err) {
                __bpf_prog_release(fp);
                return ERR_PTR(err);
        }

        /* There might be additional checks and transformations
         * needed on classic filters, f.e. in case of seccomp.
         */
        if (trans) {
                err = trans(fp->insns, fp->len);
                if (err) {
                        __bpf_prog_release(fp);
                        return ERR_PTR(err);
                }
        }

        /* Probe if we can JIT compile the filter and if so, do
         * the compilation of the filter.
         */
        bpf_jit_compile(fp);

        /* JIT compiler couldn't process this filter, so do the
         * internal BPF translation for the optimized interpreter.
         */
        if (!fp->jited)
                fp = bpf_migrate_filter(fp);

        return fp;
}

/**
 *      bpf_prog_create - create an unattached filter
 *      @pfp: the unattached filter that is created
 *      @fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct bpf_prog *fp;

        /* Make sure new filter is there and in the right amounts. */
        if (!bpf_check_basics_ok(fprog->filter, fprog->len))
                return -EINVAL;

        fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
        if (!fp)
                return -ENOMEM;

        memcpy(fp->insns, fprog->filter, fsize);

        fp->len = fprog->len;
        /* Since unattached filters are not copied back to user
         * space through sk_get_filter(), we do not need to hold
         * a copy here, and can spare us the work.
         */
        fp->orig_prog = NULL;

        /* bpf_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
        fp = bpf_prepare_filter(fp, NULL);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        *pfp = fp;
        return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
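
/* A minimal in-kernel usage sketch for bpf_prog_create() (illustrative
 * only, guarded by #if 0): build an unattached "accept everything" classic
 * filter from a kernel-resident program.
 */
#if 0
static struct bpf_prog *build_accept_all(void)
{
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xffffffff),  /* keep whole packet */
        };
        struct sock_fprog_kern fprog = {
                .len    = ARRAY_SIZE(insns),
                .filter = insns,
        };
        struct bpf_prog *prog;

        if (bpf_prog_create(&prog, &fprog))
                return NULL;
        return prog;
}
#endif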
1359
1360 /**
1361  *      bpf_prog_create_from_user - create an unattached filter from user buffer
1362  *      @pfp: the unattached filter that is created
1363  *      @fprog: the filter program
1364  *      @trans: post-classic verifier transformation handler
1365  *      @save_orig: save classic BPF program
1366  *
1367  * This function effectively does the same as bpf_prog_create(), only
1368  * that it builds up its insns buffer from user space provided buffer.
1369  * It also allows for passing a bpf_aux_classic_check_t handler.
1370  */
1371 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1372                               bpf_aux_classic_check_t trans, bool save_orig)
1373 {
1374         unsigned int fsize = bpf_classic_proglen(fprog);
1375         struct bpf_prog *fp;
1376         int err;
1377
1378         /* Make sure the new filter is there and of a valid size. */
1379         if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1380                 return -EINVAL;
1381
1382         fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1383         if (!fp)
1384                 return -ENOMEM;
1385
1386         if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1387                 __bpf_prog_free(fp);
1388                 return -EFAULT;
1389         }
1390
1391         fp->len = fprog->len;
1392         fp->orig_prog = NULL;
1393
1394         if (save_orig) {
1395                 err = bpf_prog_store_orig_filter(fp, fprog);
1396                 if (err) {
1397                         __bpf_prog_free(fp);
1398                         return -ENOMEM;
1399                 }
1400         }
1401
1402         /* bpf_prepare_filter() already takes care of freeing
1403          * memory in case something goes wrong.
1404          */
1405         fp = bpf_prepare_filter(fp, trans);
1406         if (IS_ERR(fp))
1407                 return PTR_ERR(fp);
1408
1409         *pfp = fp;
1410         return 0;
1411 }
1412 EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
1413
1414 void bpf_prog_destroy(struct bpf_prog *fp)
1415 {
1416         __bpf_prog_release(fp);
1417 }
1418 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1419
1420 static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
1421 {
1422         struct sk_filter *fp, *old_fp;
1423
1424         fp = kmalloc(sizeof(*fp), GFP_KERNEL);
1425         if (!fp)
1426                 return -ENOMEM;
1427
1428         fp->prog = prog;
1429
1430         if (!__sk_filter_charge(sk, fp)) {
1431                 kfree(fp);
1432                 return -ENOMEM;
1433         }
1434         refcount_set(&fp->refcnt, 1);
1435
1436         old_fp = rcu_dereference_protected(sk->sk_filter,
1437                                            lockdep_sock_is_held(sk));
1438         rcu_assign_pointer(sk->sk_filter, fp);
1439
1440         if (old_fp)
1441                 sk_filter_uncharge(sk, old_fp);
1442
1443         return 0;
1444 }
1445
1446 static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
1447 {
1448         struct bpf_prog *old_prog;
1449         int err;
1450
1451         if (bpf_prog_size(prog->len) > sysctl_optmem_max)
1452                 return -ENOMEM;
1453
1454         if (sk_unhashed(sk) && sk->sk_reuseport) {
1455                 err = reuseport_alloc(sk);
1456                 if (err)
1457                         return err;
1458         } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
1459                 /* The socket wasn't bound with SO_REUSEPORT */
1460                 return -EINVAL;
1461         }
1462
1463         old_prog = reuseport_attach_prog(sk, prog);
1464         if (old_prog)
1465                 bpf_prog_destroy(old_prog);
1466
1467         return 0;
1468 }
1469
1470 static
1471 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1472 {
1473         unsigned int fsize = bpf_classic_proglen(fprog);
1474         struct bpf_prog *prog;
1475         int err;
1476
1477         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1478                 return ERR_PTR(-EPERM);
1479
1480         /* Make sure the new filter is there and of a valid size. */
1481         if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1482                 return ERR_PTR(-EINVAL);
1483
1484         prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1485         if (!prog)
1486                 return ERR_PTR(-ENOMEM);
1487
1488         if (copy_from_user(prog->insns, fprog->filter, fsize)) {
1489                 __bpf_prog_free(prog);
1490                 return ERR_PTR(-EFAULT);
1491         }
1492
1493         prog->len = fprog->len;
1494
1495         err = bpf_prog_store_orig_filter(prog, fprog);
1496         if (err) {
1497                 __bpf_prog_free(prog);
1498                 return ERR_PTR(-ENOMEM);
1499         }
1500
1501         /* bpf_prepare_filter() already takes care of freeing
1502          * memory in case something goes wrong.
1503          */
1504         return bpf_prepare_filter(prog, NULL);
1505 }
1506
1507 /**
1508  *      sk_attach_filter - attach a socket filter
1509  *      @fprog: the filter program
1510  *      @sk: the socket to use
1511  *
1512  * Attach the user's filter code. We first run some sanity checks on
1513  * it to make sure it does not explode on us later. If an error
1514  * occurs or there is insufficient memory for the filter a negative
1515  * errno code is returned. On success the return is zero.
1516  */
1517 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1518 {
1519         struct bpf_prog *prog = __get_filter(fprog, sk);
1520         int err;
1521
1522         if (IS_ERR(prog))
1523                 return PTR_ERR(prog);
1524
1525         err = __sk_attach_prog(prog, sk);
1526         if (err < 0) {
1527                 __bpf_prog_release(prog);
1528                 return err;
1529         }
1530
1531         return 0;
1532 }
1533 EXPORT_SYMBOL_GPL(sk_attach_filter);
1534
1535 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1536 {
1537         struct bpf_prog *prog = __get_filter(fprog, sk);
1538         int err;
1539
1540         if (IS_ERR(prog))
1541                 return PTR_ERR(prog);
1542
1543         err = __reuseport_attach_prog(prog, sk);
1544         if (err < 0) {
1545                 __bpf_prog_release(prog);
1546                 return err;
1547         }
1548
1549         return 0;
1550 }
1551
1552 static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
1553 {
1554         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1555                 return ERR_PTR(-EPERM);
1556
1557         return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1558 }
1559
1560 int sk_attach_bpf(u32 ufd, struct sock *sk)
1561 {
1562         struct bpf_prog *prog = __get_bpf(ufd, sk);
1563         int err;
1564
1565         if (IS_ERR(prog))
1566                 return PTR_ERR(prog);
1567
1568         err = __sk_attach_prog(prog, sk);
1569         if (err < 0) {
1570                 bpf_prog_put(prog);
1571                 return err;
1572         }
1573
1574         return 0;
1575 }
1576
1577 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
1578 {
1579         struct bpf_prog *prog = __get_bpf(ufd, sk);
1580         int err;
1581
1582         if (IS_ERR(prog))
1583                 return PTR_ERR(prog);
1584
1585         err = __reuseport_attach_prog(prog, sk);
1586         if (err < 0) {
1587                 bpf_prog_put(prog);
1588                 return err;
1589         }
1590
1591         return 0;
1592 }
1593
1594 struct bpf_scratchpad {
1595         union {
1596                 __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
1597                 u8     buff[MAX_BPF_STACK];
1598         };
1599 };
1600
1601 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
1602
1603 static inline int __bpf_try_make_writable(struct sk_buff *skb,
1604                                           unsigned int write_len)
1605 {
1606         return skb_ensure_writable(skb, write_len);
1607 }
1608
1609 static inline int bpf_try_make_writable(struct sk_buff *skb,
1610                                         unsigned int write_len)
1611 {
1612         int err = __bpf_try_make_writable(skb, write_len);
1613
1614         bpf_compute_data_pointers(skb);
1615         return err;
1616 }
1617
1618 static int bpf_try_make_head_writable(struct sk_buff *skb)
1619 {
1620         return bpf_try_make_writable(skb, skb_headlen(skb));
1621 }
1622
1623 static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
1624 {
1625         if (skb_at_tc_ingress(skb))
1626                 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1627 }
1628
1629 static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
1630 {
1631         if (skb_at_tc_ingress(skb))
1632                 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1633 }
1634
1635 BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1636            const void *, from, u32, len, u64, flags)
1637 {
1638         void *ptr;
1639
1640         if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1641                 return -EINVAL;
1642         if (unlikely(offset > 0xffff))
1643                 return -EFAULT;
1644         if (unlikely(bpf_try_make_writable(skb, offset + len)))
1645                 return -EFAULT;
1646
1647         ptr = skb->data + offset;
1648         if (flags & BPF_F_RECOMPUTE_CSUM)
1649                 __skb_postpull_rcsum(skb, ptr, len, offset);
1650
1651         memcpy(ptr, from, len);
1652
1653         if (flags & BPF_F_RECOMPUTE_CSUM)
1654                 __skb_postpush_rcsum(skb, ptr, len, offset);
1655         if (flags & BPF_F_INVALIDATE_HASH)
1656                 skb_clear_hash(skb);
1657
1658         return 0;
1659 }
1660
1661 static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1662         .func           = bpf_skb_store_bytes,
1663         .gpl_only       = false,
1664         .ret_type       = RET_INTEGER,
1665         .arg1_type      = ARG_PTR_TO_CTX,
1666         .arg2_type      = ARG_ANYTHING,
1667         .arg3_type      = ARG_PTR_TO_MEM,
1668         .arg4_type      = ARG_CONST_SIZE,
1669         .arg5_type      = ARG_ANYTHING,
1670 };
1671
1672 BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1673            void *, to, u32, len)
1674 {
1675         void *ptr;
1676
1677         if (unlikely(offset > 0xffff))
1678                 goto err_clear;
1679
1680         ptr = skb_header_pointer(skb, offset, len, to);
1681         if (unlikely(!ptr))
1682                 goto err_clear;
1683         if (ptr != to)
1684                 memcpy(to, ptr, len);
1685
1686         return 0;
1687 err_clear:
1688         memset(to, 0, len);
1689         return -EFAULT;
1690 }
1691
1692 static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1693         .func           = bpf_skb_load_bytes,
1694         .gpl_only       = false,
1695         .ret_type       = RET_INTEGER,
1696         .arg1_type      = ARG_PTR_TO_CTX,
1697         .arg2_type      = ARG_ANYTHING,
1698         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1699         .arg4_type      = ARG_CONST_SIZE,
1700 };
1701
1702 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1703            u32, offset, void *, to, u32, len, u32, start_header)
1704 {
1705         u8 *ptr;
1706
1707         if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
1708                 goto err_clear;
1709
1710         switch (start_header) {
1711         case BPF_HDR_START_MAC:
1712                 ptr = skb_mac_header(skb) + offset;
1713                 break;
1714         case BPF_HDR_START_NET:
1715                 ptr = skb_network_header(skb) + offset;
1716                 break;
1717         default:
1718                 goto err_clear;
1719         }
1720
1721         if (likely(ptr >= skb_mac_header(skb) &&
1722                    ptr + len <= skb_tail_pointer(skb))) {
1723                 memcpy(to, ptr, len);
1724                 return 0;
1725         }
1726
1727 err_clear:
1728         memset(to, 0, len);
1729         return -EFAULT;
1730 }
1731
1732 static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
1733         .func           = bpf_skb_load_bytes_relative,
1734         .gpl_only       = false,
1735         .ret_type       = RET_INTEGER,
1736         .arg1_type      = ARG_PTR_TO_CTX,
1737         .arg2_type      = ARG_ANYTHING,
1738         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1739         .arg4_type      = ARG_CONST_SIZE,
1740         .arg5_type      = ARG_ANYTHING,
1741 };
1742
1743 BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1744 {
1745         /* The idea is the following: should the needed direct read/write
1746          * test fail at runtime, we can pull in more data and redo the
1747          * test, since implicitly, we invalidate previous checks here.
1748          *
1749          * Or, since we know how much we need to make readable/writable,
1750          * this can be done once at the program beginning for the direct
1751          * access case. By this we overcome limitations of only the
1752          * current headroom being accessible.
1753          */
1754         return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1755 }
1756
1757 static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1758         .func           = bpf_skb_pull_data,
1759         .gpl_only       = false,
1760         .ret_type       = RET_INTEGER,
1761         .arg1_type      = ARG_PTR_TO_CTX,
1762         .arg2_type      = ARG_ANYTHING,
1763 };
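
/* Illustrative sketch, not part of this file: a tc BPF program can call
 * this helper once up front so that subsequent direct packet access no
 * longer fails on a non-linear skb. Assumes clang plus libbpf-style
 * helper declarations; the program name is hypothetical.
 *
 *	SEC("classifier")
 *	int pull_then_parse(struct __sk_buff *skb)
 *	{
 *		void *data, *data_end;
 *		struct ethhdr *eth;
 *
 *		bpf_skb_pull_data(skb, 0);
 *		data     = (void *)(long)skb->data;
 *		data_end = (void *)(long)skb->data_end;
 *		eth = data;
 *		if (data + sizeof(*eth) > data_end)
 *			return TC_ACT_SHOT;
 *		return eth->h_proto == bpf_htons(ETH_P_IP) ?
 *		       TC_ACT_OK : TC_ACT_SHOT;
 *	}
 *
 * Note that the helper invalidates previously derived data/data_end
 * pointers via bpf_compute_data_pointers(), so they must be reloaded.
 */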
1764
1765 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1766            u64, from, u64, to, u64, flags)
1767 {
1768         __sum16 *ptr;
1769
1770         if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1771                 return -EINVAL;
1772         if (unlikely(offset > 0xffff || offset & 1))
1773                 return -EFAULT;
1774         if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1775                 return -EFAULT;
1776
1777         ptr = (__sum16 *)(skb->data + offset);
1778         switch (flags & BPF_F_HDR_FIELD_MASK) {
1779         case 0:
1780                 if (unlikely(from != 0))
1781                         return -EINVAL;
1782
1783                 csum_replace_by_diff(ptr, to);
1784                 break;
1785         case 2:
1786                 csum_replace2(ptr, from, to);
1787                 break;
1788         case 4:
1789                 csum_replace4(ptr, from, to);
1790                 break;
1791         default:
1792                 return -EINVAL;
1793         }
1794
1795         return 0;
1796 }
1797
1798 static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
1799         .func           = bpf_l3_csum_replace,
1800         .gpl_only       = false,
1801         .ret_type       = RET_INTEGER,
1802         .arg1_type      = ARG_PTR_TO_CTX,
1803         .arg2_type      = ARG_ANYTHING,
1804         .arg3_type      = ARG_ANYTHING,
1805         .arg4_type      = ARG_ANYTHING,
1806         .arg5_type      = ARG_ANYTHING,
1807 };
1808
1809 BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
1810            u64, from, u64, to, u64, flags)
1811 {
1812         bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
1813         bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
1814         bool do_mforce = flags & BPF_F_MARK_ENFORCE;
1815         __sum16 *ptr;
1816
1817         if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
1818                                BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
1819                 return -EINVAL;
1820         if (unlikely(offset > 0xffff || offset & 1))
1821                 return -EFAULT;
1822         if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1823                 return -EFAULT;
1824
1825         ptr = (__sum16 *)(skb->data + offset);
1826         if (is_mmzero && !do_mforce && !*ptr)
1827                 return 0;
1828
1829         switch (flags & BPF_F_HDR_FIELD_MASK) {
1830         case 0:
1831                 if (unlikely(from != 0))
1832                         return -EINVAL;
1833
1834                 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
1835                 break;
1836         case 2:
1837                 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1838                 break;
1839         case 4:
1840                 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1841                 break;
1842         default:
1843                 return -EINVAL;
1844         }
1845
1846         if (is_mmzero && !*ptr)
1847                 *ptr = CSUM_MANGLED_0;
1848         return 0;
1849 }
1850
1851 static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
1852         .func           = bpf_l4_csum_replace,
1853         .gpl_only       = false,
1854         .ret_type       = RET_INTEGER,
1855         .arg1_type      = ARG_PTR_TO_CTX,
1856         .arg2_type      = ARG_ANYTHING,
1857         .arg3_type      = ARG_ANYTHING,
1858         .arg4_type      = ARG_ANYTHING,
1859         .arg5_type      = ARG_ANYTHING,
1860 };
1861
1862 BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
1863            __be32 *, to, u32, to_size, __wsum, seed)
1864 {
1865         struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
1866         u32 diff_size = from_size + to_size;
1867         int i, j = 0;
1868
1869         /* This is quite flexible, some examples:
1870          *
1871          * from_size == 0, to_size > 0,  seed := csum --> pushing data
1872          * from_size > 0,  to_size == 0, seed := csum --> pulling data
1873          * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
1874          *
1875          * Even for diffing, from_size and to_size don't need to be equal.
1876          */
1877         if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
1878                      diff_size > sizeof(sp->diff)))
1879                 return -EINVAL;
1880
1881         for (i = 0; i < from_size / sizeof(__be32); i++, j++)
1882                 sp->diff[j] = ~from[i];
1883         for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
1884                 sp->diff[j] = to[i];
1885
1886         return csum_partial(sp->diff, diff_size, seed);
1887 }
1888
1889 static const struct bpf_func_proto bpf_csum_diff_proto = {
1890         .func           = bpf_csum_diff,
1891         .gpl_only       = false,
1892         .pkt_access     = true,
1893         .ret_type       = RET_INTEGER,
1894         .arg1_type      = ARG_PTR_TO_MEM_OR_NULL,
1895         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
1896         .arg3_type      = ARG_PTR_TO_MEM_OR_NULL,
1897         .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
1898         .arg5_type      = ARG_ANYTHING,
1899 };
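
/* Illustrative sketch, not part of this file: the typical pairing is to
 * diff the old against the new bytes and feed the result into
 * bpf_l3_csum_replace()/bpf_l4_csum_replace() after a rewrite. The
 * offset macros and the new address are hypothetical.
 *
 *	__be32 old_ip = iph->daddr, new_ip = bpf_htonl(0x0a000001);
 *	__s64 diff;
 *
 *	diff = bpf_csum_diff(&old_ip, sizeof(old_ip),
 *			     &new_ip, sizeof(new_ip), 0);
 *	bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
 *	bpf_l3_csum_replace(skb, IP_CSUM_OFF, 0, diff, 0);
 *	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, 0, diff, BPF_F_PSEUDO_HDR);
 */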
1900
1901 BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
1902 {
1903         /* The interface is to be used in combination with bpf_csum_diff()
1904          * for direct packet writes. csum rotation for alignment as well
1905          * as emulating csum_sub() can be done from the eBPF program.
1906          */
1907         if (skb->ip_summed == CHECKSUM_COMPLETE)
1908                 return (skb->csum = csum_add(skb->csum, csum));
1909
1910         return -ENOTSUPP;
1911 }
1912
1913 static const struct bpf_func_proto bpf_csum_update_proto = {
1914         .func           = bpf_csum_update,
1915         .gpl_only       = false,
1916         .ret_type       = RET_INTEGER,
1917         .arg1_type      = ARG_PTR_TO_CTX,
1918         .arg2_type      = ARG_ANYTHING,
1919 };
1920
1921 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1922 {
1923         return dev_forward_skb(dev, skb);
1924 }
1925
1926 static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
1927                                       struct sk_buff *skb)
1928 {
1929         int ret = ____dev_forward_skb(dev, skb);
1930
1931         if (likely(!ret)) {
1932                 skb->dev = dev;
1933                 ret = netif_rx(skb);
1934         }
1935
1936         return ret;
1937 }
1938
1939 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1940 {
1941         int ret;
1942
1943         if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
1944                 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
1945                 kfree_skb(skb);
1946                 return -ENETDOWN;
1947         }
1948
1949         skb->dev = dev;
1950
1951         __this_cpu_inc(xmit_recursion);
1952         ret = dev_queue_xmit(skb);
1953         __this_cpu_dec(xmit_recursion);
1954
1955         return ret;
1956 }
1957
1958 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
1959                                  u32 flags)
1960 {
1961         /* skb->mac_len is not set on normal egress */
1962         unsigned int mlen = skb->network_header - skb->mac_header;
1963
1964         __skb_pull(skb, mlen);
1965
1966         /* At ingress, the mac header has already been pulled once.
1967          * At egress, skb_postpull_rcsum has to be done in case that
1968          * the skb originated from ingress (i.e. a forwarded skb)
1969          * to ensure that rcsum starts at the net header.
1970          */
1971         if (!skb_at_tc_ingress(skb))
1972                 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1973         skb_pop_mac_header(skb);
1974         skb_reset_mac_len(skb);
1975         return flags & BPF_F_INGRESS ?
1976                __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
1977 }
1978
1979 static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
1980                                  u32 flags)
1981 {
1982         /* Verify that a link layer header is carried */
1983         if (unlikely(skb->mac_header >= skb->network_header)) {
1984                 kfree_skb(skb);
1985                 return -ERANGE;
1986         }
1987
1988         bpf_push_mac_rcsum(skb);
1989         return flags & BPF_F_INGRESS ?
1990                __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1991 }
1992
1993 static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
1994                           u32 flags)
1995 {
1996         if (dev_is_mac_header_xmit(dev))
1997                 return __bpf_redirect_common(skb, dev, flags);
1998         else
1999                 return __bpf_redirect_no_mac(skb, dev, flags);
2000 }
2001
2002 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
2003 {
2004         struct net_device *dev;
2005         struct sk_buff *clone;
2006         int ret;
2007
2008         if (unlikely(flags & ~(BPF_F_INGRESS)))
2009                 return -EINVAL;
2010
2011         dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
2012         if (unlikely(!dev))
2013                 return -EINVAL;
2014
2015         clone = skb_clone(skb, GFP_ATOMIC);
2016         if (unlikely(!clone))
2017                 return -ENOMEM;
2018
2019         /* For direct write, we need to keep the invariant that the skbs
2020          * we're dealing with are uncloned. Should uncloning fail here,
2021          * we need to free the just generated clone so that the skb is
2022          * uncloned once again.
2023          */
2024         ret = bpf_try_make_head_writable(skb);
2025         if (unlikely(ret)) {
2026                 kfree_skb(clone);
2027                 return -ENOMEM;
2028         }
2029
2030         return __bpf_redirect(clone, dev, flags);
2031 }
2032
2033 static const struct bpf_func_proto bpf_clone_redirect_proto = {
2034         .func           = bpf_clone_redirect,
2035         .gpl_only       = false,
2036         .ret_type       = RET_INTEGER,
2037         .arg1_type      = ARG_PTR_TO_CTX,
2038         .arg2_type      = ARG_ANYTHING,
2039         .arg3_type      = ARG_ANYTHING,
2040 };
2041
2042 struct redirect_info {
2043         u32 ifindex;
2044         u32 flags;
2045         struct bpf_map *map;
2046         struct bpf_map *map_to_flush;
2047         unsigned long   map_owner;
2048 };
2049
2050 static DEFINE_PER_CPU(struct redirect_info, redirect_info);
2051
2052 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
2053 {
2054         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
2055
2056         if (unlikely(flags & ~(BPF_F_INGRESS)))
2057                 return TC_ACT_SHOT;
2058
2059         ri->ifindex = ifindex;
2060         ri->flags = flags;
2061
2062         return TC_ACT_REDIRECT;
2063 }
2064
2065 int skb_do_redirect(struct sk_buff *skb)
2066 {
2067         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
2068         struct net_device *dev;
2069
2070         dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
2071         ri->ifindex = 0;
2072         if (unlikely(!dev)) {
2073                 kfree_skb(skb);
2074                 return -EINVAL;
2075         }
2076
2077         return __bpf_redirect(skb, dev, ri->flags);
2078 }
2079
2080 static const struct bpf_func_proto bpf_redirect_proto = {
2081         .func           = bpf_redirect,
2082         .gpl_only       = false,
2083         .ret_type       = RET_INTEGER,
2084         .arg1_type      = ARG_ANYTHING,
2085         .arg2_type      = ARG_ANYTHING,
2086 };
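
/* Illustrative sketch, not part of this file: unlike bpf_clone_redirect(),
 * this variant queues no clone; the program returns the helper's verdict
 * and the skb itself is then handed to skb_do_redirect() from the
 * TC_ACT_REDIRECT path. The ifindex is hypothetical.
 *
 *	SEC("classifier")
 *	int redir(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect(IFINDEX_TARGET, 0);
 *	}
 *
 * For a mirror-style tap, bpf_clone_redirect(skb, IFINDEX_TARGET,
 * BPF_F_INGRESS) can instead be called mid-program, since it acts on a
 * clone and lets the original skb continue.
 */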
2087
2088 BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
2089            struct bpf_map *, map, void *, key, u64, flags)
2090 {
2091         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2092
2093         /* If the user passes invalid input, drop the packet. */
2094         if (unlikely(flags & ~(BPF_F_INGRESS)))
2095                 return SK_DROP;
2096
2097         tcb->bpf.flags = flags;
2098         tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
2099         if (!tcb->bpf.sk_redir)
2100                 return SK_DROP;
2101
2102         return SK_PASS;
2103 }
2104
2105 static const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
2106         .func           = bpf_sk_redirect_hash,
2107         .gpl_only       = false,
2108         .ret_type       = RET_INTEGER,
2109         .arg1_type      = ARG_PTR_TO_CTX,
2110         .arg2_type      = ARG_CONST_MAP_PTR,
2111         .arg3_type      = ARG_PTR_TO_MAP_KEY,
2112         .arg4_type      = ARG_ANYTHING,
2113 };
2114
2115 BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
2116            struct bpf_map *, map, u32, key, u64, flags)
2117 {
2118         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2119
2120         /* If the user passes invalid input, drop the packet. */
2121         if (unlikely(flags & ~(BPF_F_INGRESS)))
2122                 return SK_DROP;
2123
2124         tcb->bpf.flags = flags;
2125         tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
2126         if (!tcb->bpf.sk_redir)
2127                 return SK_DROP;
2128
2129         return SK_PASS;
2130 }
2131
2132 struct sock *do_sk_redirect_map(struct sk_buff *skb)
2133 {
2134         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2135
2136         return tcb->bpf.sk_redir;
2137 }
2138
2139 static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
2140         .func           = bpf_sk_redirect_map,
2141         .gpl_only       = false,
2142         .ret_type       = RET_INTEGER,
2143         .arg1_type      = ARG_PTR_TO_CTX,
2144         .arg2_type      = ARG_CONST_MAP_PTR,
2145         .arg3_type      = ARG_ANYTHING,
2146         .arg4_type      = ARG_ANYTHING,
2147 };
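
/* Illustrative sketch, not part of this file: an SK_SKB verdict program
 * attached to a sockmap could steer skbs between sockets like this; the
 * map layout and key derivation are hypothetical.
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type		= BPF_MAP_TYPE_SOCKMAP,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u32),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("sk_skb")
 *	int steer(struct __sk_buff *skb)
 *	{
 *		__u32 idx = skb->local_port & 63;
 *
 *		return bpf_sk_redirect_map(skb, &sock_map, idx, 0);
 *	}
 */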
2148
2149 BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg_buff *, msg,
2150            struct bpf_map *, map, void *, key, u64, flags)
2151 {
2152         /* If the user passes invalid input, drop the packet. */
2153         if (unlikely(flags & ~(BPF_F_INGRESS)))
2154                 return SK_DROP;
2155
2156         msg->flags = flags;
2157         msg->sk_redir = __sock_hash_lookup_elem(map, key);
2158         if (!msg->sk_redir)
2159                 return SK_DROP;
2160
2161         return SK_PASS;
2162 }
2163
2164 static const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
2165         .func           = bpf_msg_redirect_hash,
2166         .gpl_only       = false,
2167         .ret_type       = RET_INTEGER,
2168         .arg1_type      = ARG_PTR_TO_CTX,
2169         .arg2_type      = ARG_CONST_MAP_PTR,
2170         .arg3_type      = ARG_PTR_TO_MAP_KEY,
2171         .arg4_type      = ARG_ANYTHING,
2172 };
2173
2174 BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg,
2175            struct bpf_map *, map, u32, key, u64, flags)
2176 {
2177         /* If the user passes invalid input, drop the packet. */
2178         if (unlikely(flags & ~(BPF_F_INGRESS)))
2179                 return SK_DROP;
2180
2181         msg->flags = flags;
2182         msg->sk_redir = __sock_map_lookup_elem(map, key);
2183         if (!msg->sk_redir)
2184                 return SK_DROP;
2185
2186         return SK_PASS;
2187 }
2188
2189 struct sock *do_msg_redirect_map(struct sk_msg_buff *msg)
2190 {
2191         return msg->sk_redir;
2192 }
2193
2194 static const struct bpf_func_proto bpf_msg_redirect_map_proto = {
2195         .func           = bpf_msg_redirect_map,
2196         .gpl_only       = false,
2197         .ret_type       = RET_INTEGER,
2198         .arg1_type      = ARG_PTR_TO_CTX,
2199         .arg2_type      = ARG_CONST_MAP_PTR,
2200         .arg3_type      = ARG_ANYTHING,
2201         .arg4_type      = ARG_ANYTHING,
2202 };
2203
2204 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes)
2205 {
2206         msg->apply_bytes = bytes;
2207         return 0;
2208 }
2209
2210 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
2211         .func           = bpf_msg_apply_bytes,
2212         .gpl_only       = false,
2213         .ret_type       = RET_INTEGER,
2214         .arg1_type      = ARG_PTR_TO_CTX,
2215         .arg2_type      = ARG_ANYTHING,
2216 };
2217
2218 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes)
2219 {
2220         msg->cork_bytes = bytes;
2221         return 0;
2222 }
2223
2224 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2225         .func           = bpf_msg_cork_bytes,
2226         .gpl_only       = false,
2227         .ret_type       = RET_INTEGER,
2228         .arg1_type      = ARG_PTR_TO_CTX,
2229         .arg2_type      = ARG_ANYTHING,
2230 };
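
/* Illustrative sketch, not part of this file: an SK_MSG program can
 * combine the two helpers above so that its verdict only covers the
 * first record and short sends are held back until a full record has
 * been queued. The 128 byte record size is hypothetical.
 *
 *	SEC("sk_msg")
 *	int msg_prog(struct sk_msg_md *msg)
 *	{
 *		bpf_msg_apply_bytes(msg, 128);
 *		bpf_msg_cork_bytes(msg, 128);
 *		return SK_PASS;
 *	}
 */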
2231
2232 BPF_CALL_4(bpf_msg_pull_data,
2233            struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
2234 {
2235         unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
2236         struct scatterlist *sg = msg->sg_data;
2237         int first_sg, last_sg, i, shift;
2238         unsigned char *p, *to, *from;
2239         int bytes = end - start, bytes_sg_total;
2240         struct page *page;
2241
2242         if (unlikely(flags || end <= start))
2243                 return -EINVAL;
2244
2245         /* First find the starting scatterlist element */
2246         i = msg->sg_start;
2247         do {
2248                 len = sg[i].length;
2249                 if (start < offset + len)
2250                         break;
2251                 offset += len;
2252                 i++;
2253                 if (i == MAX_SKB_FRAGS)
2254                         i = 0;
2255         } while (i != msg->sg_end);
2256
2257         if (unlikely(start >= offset + len))
2258                 return -EINVAL;
2259
2260         first_sg = i;
2261         bytes_sg_total = start - offset + bytes;
2262         if (!msg->sg_copy[i] && bytes_sg_total <= len)
2263                 goto out;
2264
2265         /* At this point we need to linearize multiple scatterlist
2266          * elements or a single shared page. Either way we need to
2267          * copy into a linear buffer exclusively owned by BPF. Then
2268          * place the buffer in the scatterlist and fixup the original
2269          * entries by removing the entries now in the linear buffer
2270          * and shifting the remaining entries. For now we do not try
2271          * to copy partial entries, to avoid the complexity of running
2272          * out of sg_entry slots. The downside is that reading a single
2273          * byte will copy the entire sg entry.
2274          */
2275         do {
2276                 copy += sg[i].length;
2277                 i++;
2278                 if (i == MAX_SKB_FRAGS)
2279                         i = 0;
2280                 if (bytes_sg_total <= copy)
2281                         break;
2282         } while (i != msg->sg_end);
2283         last_sg = i;
2284
2285         if (unlikely(copy < bytes_sg_total))
2286                 return -EINVAL;
2287
2288         page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
2289         if (unlikely(!page))
2290                 return -ENOMEM;
2291         p = page_address(page);
2293
2294         i = first_sg;
2295         do {
2296                 from = sg_virt(&sg[i]);
2297                 len = sg[i].length;
2298                 to = p + poffset;
2299
2300                 memcpy(to, from, len);
2301                 poffset += len;
2302                 sg[i].length = 0;
2303                 put_page(sg_page(&sg[i]));
2304
2305                 i++;
2306                 if (i == MAX_SKB_FRAGS)
2307                         i = 0;
2308         } while (i != last_sg);
2309
2310         sg[first_sg].length = copy;
2311         sg_set_page(&sg[first_sg], page, copy, 0);
2312
2313         /* To repair sg ring we need to shift entries. If we only
2314          * had a single entry though we can just replace it and
2315          * be done. Otherwise walk the ring and shift the entries.
2316          */
2317         shift = (last_sg - first_sg - 1 + MAX_SKB_FRAGS) % MAX_SKB_FRAGS;
2318         if (!shift)
2319                 goto out;
2320
2321         i = first_sg + 1;
2322         do {
2323                 int move_from;
2324
2325                 if (i + shift >= MAX_SKB_FRAGS)
2326                         move_from = i + shift - MAX_SKB_FRAGS;
2327                 else
2328                         move_from = i + shift;
2329
2330                 if (move_from == msg->sg_end)
2331                         break;
2332
2333                 sg[i] = sg[move_from];
2334                 sg[move_from].length = 0;
2335                 sg[move_from].page_link = 0;
2336                 sg[move_from].offset = 0;
2337
2338                 i++;
2339                 if (i == MAX_SKB_FRAGS)
2340                         i = 0;
2341         } while (1);
2342         msg->sg_end -= shift;
2343         if (msg->sg_end < 0)
2344                 msg->sg_end += MAX_SKB_FRAGS;
2345 out:
2346         msg->data = sg_virt(&sg[first_sg]) + start - offset;
2347         msg->data_end = msg->data + bytes;
2348
2349         return 0;
2350 }
2351
2352 static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2353         .func           = bpf_msg_pull_data,
2354         .gpl_only       = false,
2355         .ret_type       = RET_INTEGER,
2356         .arg1_type      = ARG_PTR_TO_CTX,
2357         .arg2_type      = ARG_ANYTHING,
2358         .arg3_type      = ARG_ANYTHING,
2359         .arg4_type      = ARG_ANYTHING,
2360 };
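
/* Illustrative sketch, not part of this file: before reading msg bytes
 * that may sit beyond the first sg element, an SK_MSG program pulls the
 * wanted range into the linear area and then re-checks bounds; the
 * 20 byte range is hypothetical.
 *
 *	if (bpf_msg_pull_data(msg, 0, 20, 0))
 *		return SK_DROP;
 *	data     = msg->data;
 *	data_end = msg->data_end;
 *	if (data + 20 > data_end)
 *		return SK_DROP;
 */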
2361
2362 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
2363 {
2364         return task_get_classid(skb);
2365 }
2366
2367 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
2368         .func           = bpf_get_cgroup_classid,
2369         .gpl_only       = false,
2370         .ret_type       = RET_INTEGER,
2371         .arg1_type      = ARG_PTR_TO_CTX,
2372 };
2373
2374 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
2375 {
2376         return dst_tclassid(skb);
2377 }
2378
2379 static const struct bpf_func_proto bpf_get_route_realm_proto = {
2380         .func           = bpf_get_route_realm,
2381         .gpl_only       = false,
2382         .ret_type       = RET_INTEGER,
2383         .arg1_type      = ARG_PTR_TO_CTX,
2384 };
2385
2386 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
2387 {
2388         /* If skb_clear_hash() was called due to mangling, we can
2389          * trigger SW recalculation here. Later access to hash
2390          * can then use the inline skb->hash via context directly
2391          * instead of calling this helper again.
2392          */
2393         return skb_get_hash(skb);
2394 }
2395
2396 static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
2397         .func           = bpf_get_hash_recalc,
2398         .gpl_only       = false,
2399         .ret_type       = RET_INTEGER,
2400         .arg1_type      = ARG_PTR_TO_CTX,
2401 };
2402
2403 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
2404 {
2405         /* After all direct packet writes, this can be used once to
2406          * trigger a lazy recalc on the next skb_get_hash() invocation.
2407          */
2408         skb_clear_hash(skb);
2409         return 0;
2410 }
2411
2412 static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
2413         .func           = bpf_set_hash_invalid,
2414         .gpl_only       = false,
2415         .ret_type       = RET_INTEGER,
2416         .arg1_type      = ARG_PTR_TO_CTX,
2417 };
2418
2419 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
2420 {
2421         /* Set the user-specified hash as L4(+), so that it gets returned
2422          * on skb_get_hash() calls unless the BPF prog later on triggers a
2423          * skb_clear_hash().
2424          */
2425         __skb_set_sw_hash(skb, hash, true);
2426         return 0;
2427 }
2428
2429 static const struct bpf_func_proto bpf_set_hash_proto = {
2430         .func           = bpf_set_hash,
2431         .gpl_only       = false,
2432         .ret_type       = RET_INTEGER,
2433         .arg1_type      = ARG_PTR_TO_CTX,
2434         .arg2_type      = ARG_ANYTHING,
2435 };
2436
2437 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
2438            u16, vlan_tci)
2439 {
2440         int ret;
2441
2442         if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
2443                      vlan_proto != htons(ETH_P_8021AD)))
2444                 vlan_proto = htons(ETH_P_8021Q);
2445
2446         bpf_push_mac_rcsum(skb);
2447         ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
2448         bpf_pull_mac_rcsum(skb);
2449
2450         bpf_compute_data_pointers(skb);
2451         return ret;
2452 }
2453
2454 static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
2455         .func           = bpf_skb_vlan_push,
2456         .gpl_only       = false,
2457         .ret_type       = RET_INTEGER,
2458         .arg1_type      = ARG_PTR_TO_CTX,
2459         .arg2_type      = ARG_ANYTHING,
2460         .arg3_type      = ARG_ANYTHING,
2461 };
2462
2463 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
2464 {
2465         int ret;
2466
2467         bpf_push_mac_rcsum(skb);
2468         ret = skb_vlan_pop(skb);
2469         bpf_pull_mac_rcsum(skb);
2470
2471         bpf_compute_data_pointers(skb);
2472         return ret;
2473 }
2474
2475 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
2476         .func           = bpf_skb_vlan_pop,
2477         .gpl_only       = false,
2478         .ret_type       = RET_INTEGER,
2479         .arg1_type      = ARG_PTR_TO_CTX,
2480 };
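
/* Illustrative sketch, not part of this file: retagging from a tc
 * program; the VLAN ID is hypothetical.
 *
 *	bpf_skb_vlan_pop(skb);
 *	bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 100);
 *
 * Both helpers end in bpf_compute_data_pointers(), so any previously
 * derived data/data_end pointers must be reloaded afterwards.
 */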
2481
2482 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
2483 {
2484         /* Caller already did skb_cow() with len as headroom,
2485          * so no need to do it here.
2486          */
2487         skb_push(skb, len);
2488         memmove(skb->data, skb->data + len, off);
2489         memset(skb->data + off, 0, len);
2490
2491         /* No skb_postpush_rcsum(skb, skb->data + off, len)
2492          * needed here as it does not change the skb->csum
2493          * result for checksum complete when summing over
2494          * zeroed blocks.
2495          */
2496         return 0;
2497 }
2498
2499 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
2500 {
2501         /* skb_ensure_writable() is not needed here, as we're
2502          * already working on an uncloned skb.
2503          */
2504         if (unlikely(!pskb_may_pull(skb, off + len)))
2505                 return -ENOMEM;
2506
2507         skb_postpull_rcsum(skb, skb->data + off, len);
2508         memmove(skb->data + len, skb->data, off);
2509         __skb_pull(skb, len);
2510
2511         return 0;
2512 }
2513
2514 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
2515 {
2516         bool trans_same = skb->transport_header == skb->network_header;
2517         int ret;
2518
2519         /* There's no need for __skb_push()/__skb_pull() pair to
2520          * get to the start of the mac header as we're guaranteed
2521          * to always start from here under eBPF.
2522          */
2523         ret = bpf_skb_generic_push(skb, off, len);
2524         if (likely(!ret)) {
2525                 skb->mac_header -= len;
2526                 skb->network_header -= len;
2527                 if (trans_same)
2528                         skb->transport_header = skb->network_header;
2529         }
2530
2531         return ret;
2532 }
2533
2534 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
2535 {
2536         bool trans_same = skb->transport_header == skb->network_header;
2537         int ret;
2538
2539         /* Same here, __skb_push()/__skb_pull() pair not needed. */
2540         ret = bpf_skb_generic_pop(skb, off, len);
2541         if (likely(!ret)) {
2542                 skb->mac_header += len;
2543                 skb->network_header += len;
2544                 if (trans_same)
2545                         skb->transport_header = skb->network_header;
2546         }
2547
2548         return ret;
2549 }
2550
2551 static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2552 {
2553         const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2554         u32 off = skb_mac_header_len(skb);
2555         int ret;
2556
2557         /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2558         if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2559                 return -ENOTSUPP;
2560
2561         ret = skb_cow(skb, len_diff);
2562         if (unlikely(ret < 0))
2563                 return ret;
2564
2565         ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2566         if (unlikely(ret < 0))
2567                 return ret;
2568
2569         if (skb_is_gso(skb)) {
2570                 struct skb_shared_info *shinfo = skb_shinfo(skb);
2571
2572                 /* SKB_GSO_TCPV4 needs to be changed into
2573                  * SKB_GSO_TCPV6.
2574                  */
2575                 if (shinfo->gso_type & SKB_GSO_TCPV4) {
2576                         shinfo->gso_type &= ~SKB_GSO_TCPV4;
2577                         shinfo->gso_type |=  SKB_GSO_TCPV6;
2578                 }
2579
2580                 /* Due to IPv6 header, MSS needs to be downgraded. */
2581                 skb_decrease_gso_size(shinfo, len_diff);
2582                 /* Header must be checked, and gso_segs recomputed. */
2583                 shinfo->gso_type |= SKB_GSO_DODGY;
2584                 shinfo->gso_segs = 0;
2585         }
2586
2587         skb->protocol = htons(ETH_P_IPV6);
2588         skb_clear_hash(skb);
2589
2590         return 0;
2591 }
2592
2593 static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2594 {
2595         const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2596         u32 off = skb_mac_header_len(skb);
2597         int ret;
2598
2599         /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2600         if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2601                 return -ENOTSUPP;
2602
2603         ret = skb_unclone(skb, GFP_ATOMIC);
2604         if (unlikely(ret < 0))
2605                 return ret;
2606
2607         ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2608         if (unlikely(ret < 0))
2609                 return ret;
2610
2611         if (skb_is_gso(skb)) {
2612                 struct skb_shared_info *shinfo = skb_shinfo(skb);
2613
2614                 /* SKB_GSO_TCPV6 needs to be changed into
2615                  * SKB_GSO_TCPV4.
2616                  */
2617                 if (shinfo->gso_type & SKB_GSO_TCPV6) {
2618                         shinfo->gso_type &= ~SKB_GSO_TCPV6;
2619                         shinfo->gso_type |=  SKB_GSO_TCPV4;
2620                 }
2621
2622                 /* Due to IPv4 header, MSS can be upgraded. */
2623                 skb_increase_gso_size(shinfo, len_diff);
2624                 /* Header must be checked, and gso_segs recomputed. */
2625                 shinfo->gso_type |= SKB_GSO_DODGY;
2626                 shinfo->gso_segs = 0;
2627         }
2628
2629         skb->protocol = htons(ETH_P_IP);
2630         skb_clear_hash(skb);
2631
2632         return 0;
2633 }
2634
2635 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
2636 {
2637         __be16 from_proto = skb->protocol;
2638
2639         if (from_proto == htons(ETH_P_IP) &&
2640               to_proto == htons(ETH_P_IPV6))
2641                 return bpf_skb_proto_4_to_6(skb);
2642
2643         if (from_proto == htons(ETH_P_IPV6) &&
2644               to_proto == htons(ETH_P_IP))
2645                 return bpf_skb_proto_6_to_4(skb);
2646
2647         return -ENOTSUPP;
2648 }
2649
2650 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
2651            u64, flags)
2652 {
2653         int ret;
2654
2655         if (unlikely(flags))
2656                 return -EINVAL;
2657
2658         /* The general idea is that this helper does the basic groundwork
2659          * needed for changing the protocol, and the eBPF program fills the
2660          * rest in through bpf_skb_store_bytes(), bpf_lX_csum_replace()
2661          * and other helpers, rather than passing a raw buffer here.
2662          *
2663          * The rationale is to keep this minimal and without a need to
2664          * deal with raw packet data. F.e. even if we would pass buffers
2665          * here, the program still needs to call the bpf_lX_csum_replace()
2666          * helpers anyway. Plus, this way we also keep separation of
2667          * concerns, since f.e. bpf_skb_store_bytes() should only take
2668          * care of stores.
2669          *
2670          * Currently, additional options and extension header space are
2671          * not supported, but the flags argument is reserved so we can
2672          * adapt that later. For offloads, we mark the packet as dodgy, so
2673          * that headers need to be verified first.
2674          */
2675         ret = bpf_skb_proto_xlat(skb, proto);
2676         bpf_compute_data_pointers(skb);
2677         return ret;
2678 }
2679
2680 static const struct bpf_func_proto bpf_skb_change_proto_proto = {
2681         .func           = bpf_skb_change_proto,
2682         .gpl_only       = false,
2683         .ret_type       = RET_INTEGER,
2684         .arg1_type      = ARG_PTR_TO_CTX,
2685         .arg2_type      = ARG_ANYTHING,
2686         .arg3_type      = ARG_ANYTHING,
2687 };
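
/* Illustrative sketch, not part of this file: a NAT64-style program
 * first flips the protocol, which makes room for the larger header,
 * and then writes the IPv6 header itself. ip6h is a locally prepared
 * struct ipv6hdr and the offset assumes an Ethernet L2.
 *
 *	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *		return TC_ACT_SHOT;
 *	if (bpf_skb_store_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h), 0))
 *		return TC_ACT_SHOT;
 */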
2688
2689 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
2690 {
2691         /* We only allow a restricted subset to be changed for now. */
2692         if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
2693                      !skb_pkt_type_ok(pkt_type)))
2694                 return -EINVAL;
2695
2696         skb->pkt_type = pkt_type;
2697         return 0;
2698 }
2699
2700 static const struct bpf_func_proto bpf_skb_change_type_proto = {
2701         .func           = bpf_skb_change_type,
2702         .gpl_only       = false,
2703         .ret_type       = RET_INTEGER,
2704         .arg1_type      = ARG_PTR_TO_CTX,
2705         .arg2_type      = ARG_ANYTHING,
2706 };
2707
2708 static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
2709 {
2710         switch (skb->protocol) {
2711         case htons(ETH_P_IP):
2712                 return sizeof(struct iphdr);
2713         case htons(ETH_P_IPV6):
2714                 return sizeof(struct ipv6hdr);
2715         default:
2716                 return ~0U;
2717         }
2718 }
2719
2720 static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2721 {
2722         u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2723         int ret;
2724
2725         /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2726         if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2727                 return -ENOTSUPP;
2728
2729         ret = skb_cow(skb, len_diff);
2730         if (unlikely(ret < 0))
2731                 return ret;
2732
2733         ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2734         if (unlikely(ret < 0))
2735                 return ret;
2736
2737         if (skb_is_gso(skb)) {
2738                 struct skb_shared_info *shinfo = skb_shinfo(skb);
2739
2740                 /* Due to header grow, MSS needs to be downgraded. */
2741                 skb_decrease_gso_size(shinfo, len_diff);
2742                 /* Header must be checked, and gso_segs recomputed. */
2743                 shinfo->gso_type |= SKB_GSO_DODGY;
2744                 shinfo->gso_segs = 0;
2745         }
2746
2747         return 0;
2748 }
2749
2750 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2751 {
2752         u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2753         int ret;
2754
2755         /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2756         if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2757                 return -ENOTSUPP;
2758
2759         ret = skb_unclone(skb, GFP_ATOMIC);
2760         if (unlikely(ret < 0))
2761                 return ret;
2762
2763         ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2764         if (unlikely(ret < 0))
2765                 return ret;
2766
2767         if (skb_is_gso(skb)) {
2768                 struct skb_shared_info *shinfo = skb_shinfo(skb);
2769
2770                 /* Due to header shrink, MSS can be upgraded. */
2771                 skb_increase_gso_size(shinfo, len_diff);
2772                 /* Header must be checked, and gso_segs recomputed. */
2773                 shinfo->gso_type |= SKB_GSO_DODGY;
2774                 shinfo->gso_segs = 0;
2775         }
2776
2777         return 0;
2778 }
2779
2780 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
2781 {
2782         return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
2783                           SKB_MAX_ALLOC;
2784 }
2785
2786 static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
2787 {
2788         bool trans_same = skb->transport_header == skb->network_header;
2789         u32 len_cur, len_diff_abs = abs(len_diff);
2790         u32 len_min = bpf_skb_net_base_len(skb);
2791         u32 len_max = __bpf_skb_max_len(skb);
2792         __be16 proto = skb->protocol;
2793         bool shrink = len_diff < 0;
2794         int ret;
2795
2796         if (unlikely(len_diff_abs > 0xfffU))
2797                 return -EFAULT;
2798         if (unlikely(proto != htons(ETH_P_IP) &&
2799                      proto != htons(ETH_P_IPV6)))
2800                 return -ENOTSUPP;
2801
2802         len_cur = skb->len - skb_network_offset(skb);
2803         if (skb_transport_header_was_set(skb) && !trans_same)
2804                 len_cur = skb_network_header_len(skb);
2805         if ((shrink && (len_diff_abs >= len_cur ||
2806                         len_cur - len_diff_abs < len_min)) ||
2807             (!shrink && (skb->len + len_diff_abs > len_max &&
2808                          !skb_is_gso(skb))))
2809                 return -ENOTSUPP;
2810
2811         ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
2812                        bpf_skb_net_grow(skb, len_diff_abs);
2813
2814         bpf_compute_data_pointers(skb);
2815         return ret;
2816 }
2817
2818 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
2819            u32, mode, u64, flags)
2820 {
2821         if (unlikely(flags))
2822                 return -EINVAL;
2823         if (likely(mode == BPF_ADJ_ROOM_NET))
2824                 return bpf_skb_adjust_net(skb, len_diff);
2825
2826         return -ENOTSUPP;
2827 }
2828
2829 static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
2830         .func           = bpf_skb_adjust_room,
2831         .gpl_only       = false,
2832         .ret_type       = RET_INTEGER,
2833         .arg1_type      = ARG_PTR_TO_CTX,
2834         .arg2_type      = ARG_ANYTHING,
2835         .arg3_type      = ARG_ANYTHING,
2836         .arg4_type      = ARG_ANYTHING,
2837 };
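
/* Illustrative sketch, not part of this file: growing room at the net
 * layer, f.e. to make space for a tunnel header that the program then
 * writes via bpf_skb_store_bytes(); the 8 bytes are hypothetical.
 *
 *	if (bpf_skb_adjust_room(skb, 8, BPF_ADJ_ROOM_NET, 0))
 *		return TC_ACT_SHOT;
 */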
2838
2839 static u32 __bpf_skb_min_len(const struct sk_buff *skb)
2840 {
2841         u32 min_len = skb_network_offset(skb);
2842
2843         if (skb_transport_header_was_set(skb))
2844                 min_len = skb_transport_offset(skb);
2845         if (skb->ip_summed == CHECKSUM_PARTIAL)
2846                 min_len = skb_checksum_start_offset(skb) +
2847                           skb->csum_offset + sizeof(__sum16);
2848         return min_len;
2849 }
2850
2851 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
2852 {
2853         unsigned int old_len = skb->len;
2854         int ret;
2855
2856         ret = __skb_grow_rcsum(skb, new_len);
2857         if (!ret)
2858                 memset(skb->data + old_len, 0, new_len - old_len);
2859         return ret;
2860 }
2861
2862 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
2863 {
2864         return __skb_trim_rcsum(skb, new_len);
2865 }
2866
2867 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
2868            u64, flags)
2869 {
2870         u32 max_len = __bpf_skb_max_len(skb);
2871         u32 min_len = __bpf_skb_min_len(skb);
2872         int ret;
2873
2874         if (unlikely(flags || new_len > max_len || new_len < min_len))
2875                 return -EINVAL;
2876         if (skb->encapsulation)
2877                 return -ENOTSUPP;
2878
2879         /* The basic idea of this helper is that it performs the work
2880          * needed to either grow or trim an skb, and the eBPF program
2881          * rewrites the rest via helpers like bpf_skb_store_bytes(),
2882          * bpf_lX_csum_replace() and others rather than passing a raw
2883          * buffer here. This one is a slow path helper and intended
2884          * for replies with control messages.
2885          *
2886          * Like in bpf_skb_change_proto(), we want to keep this rather
2887          * minimal and without protocol specifics so that we are able
2888          * to separate concerns: as before, bpf_skb_store_bytes() should
2889          * be the only one responsible for writing buffers.
2890          *
2891          * It's really expected to be a slow path operation here for
2892          * control message replies, so we're implicitly linearizing,
2893          * uncloning and dropping offloads from the skb by this.
2894          */
2895         ret = __bpf_try_make_writable(skb, skb->len);
2896         if (!ret) {
2897                 if (new_len > skb->len)
2898                         ret = bpf_skb_grow_rcsum(skb, new_len);
2899                 else if (new_len < skb->len)
2900                         ret = bpf_skb_trim_rcsum(skb, new_len);
2901                 if (!ret && skb_is_gso(skb))
2902                         skb_gso_reset(skb);
2903         }
2904
2905         bpf_compute_data_pointers(skb);
2906         return ret;
2907 }
2908
2909 static const struct bpf_func_proto bpf_skb_change_tail_proto = {
2910         .func           = bpf_skb_change_tail,
2911         .gpl_only       = false,
2912         .ret_type       = RET_INTEGER,
2913         .arg1_type      = ARG_PTR_TO_CTX,
2914         .arg2_type      = ARG_ANYTHING,
2915         .arg3_type      = ARG_ANYTHING,
2916 };
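
/* Illustrative sketch, not part of this file: trimming a reply skb down
 * to bare headers before sending it back out; the resulting length is
 * hypothetical.
 *
 *	__u32 new_len = ETH_HLEN + sizeof(struct iphdr) +
 *			sizeof(struct icmphdr);
 *
 *	if (bpf_skb_change_tail(skb, new_len, 0))
 *		return TC_ACT_SHOT;
 */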
2917
2918 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
2919            u64, flags)
2920 {
2921         u32 max_len = __bpf_skb_max_len(skb);
2922         u32 new_len = skb->len + head_room;
2923         int ret;
2924
2925         if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
2926                      new_len < skb->len))
2927                 return -EINVAL;
2928
2929         ret = skb_cow(skb, head_room);
2930         if (likely(!ret)) {
2931                 /* The idea for this helper is that we currently only
2932                  * allow expanding on the mac header. This means that
2933                  * skb->protocol, network header, etc, stay as is.
2934                  * Compared to bpf_skb_change_tail(), we're more
2935                  * flexible due to not needing to linearize or
2936                  * reset GSO. The intention is for this helper to be
2937                  * used by an L3 skb that needs to push a mac header
2938                  * for redirection into an L2 device.
2939                  */
2940                 __skb_push(skb, head_room);
2941                 memset(skb->data, 0, head_room);
2942                 skb_reset_mac_header(skb);
2943         }
2944
2945         bpf_compute_data_pointers(skb);
2946         return ret;
2947 }
2948
2949 static const struct bpf_func_proto bpf_skb_change_head_proto = {
2950         .func           = bpf_skb_change_head,
2951         .gpl_only       = false,
2952         .ret_type       = RET_INTEGER,
2953         .arg1_type      = ARG_PTR_TO_CTX,
2954         .arg2_type      = ARG_ANYTHING,
2955         .arg3_type      = ARG_ANYTHING,
2956 };
2957
2958 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
2959 {
2960         return xdp_data_meta_unsupported(xdp) ? 0 :
2961                xdp->data - xdp->data_meta;
2962 }
2963
2964 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
2965 {
2966         void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
2967         unsigned long metalen = xdp_get_metalen(xdp);
2968         void *data_start = xdp_frame_end + metalen;
2969         void *data = xdp->data + offset;
2970
2971         if (unlikely(data < data_start ||
2972                      data > xdp->data_end - ETH_HLEN))
2973                 return -EINVAL;
2974
2975         if (metalen)
2976                 memmove(xdp->data_meta + offset,
2977                         xdp->data_meta, metalen);
2978         xdp->data_meta += offset;
2979         xdp->data = data;
2980
2981         return 0;
2982 }
2983
2984 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
2985         .func           = bpf_xdp_adjust_head,
2986         .gpl_only       = false,
2987         .ret_type       = RET_INTEGER,
2988         .arg1_type      = ARG_PTR_TO_CTX,
2989         .arg2_type      = ARG_ANYTHING,
2990 };
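
/* Usage sketch (illustrative, not part of this file): growing headroom
 * in XDP by passing a negative offset, then bounds-checking before the
 * new space is written. As checked above, the new data pointer may not
 * cross into the reserved struct xdp_frame area and must leave at least
 * ETH_HLEN bytes of packet. Names are hypothetical; struct ethhdr is
 * assumed from linux/if_ether.h.
 *
 *      SEC("xdp")
 *      int push_hdr(struct xdp_md *ctx)
 *      {
 *              void *data, *data_end;
 *
 *              if (bpf_xdp_adjust_head(ctx, -(int)sizeof(struct ethhdr)))
 *                      return XDP_DROP;
 *              data     = (void *)(long)ctx->data;
 *              data_end = (void *)(long)ctx->data_end;
 *              if (data + sizeof(struct ethhdr) > data_end)
 *                      return XDP_DROP;
 *              __builtin_memset(data, 0, sizeof(struct ethhdr));
 *              return XDP_PASS;
 *      }
 */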
2991
2992 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
2993 {
2994         void *data_end = xdp->data_end + offset;
2995
2996         /* Only shrinking is allowed for now. */
2997         if (unlikely(offset >= 0))
2998                 return -EINVAL;
2999
3000         if (unlikely(data_end < xdp->data + ETH_HLEN))
3001                 return -EINVAL;
3002
3003         xdp->data_end = data_end;
3004
3005         return 0;
3006 }
3007
3008 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3009         .func           = bpf_xdp_adjust_tail,
3010         .gpl_only       = false,
3011         .ret_type       = RET_INTEGER,
3012         .arg1_type      = ARG_PTR_TO_CTX,
3013         .arg2_type      = ARG_ANYTHING,
3014 };
3015
3016 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3017 {
3018         void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3019         void *meta = xdp->data_meta + offset;
3020         unsigned long metalen = xdp->data - meta;
3021
3022         if (xdp_data_meta_unsupported(xdp))
3023                 return -ENOTSUPP;
3024         if (unlikely(meta < xdp_frame_end ||
3025                      meta > xdp->data))
3026                 return -EINVAL;
3027         if (unlikely((metalen & (sizeof(__u32) - 1)) ||
3028                      (metalen > 32)))
3029                 return -EACCES;
3030
3031         xdp->data_meta = meta;
3032
3033         return 0;
3034 }
3035
3036 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3037         .func           = bpf_xdp_adjust_meta,
3038         .gpl_only       = false,
3039         .ret_type       = RET_INTEGER,
3040         .arg1_type      = ARG_PTR_TO_CTX,
3041         .arg2_type      = ARG_ANYTHING,
3042 };
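
/* Usage sketch (illustrative, not part of this file): reserving four
 * bytes of metadata in front of the packet and storing a mark there; a
 * tc program on the same device could later read it via skb->data_meta.
 * Per the checks above, metalen must be a multiple of four and at most
 * 32 bytes, and drivers without data_meta support fail with -ENOTSUPP.
 * Names are hypothetical.
 *
 *      SEC("xdp")
 *      int set_mark(struct xdp_md *ctx)
 *      {
 *              void *data, *data_meta;
 *              __u32 *mark;
 *
 *              if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*mark)))
 *                      return XDP_PASS;
 *              data      = (void *)(long)ctx->data;
 *              data_meta = (void *)(long)ctx->data_meta;
 *              mark = data_meta;
 *              if ((void *)(mark + 1) > data)
 *                      return XDP_PASS;
 *              *mark = 42;
 *              return XDP_PASS;
 *      }
 */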
3043
3044 static int __bpf_tx_xdp(struct net_device *dev,
3045                         struct bpf_map *map,
3046                         struct xdp_buff *xdp,
3047                         u32 index)
3048 {
3049         struct xdp_frame *xdpf;
3050         int sent;
3051
3052         if (!dev->netdev_ops->ndo_xdp_xmit)
3053                 return -EOPNOTSUPP;
3055
3056         xdpf = convert_to_xdp_frame(xdp);
3057         if (unlikely(!xdpf))
3058                 return -EOVERFLOW;
3059
3060         sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
3061         if (sent <= 0)
3062                 return sent;
3063         return 0;
3064 }
3065
3066 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3067                             struct bpf_map *map,
3068                             struct xdp_buff *xdp,
3069                             u32 index)
3070 {
3071         int err;
3072
3073         switch (map->map_type) {
3074         case BPF_MAP_TYPE_DEVMAP: {
3075                 struct bpf_dtab_netdev *dst = fwd;
3076
3077                 err = dev_map_enqueue(dst, xdp, dev_rx);
3078                 if (err)
3079                         return err;
3080                 __dev_map_insert_ctx(map, index);
3081                 break;
3082         }
3083         case BPF_MAP_TYPE_CPUMAP: {
3084                 struct bpf_cpu_map_entry *rcpu = fwd;
3085
3086                 err = cpu_map_enqueue(rcpu, xdp, dev_rx);
3087                 if (err)
3088                         return err;
3089                 __cpu_map_insert_ctx(map, index);
3090                 break;
3091         }
3092         case BPF_MAP_TYPE_XSKMAP: {
3093                 struct xdp_sock *xs = fwd;
3094
3095                 err = __xsk_map_redirect(map, xdp, xs);
3096                 return err;
3097         }
3098         default:
3099                 break;
3100         }
3101         return 0;
3102 }
3103
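/* Redirect transmissions into a map are batched per NAPI cycle:
 * __bpf_tx_xdp_map() enqueues into the destination map and records it
 * in the per-CPU redirect_info as map_to_flush, and the driver calls
 * xdp_do_flush_map() once it is done with its RX ring so the queued
 * DEVMAP/CPUMAP/XSKMAP entries are flushed in one go rather than per
 * packet.
 */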
3104 void xdp_do_flush_map(void)
3105 {
3106         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3107         struct bpf_map *map = ri->map_to_flush;
3108
3109         ri->map_to_flush = NULL;
3110         if (map) {
3111                 switch (map->map_type) {
3112                 case BPF_MAP_TYPE_DEVMAP:
3113                         __dev_map_flush(map);
3114                         break;
3115                 case BPF_MAP_TYPE_CPUMAP:
3116                         __cpu_map_flush(map);
3117                         break;
3118                 case BPF_MAP_TYPE_XSKMAP:
3119                         __xsk_map_flush(map);
3120                         break;
3121                 default:
3122                         break;
3123                 }
3124         }
3125 }
3126 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
3127
3128 static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
3129 {
3130         switch (map->map_type) {
3131         case BPF_MAP_TYPE_DEVMAP:
3132                 return __dev_map_lookup_elem(map, index);
3133         case BPF_MAP_TYPE_CPUMAP:
3134                 return __cpu_map_lookup_elem(map, index);
3135         case BPF_MAP_TYPE_XSKMAP:
3136                 return __xsk_map_lookup_elem(map, index);
3137         default:
3138                 return NULL;
3139         }
3140 }
3141
3142 static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
3143                                    unsigned long aux)
3144 {
3145         return (unsigned long)xdp_prog->aux != aux;
3146 }
3147
3148 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
3149                                struct bpf_prog *xdp_prog)
3150 {
3151         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3152         unsigned long map_owner = ri->map_owner;
3153         struct bpf_map *map = ri->map;
3154         u32 index = ri->ifindex;
3155         void *fwd = NULL;
3156         int err;
3157
3158         ri->ifindex = 0;
3159         ri->map = NULL;
3160         ri->map_owner = 0;
3161
3162         if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
3163                 err = -EFAULT;
3164                 map = NULL;
3165                 goto err;
3166         }
3167
3168         fwd = __xdp_map_lookup_elem(map, index);
3169         if (!fwd) {
3170                 err = -EINVAL;
3171                 goto err;
3172         }
3173         if (ri->map_to_flush && ri->map_to_flush != map)
3174                 xdp_do_flush_map();
3175
3176         err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
3177         if (unlikely(err))
3178                 goto err;
3179
3180         ri->map_to_flush = map;
3181         _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3182         return 0;
3183 err:
3184         _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3185         return err;
3186 }
3187
3188 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3189                     struct bpf_prog *xdp_prog)
3190 {
3191         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3192         struct net_device *fwd;
3193         u32 index = ri->ifindex;
3194         int err;
3195
3196         if (ri->map)
3197                 return xdp_do_redirect_map(dev, xdp, xdp_prog);
3198
3199         fwd = dev_get_by_index_rcu(dev_net(dev), index);
3200         ri->ifindex = 0;
3201         if (unlikely(!fwd)) {
3202                 err = -EINVAL;
3203                 goto err;
3204         }
3205
3206         err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
3207         if (unlikely(err))
3208                 goto err;
3209
3210         _trace_xdp_redirect(dev, xdp_prog, index);
3211         return 0;
3212 err:
3213         _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3214         return err;
3215 }
3216 EXPORT_SYMBOL_GPL(xdp_do_redirect);
3217
3218 static int xdp_do_generic_redirect_map(struct net_device *dev,
3219                                        struct sk_buff *skb,
3220                                        struct xdp_buff *xdp,
3221                                        struct bpf_prog *xdp_prog)
3222 {
3223         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3224         unsigned long map_owner = ri->map_owner;
3225         struct bpf_map *map = ri->map;
3226         u32 index = ri->ifindex;
3227         void *fwd = NULL;
3228         int err = 0;
3229
3230         ri->ifindex = 0;
3231         ri->map = NULL;
3232         ri->map_owner = 0;
3233
3234         if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
3235                 err = -EFAULT;
3236                 map = NULL;
3237                 goto err;
3238         }
3239         fwd = __xdp_map_lookup_elem(map, index);
3240         if (unlikely(!fwd)) {
3241                 err = -EINVAL;
3242                 goto err;
3243         }
3244
3245         if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
3246                 struct bpf_dtab_netdev *dst = fwd;
3247
3248                 err = dev_map_generic_redirect(dst, skb, xdp_prog);
3249                 if (unlikely(err))
3250                         goto err;
3251         } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3252                 struct xdp_sock *xs = fwd;
3253
3254                 err = xsk_generic_rcv(xs, xdp);
3255                 if (err)
3256                         goto err;
3257                 consume_skb(skb);
3258         } else {
3259                 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3260                 err = -EBADRQC;
3261                 goto err;
3262         }
3263
3264         _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3265         return 0;
3266 err:
3267         _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3268         return err;
3269 }
3270
3271 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
3272                             struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
3273 {
3274         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3275         u32 index = ri->ifindex;
3276         struct net_device *fwd;
3277         int err = 0;
3278
3279         if (ri->map)
3280                 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
3281
3282         ri->ifindex = 0;
3283         fwd = dev_get_by_index_rcu(dev_net(dev), index);
3284         if (unlikely(!fwd)) {
3285                 err = -EINVAL;
3286                 goto err;
3287         }
3288
3289         if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
3290                 goto err;
3291
3292         skb->dev = fwd;
3293         _trace_xdp_redirect(dev, xdp_prog, index);
3294         generic_xdp_tx(skb, xdp_prog);
3295         return 0;
3296 err:
3297         _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3298         return err;
3299 }
3300 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
3301
3302 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3303 {
3304         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3305
3306         if (unlikely(flags))
3307                 return XDP_ABORTED;
3308
3309         ri->ifindex = ifindex;
3310         ri->flags = flags;
3311         ri->map = NULL;
3312         ri->map_owner = 0;
3313
3314         return XDP_REDIRECT;
3315 }
3316
3317 static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3318         .func           = bpf_xdp_redirect,
3319         .gpl_only       = false,
3320         .ret_type       = RET_INTEGER,
3321         .arg1_type      = ARG_ANYTHING,
3322         .arg2_type      = ARG_ANYTHING,
3323 };
3324
3325 BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
3326            unsigned long, map_owner)
3327 {
3328         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
3329
3330         if (unlikely(flags))
3331                 return XDP_ABORTED;
3332
3333         ri->ifindex = ifindex;
3334         ri->flags = flags;
3335         ri->map = map;
3336         ri->map_owner = map_owner;
3337
3338         return XDP_REDIRECT;
3339 }
3340
3341 /* Note, arg4 is hidden from users and populated by the verifier
3342  * with the right pointer.
3343  */
3344 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
3345         .func           = bpf_xdp_redirect_map,
3346         .gpl_only       = false,
3347         .ret_type       = RET_INTEGER,
3348         .arg1_type      = ARG_CONST_MAP_PTR,
3349         .arg2_type      = ARG_ANYTHING,
3350         .arg3_type      = ARG_ANYTHING,
3351 };
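
/* Usage sketch (illustrative, not part of this file): redirecting every
 * packet out through slot 0 of a DEVMAP. From the program's point of
 * view bpf_redirect_map() takes three arguments; the hidden map_owner
 * is filled in by the verifier as noted above. Map and program names
 * are hypothetical.
 *
 *      struct bpf_map_def SEC("maps") tx_port = {
 *              .type        = BPF_MAP_TYPE_DEVMAP,
 *              .key_size    = sizeof(__u32),
 *              .value_size  = sizeof(__u32),
 *              .max_entries = 1,
 *      };
 *
 *      SEC("xdp")
 *      int xdp_redir(struct xdp_md *ctx)
 *      {
 *              return bpf_redirect_map(&tx_port, 0, 0);
 *      }
 *
 * User space is expected to populate tx_port[0] with the egress ifindex
 * before attaching the program; an empty slot makes the lookup in
 * xdp_do_redirect_map() fail and the packet is dropped.
 */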
3352
3353 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
3354                                   unsigned long off, unsigned long len)
3355 {
3356         void *ptr = skb_header_pointer(skb, off, len, dst_buff);
3357
3358         if (unlikely(!ptr))
3359                 return len;
3360         if (ptr != dst_buff)
3361                 memcpy(dst_buff, ptr, len);
3362
3363         return 0;
3364 }
3365
3366 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
3367            u64, flags, void *, meta, u64, meta_size)
3368 {
3369         u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
3370
3371         if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3372                 return -EINVAL;
3373         if (unlikely(skb_size > skb->len))
3374                 return -EFAULT;
3375
3376         return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
3377                                 bpf_skb_copy);
3378 }
3379
3380 static const struct bpf_func_proto bpf_skb_event_output_proto = {
3381         .func           = bpf_skb_event_output,
3382         .gpl_only       = true,
3383         .ret_type       = RET_INTEGER,
3384         .arg1_type      = ARG_PTR_TO_CTX,
3385         .arg2_type      = ARG_CONST_MAP_PTR,
3386         .arg3_type      = ARG_ANYTHING,
3387         .arg4_type      = ARG_PTR_TO_MEM,
3388         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
3389 };
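
/* Usage sketch (illustrative, not part of this file): pushing a small
 * record plus the first packet bytes to a perf event array. The upper
 * 32 bits of the flags word (BPF_F_CTXLEN_MASK) select how many packet
 * bytes bpf_skb_copy() appends; as checked above, that length must not
 * exceed skb->len. Names and sizes are hypothetical.
 *
 *      struct bpf_map_def SEC("maps") events = {
 *              .type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *              .key_size    = sizeof(int),
 *              .value_size  = sizeof(__u32),
 *              .max_entries = 64,
 *      };
 *
 *      SEC("tc")
 *      int sample(struct __sk_buff *skb)
 *      {
 *              __u64 sample_len = skb->len < 64 ? skb->len : 64;
 *              __u32 meta = skb->len;
 *
 *              bpf_perf_event_output(skb, &events,
 *                                    BPF_F_CURRENT_CPU | (sample_len << 32),
 *                                    &meta, sizeof(meta));
 *              return TC_ACT_OK;
 *      }
 */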
3390
3391 static unsigned short bpf_tunnel_key_af(u64 flags)
3392 {
3393         return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
3394 }
3395
3396 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
3397            u32, size, u64, flags)
3398 {
3399         const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3400         u8 compat[sizeof(struct bpf_tunnel_key)];
3401         void *to_orig = to;
3402         int err;
3403
3404         if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
3405                 err = -EINVAL;
3406                 goto err_clear;
3407         }
3408         if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
3409                 err = -EPROTO;
3410                 goto err_clear;
3411         }
3412         if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3413                 err = -EINVAL;
3414                 switch (size) {
3415                 case offsetof(struct bpf_tunnel_key, tunnel_label):
3416                 case offsetof(struct bpf_tunnel_key, tunnel_ext):
3417                         goto set_compat;
3418                 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3419                         /* Fixup deprecated structure layouts here, so we have
3420                          * a common path later on.
3421                          */
3422                         if (ip_tunnel_info_af(info) != AF_INET)
3423                                 goto err_clear;
3424 set_compat:
3425                         to = (struct bpf_tunnel_key *)compat;
3426                         break;
3427                 default:
3428                         goto err_clear;
3429                 }
3430         }
3431
3432         to->tunnel_id = be64_to_cpu(info->key.tun_id);
3433         to->tunnel_tos = info->key.tos;
3434         to->tunnel_ttl = info->key.ttl;
3435         to->tunnel_ext = 0;
3436
3437         if (flags & BPF_F_TUNINFO_IPV6) {
3438                 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
3439                        sizeof(to->remote_ipv6));
3440                 to->tunnel_label = be32_to_cpu(info->key.label);
3441         } else {
3442                 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
3443                 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
3444                 to->tunnel_label = 0;
3445         }
3446
3447         if (unlikely(size != sizeof(struct bpf_tunnel_key)))
3448                 memcpy(to_orig, to, size);
3449
3450         return 0;
3451 err_clear:
3452         memset(to_orig, 0, size);
3453         return err;
3454 }
3455
3456 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
3457         .func           = bpf_skb_get_tunnel_key,
3458         .gpl_only       = false,
3459         .ret_type       = RET_INTEGER,
3460         .arg1_type      = ARG_PTR_TO_CTX,
3461         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
3462         .arg3_type      = ARG_CONST_SIZE,
3463         .arg4_type      = ARG_ANYTHING,
3464 };
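
/* Usage sketch (illustrative, not part of this file): reading the
 * tunnel key on the ingress of a collect_md tunnel device (e.g. a
 * VXLAN device created with external metadata collection) and dropping
 * everything but one VNI. The tunnel id is purely illustrative.
 *
 *      SEC("tc")
 *      int tun_in(struct __sk_buff *skb)
 *      {
 *              struct bpf_tunnel_key key = {};
 *
 *              if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *                      return TC_ACT_SHOT;
 *              return key.tunnel_id == 42 ? TC_ACT_OK : TC_ACT_SHOT;
 *      }
 */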
3465
3466 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
3467 {
3468         const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3469         int err;
3470
3471         if (unlikely(!info ||
3472                      !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
3473                 err = -ENOENT;
3474                 goto err_clear;
3475         }
3476         if (unlikely(size < info->options_len)) {
3477                 err = -ENOMEM;
3478                 goto err_clear;
3479         }
3480
3481         ip_tunnel_info_opts_get(to, info);
3482         if (size > info->options_len)
3483                 memset(to + info->options_len, 0, size - info->options_len);
3484
3485         return info->options_len;
3486 err_clear:
3487         memset(to, 0, size);
3488         return err;
3489 }
3490
3491 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
3492         .func           = bpf_skb_get_tunnel_opt,
3493         .gpl_only       = false,
3494         .ret_type       = RET_INTEGER,
3495         .arg1_type      = ARG_PTR_TO_CTX,
3496         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
3497         .arg3_type      = ARG_CONST_SIZE,
3498 };
3499
3500 static struct metadata_dst __percpu *md_dst;
3501
3502 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3503            const struct bpf_tunnel_key *, from, u32, size, u64, flags)
3504 {
3505         struct metadata_dst *md = this_cpu_ptr(md_dst);
3506         u8 compat[sizeof(struct bpf_tunnel_key)];
3507         struct ip_tunnel_info *info;
3508
3509         if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
3510                                BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
3511                 return -EINVAL;
3512         if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3513                 switch (size) {
3514                 case offsetof(struct bpf_tunnel_key, tunnel_label):
3515                 case offsetof(struct bpf_tunnel_key, tunnel_ext):
3516                 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3517                         /* Fixup deprecated structure layouts here, so we have
3518                          * a common path later on.
3519                          */
3520                         memcpy(compat, from, size);
3521                         memset(compat + size, 0, sizeof(compat) - size);
3522                         from = (const struct bpf_tunnel_key *) compat;
3523                         break;
3524                 default:
3525                         return -EINVAL;
3526                 }
3527         }
3528         if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3529                      from->tunnel_ext))
3530                 return -EINVAL;
3531
3532         skb_dst_drop(skb);
3533         dst_hold((struct dst_entry *) md);
3534         skb_dst_set(skb, (struct dst_entry *) md);
3535
3536         info = &md->u.tun_info;
3537         memset(info, 0, sizeof(*info));
3538         info->mode = IP_TUNNEL_INFO_TX;
3539
3540         info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
3541         if (flags & BPF_F_DONT_FRAGMENT)
3542                 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
3543         if (flags & BPF_F_ZERO_CSUM_TX)
3544                 info->key.tun_flags &= ~TUNNEL_CSUM;
3545         if (flags & BPF_F_SEQ_NUMBER)
3546                 info->key.tun_flags |= TUNNEL_SEQ;
3547
3548         info->key.tun_id = cpu_to_be64(from->tunnel_id);
3549         info->key.tos = from->tunnel_tos;
3550         info->key.ttl = from->tunnel_ttl;
3551
3552         if (flags & BPF_F_TUNINFO_IPV6) {
3553                 info->mode |= IP_TUNNEL_INFO_IPV6;
3554                 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3555                        sizeof(from->remote_ipv6));
3556                 info->key.label = cpu_to_be32(from->tunnel_label) &
3557                                   IPV6_FLOWLABEL_MASK;
3558         } else {
3559                 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
3560         }
3561
3562         return 0;
3563 }
3564
3565 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
3566         .func           = bpf_skb_set_tunnel_key,
3567         .gpl_only       = false,
3568         .ret_type       = RET_INTEGER,
3569         .arg1_type      = ARG_PTR_TO_CTX,
3570         .arg2_type      = ARG_PTR_TO_MEM,
3571         .arg3_type      = ARG_CONST_SIZE,
3572         .arg4_type      = ARG_ANYTHING,
3573 };
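
/* Usage sketch (illustrative, not part of this file): setting the
 * tunnel key on egress before the packet hits a collect_md tunnel
 * device. Addresses are supplied in host byte order, since the helper
 * converts with cpu_to_be32() above; 0xac100164 is 172.16.1.100 and
 * purely illustrative.
 *
 *      SEC("tc")
 *      int tun_out(struct __sk_buff *skb)
 *      {
 *              struct bpf_tunnel_key key = {};
 *
 *              key.tunnel_id   = 42;
 *              key.remote_ipv4 = 0xac100164;
 *              key.tunnel_ttl  = 64;
 *              if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *                                         BPF_F_ZERO_CSUM_TX))
 *                      return TC_ACT_SHOT;
 *              return TC_ACT_OK;
 *      }
 */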
3574
3575 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
3576            const u8 *, from, u32, size)
3577 {
3578         struct ip_tunnel_info *info = skb_tunnel_info(skb);
3579         const struct metadata_dst *md = this_cpu_ptr(md_dst);
3580
3581         if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
3582                 return -EINVAL;
3583         if (unlikely(size > IP_TUNNEL_OPTS_MAX))
3584                 return -ENOMEM;
3585
3586         ip_tunnel_info_opts_set(info, from, size);
3587
3588         return 0;
3589 }
3590
3591 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
3592         .func           = bpf_skb_set_tunnel_opt,
3593         .gpl_only       = false,
3594         .ret_type       = RET_INTEGER,
3595         .arg1_type      = ARG_PTR_TO_CTX,
3596         .arg2_type      = ARG_PTR_TO_MEM,
3597         .arg3_type      = ARG_CONST_SIZE,
3598 };
3599
3600 static const struct bpf_func_proto *
3601 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
3602 {
3603         if (!md_dst) {
3604                 struct metadata_dst __percpu *tmp;
3605
3606                 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
3607                                                 METADATA_IP_TUNNEL,
3608                                                 GFP_KERNEL);
3609                 if (!tmp)
3610                         return NULL;
3611                 if (cmpxchg(&md_dst, NULL, tmp))
3612                         metadata_dst_free_percpu(tmp);
3613         }
3614
3615         switch (which) {
3616         case BPF_FUNC_skb_set_tunnel_key:
3617                 return &bpf_skb_set_tunnel_key_proto;
3618         case BPF_FUNC_skb_set_tunnel_opt:
3619                 return &bpf_skb_set_tunnel_opt_proto;
3620         default:
3621                 return NULL;
3622         }
3623 }
3624
3625 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
3626            u32, idx)
3627 {
3628         struct bpf_array *array = container_of(map, struct bpf_array, map);
3629         struct cgroup *cgrp;
3630         struct sock *sk;
3631
3632         sk = skb_to_full_sk(skb);
3633         if (!sk || !sk_fullsock(sk))
3634                 return -ENOENT;
3635         if (unlikely(idx >= array->map.max_entries))
3636                 return -E2BIG;
3637
3638         cgrp = READ_ONCE(array->ptrs[idx]);
3639         if (unlikely(!cgrp))
3640                 return -EAGAIN;
3641
3642         return sk_under_cgroup_hierarchy(sk, cgrp);
3643 }
3644
3645 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
3646         .func           = bpf_skb_under_cgroup,
3647         .gpl_only       = false,
3648         .ret_type       = RET_INTEGER,
3649         .arg1_type      = ARG_PTR_TO_CTX,
3650         .arg2_type      = ARG_CONST_MAP_PTR,
3651         .arg3_type      = ARG_ANYTHING,
3652 };
3653
3654 #ifdef CONFIG_SOCK_CGROUP_DATA
3655 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
3656 {
3657         struct sock *sk = skb_to_full_sk(skb);
3658         struct cgroup *cgrp;
3659
3660         if (!sk || !sk_fullsock(sk))
3661                 return 0;
3662
3663         cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
3664         return cgrp->kn->id.id;
3665 }
3666
3667 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
3668         .func           = bpf_skb_cgroup_id,
3669         .gpl_only       = false,
3670         .ret_type       = RET_INTEGER,
3671         .arg1_type      = ARG_PTR_TO_CTX,
3672 };
3673 #endif
3674
3675 static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
3676                                   unsigned long off, unsigned long len)
3677 {
3678         memcpy(dst_buff, src_buff + off, len);
3679         return 0;
3680 }
3681
3682 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
3683            u64, flags, void *, meta, u64, meta_size)
3684 {
3685         u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
3686
3687         if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3688                 return -EINVAL;
3689         if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
3690                 return -EFAULT;
3691
3692         return bpf_event_output(map, flags, meta, meta_size, xdp->data,
3693                                 xdp_size, bpf_xdp_copy);
3694 }
3695
3696 static const struct bpf_func_proto bpf_xdp_event_output_proto = {
3697         .func           = bpf_xdp_event_output,
3698         .gpl_only       = true,
3699         .ret_type       = RET_INTEGER,
3700         .arg1_type      = ARG_PTR_TO_CTX,
3701         .arg2_type      = ARG_CONST_MAP_PTR,
3702         .arg3_type      = ARG_ANYTHING,
3703         .arg4_type      = ARG_PTR_TO_MEM,
3704         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
3705 };
3706
3707 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
3708 {
3709         return skb->sk ? sock_gen_cookie(skb->sk) : 0;
3710 }
3711
3712 static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
3713         .func           = bpf_get_socket_cookie,
3714         .gpl_only       = false,
3715         .ret_type       = RET_INTEGER,
3716         .arg1_type      = ARG_PTR_TO_CTX,
3717 };
3718
3719 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
3720 {
3721         struct sock *sk = sk_to_full_sk(skb->sk);
3722         kuid_t kuid;
3723
3724         if (!sk || !sk_fullsock(sk))
3725                 return overflowuid;
3726         kuid = sock_net_uid(sock_net(sk), sk);
3727         return from_kuid_munged(sock_net(sk)->user_ns, kuid);
3728 }
3729
3730 static const struct bpf_func_proto bpf_get_socket_uid_proto = {
3731         .func           = bpf_get_socket_uid,
3732         .gpl_only       = false,
3733         .ret_type       = RET_INTEGER,
3734         .arg1_type      = ARG_PTR_TO_CTX,
3735 };
3736
3737 BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3738            int, level, int, optname, char *, optval, int, optlen)
3739 {
3740         struct sock *sk = bpf_sock->sk;
3741         int ret = 0;
3742         int val;
3743
3744         if (!sk_fullsock(sk))
3745                 return -EINVAL;
3746
3747         if (level == SOL_SOCKET) {
3748                 if (optlen != sizeof(int))
3749                         return -EINVAL;
3750                 val = *((int *)optval);
3751
3752                 /* Only some socket options are supported */
3753                 switch (optname) {
3754                 case SO_RCVBUF:
3755                         sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
3756                         sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
3757                         break;
3758                 case SO_SNDBUF:
3759                         sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
3760                         sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
3761                         break;
3762                 case SO_MAX_PACING_RATE:
3763                         sk->sk_max_pacing_rate = val;
3764                         sk->sk_pacing_rate = min(sk->sk_pacing_rate,
3765                                                  sk->sk_max_pacing_rate);
3766                         break;
3767                 case SO_PRIORITY:
3768                         sk->sk_priority = val;
3769                         break;
3770                 case SO_RCVLOWAT:
3771                         if (val < 0)
3772                                 val = INT_MAX;
3773                         sk->sk_rcvlowat = val ? : 1;
3774                         break;
3775                 case SO_MARK:
3776                         sk->sk_mark = val;
3777                         break;
3778                 default:
3779                         ret = -EINVAL;
3780                 }
3781 #ifdef CONFIG_INET
3782         } else if (level == SOL_IP) {
3783                 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
3784                         return -EINVAL;
3785
3786                 val = *((int *)optval);
3787                 /* Only some options are supported */
3788                 switch (optname) {
3789                 case IP_TOS:
3790                         if (val < -1 || val > 0xff) {
3791                                 ret = -EINVAL;
3792                         } else {
3793                                 struct inet_sock *inet = inet_sk(sk);
3794
3795                                 if (val == -1)
3796                                         val = 0;
3797                                 inet->tos = val;
3798                         }
3799                         break;
3800                 default:
3801                         ret = -EINVAL;
3802                 }
3803 #if IS_ENABLED(CONFIG_IPV6)
3804         } else if (level == SOL_IPV6) {
3805                 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
3806                         return -EINVAL;
3807
3808                 val = *((int *)optval);
3809                 /* Only some options are supported */
3810                 switch (optname) {
3811                 case IPV6_TCLASS:
3812                         if (val < -1 || val > 0xff) {
3813                                 ret = -EINVAL;
3814                         } else {
3815                                 struct ipv6_pinfo *np = inet6_sk(sk);
3816
3817                                 if (val == -1)
3818                                         val = 0;
3819                                 np->tclass = val;
3820                         }
3821                         break;
3822                 default:
3823                         ret = -EINVAL;
3824                 }
3825 #endif
3826         } else if (level == SOL_TCP &&
3827                    sk->sk_prot->setsockopt == tcp_setsockopt) {
3828                 if (optname == TCP_CONGESTION) {
3829                         char name[TCP_CA_NAME_MAX];
3830                         bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
3831
3832                         strncpy(name, optval, min_t(long, optlen,
3833                                                     TCP_CA_NAME_MAX - 1));
3834                         name[TCP_CA_NAME_MAX - 1] = 0;
3835                         ret = tcp_set_congestion_control(sk, name, false,
3836                                                          reinit);
3837                 } else {
3838                         struct tcp_sock *tp = tcp_sk(sk);
3839
3840                         if (optlen != sizeof(int))
3841                                 return -EINVAL;
3842
3843                         val = *((int *)optval);
3844                         /* Only some options are supported */
3845                         switch (optname) {
3846                         case TCP_BPF_IW:
3847                                 if (val <= 0 || tp->data_segs_out > 0)
3848                                         ret = -EINVAL;
3849                                 else
3850                                         tp->snd_cwnd = val;
3851                                 break;
3852                         case TCP_BPF_SNDCWND_CLAMP:
3853                                 if (val <= 0) {
3854                                         ret = -EINVAL;
3855                                 } else {
3856                                         tp->snd_cwnd_clamp = val;
3857                                         tp->snd_ssthresh = val;
3858                                 }
3859                                 break;
3860                         default:
3861                                 ret = -EINVAL;
3862                         }
3863                 }
3864 #endif
3865         } else {
3866                 ret = -EINVAL;
3867         }
3868         return ret;
3869 }
3870
3871 static const struct bpf_func_proto bpf_setsockopt_proto = {
3872         .func           = bpf_setsockopt,
3873         .gpl_only       = false,
3874         .ret_type       = RET_INTEGER,
3875         .arg1_type      = ARG_PTR_TO_CTX,
3876         .arg2_type      = ARG_ANYTHING,
3877         .arg3_type      = ARG_ANYTHING,
3878         .arg4_type      = ARG_PTR_TO_MEM,
3879         .arg5_type      = ARG_CONST_SIZE,
3880 };
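
/* Usage sketch (illustrative, not part of this file): a sockops program
 * that bumps the send buffer and switches congestion control once an
 * active connection is established. SOL_SOCKET/SO_SNDBUF and
 * SOL_TCP/TCP_CONGESTION are among the options accepted above; the
 * constants are assumed from the usual socket headers and the values
 * are hypothetical.
 *
 *      SEC("sockops")
 *      int set_opts(struct bpf_sock_ops *skops)
 *      {
 *              int bufsize = 150000;
 *              char cc[] = "reno";
 *
 *              if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) {
 *                      bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF,
 *                                     &bufsize, sizeof(bufsize));
 *                      bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *                                     cc, sizeof(cc));
 *              }
 *              return 1;
 *      }
 */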
3881
3882 BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3883            int, level, int, optname, char *, optval, int, optlen)
3884 {
3885         struct sock *sk = bpf_sock->sk;
3886
3887         if (!sk_fullsock(sk))
3888                 goto err_clear;
3889
3890 #ifdef CONFIG_INET
3891         if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
3892                 if (optname == TCP_CONGESTION) {
3893                         struct inet_connection_sock *icsk = inet_csk(sk);
3894
3895                         if (!icsk->icsk_ca_ops || optlen <= 1)
3896                                 goto err_clear;
3897                         strncpy(optval, icsk->icsk_ca_ops->name, optlen);
3898                         optval[optlen - 1] = 0;
3899                 } else {
3900                         goto err_clear;
3901                 }
3902         } else if (level == SOL_IP) {
3903                 struct inet_sock *inet = inet_sk(sk);
3904
3905                 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
3906                         goto err_clear;
3907
3908                 /* Only some options are supported */
3909                 switch (optname) {
3910                 case IP_TOS:
3911                         *((int *)optval) = (int)inet->tos;
3912                         break;
3913                 default:
3914                         goto err_clear;
3915                 }
3916 #if IS_ENABLED(CONFIG_IPV6)
3917         } else if (level == SOL_IPV6) {
3918                 struct ipv6_pinfo *np = inet6_sk(sk);
3919
3920                 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
3921                         goto err_clear;
3922
3923                 /* Only some options are supported */
3924                 switch (optname) {
3925                 case IPV6_TCLASS:
3926                         *((int *)optval) = (int)np->tclass;
3927                         break;
3928                 default:
3929                         goto err_clear;
3930                 }
3931 #endif
3932         } else {
3933                 goto err_clear;
3934         }
3935         return 0;
3936 #endif
3937 err_clear:
3938         memset(optval, 0, optlen);
3939         return -EINVAL;
3940 }
3941
3942 static const struct bpf_func_proto bpf_getsockopt_proto = {
3943         .func           = bpf_getsockopt,
3944         .gpl_only       = false,
3945         .ret_type       = RET_INTEGER,
3946         .arg1_type      = ARG_PTR_TO_CTX,
3947         .arg2_type      = ARG_ANYTHING,
3948         .arg3_type      = ARG_ANYTHING,
3949         .arg4_type      = ARG_PTR_TO_UNINIT_MEM,
3950         .arg5_type      = ARG_CONST_SIZE,
3951 };
3952
3953 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
3954            int, argval)
3955 {
3956         struct sock *sk = bpf_sock->sk;
3957         int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
3958
3959         if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
3960                 return -EINVAL;
3961
3962         if (val)
3963                 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
3964
3965         return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
3966 }
3967
3968 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
3969         .func           = bpf_sock_ops_cb_flags_set,
3970         .gpl_only       = false,
3971         .ret_type       = RET_INTEGER,
3972         .arg1_type      = ARG_PTR_TO_CTX,
3973         .arg2_type      = ARG_ANYTHING,
3974 };
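
/* Usage sketch (illustrative, not part of this file): a sockops
 * fragment enabling RTO and retransmit callbacks on an established
 * socket. Per the masking above, the return value carries back any
 * requested flags the kernel does not recognize, so zero means all
 * flags were accepted.
 *
 *      if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *              bpf_sock_ops_cb_flags_set(skops,
 *                                        BPF_SOCK_OPS_RTO_CB_FLAG |
 *                                        BPF_SOCK_OPS_RETRANS_CB_FLAG);
 */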
3975
3976 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
3977 EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
3978
3979 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
3980            int, addr_len)
3981 {
3982 #ifdef CONFIG_INET
3983         struct sock *sk = ctx->sk;
3984         int err;
3985
3986         /* Binding to port can be expensive so it's prohibited in the helper.
3987          * Only binding to IP is supported.
3988          */
3989         err = -EINVAL;
3990         if (addr->sa_family == AF_INET) {
3991                 if (addr_len < sizeof(struct sockaddr_in))
3992                         return err;
3993                 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3994                         return err;
3995                 return __inet_bind(sk, addr, addr_len, true, false);
3996 #if IS_ENABLED(CONFIG_IPV6)
3997         } else if (addr->sa_family == AF_INET6) {
3998                 if (addr_len < SIN6_LEN_RFC2133)
3999                         return err;
4000                 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4001                         return err;
4002                 /* ipv6_bpf_stub cannot be NULL here: this helper is called
4003                  * from the bpf_cgroup_inet6_connect hook, so ipv6 is loaded
4004                  */
4005                 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4006 #endif /* CONFIG_IPV6 */
4007         }
4008 #endif /* CONFIG_INET */
4009
4010         return -EAFNOSUPPORT;
4011 }
4012
4013 static const struct bpf_func_proto bpf_bind_proto = {
4014         .func           = bpf_bind,
4015         .gpl_only       = false,
4016         .ret_type       = RET_INTEGER,
4017         .arg1_type      = ARG_PTR_TO_CTX,
4018         .arg2_type      = ARG_PTR_TO_MEM,
4019         .arg3_type      = ARG_CONST_SIZE,
4020 };
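
/* Usage sketch (illustrative, not part of this file): a cgroup
 * connect4 program forcing outgoing connections to bind to a given
 * source address. sin_port stays 0 since binding to a port is
 * prohibited here; bpf_htonl() is assumed from bpf_endian.h and the
 * address (10.0.0.1) is purely illustrative.
 *
 *      SEC("cgroup/connect4")
 *      int connect_v4(struct bpf_sock_addr *ctx)
 *      {
 *              struct sockaddr_in sa = {};
 *
 *              sa.sin_family      = AF_INET;
 *              sa.sin_addr.s_addr = bpf_htonl(0x0a000001);
 *              if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *                      return 0;
 *              return 1;
 *      }
 */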
4021
4022 #ifdef CONFIG_XFRM
4023 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4024            struct bpf_xfrm_state *, to, u32, size, u64, flags)
4025 {
4026         const struct sec_path *sp = skb_sec_path(skb);
4027         const struct xfrm_state *x;
4028
4029         if (!sp || unlikely(index >= sp->len || flags))
4030                 goto err_clear;
4031
4032         x = sp->xvec[index];
4033
4034         if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4035                 goto err_clear;
4036
4037         to->reqid = x->props.reqid;
4038         to->spi = x->id.spi;
4039         to->family = x->props.family;
4040         to->ext = 0;
4041
4042         if (to->family == AF_INET6) {
4043                 memcpy(to->remote_ipv6, x->props.saddr.a6,
4044                        sizeof(to->remote_ipv6));
4045         } else {
4046                 to->remote_ipv4 = x->props.saddr.a4;
4047                 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
4048         }
4049
4050         return 0;
4051 err_clear:
4052         memset(to, 0, size);
4053         return -EINVAL;
4054 }
4055
4056 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4057         .func           = bpf_skb_get_xfrm_state,
4058         .gpl_only       = false,
4059         .ret_type       = RET_INTEGER,
4060         .arg1_type      = ARG_PTR_TO_CTX,
4061         .arg2_type      = ARG_ANYTHING,
4062         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
4063         .arg4_type      = ARG_CONST_SIZE,
4064         .arg5_type      = ARG_ANYTHING,
4065 };
4066 #endif
4067
4068 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4069 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4070                                   const struct neighbour *neigh,
4071                                   const struct net_device *dev)
4072 {
4073         memcpy(params->dmac, neigh->ha, ETH_ALEN);
4074         memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4075         params->h_vlan_TCI = 0;
4076         params->h_vlan_proto = 0;
4077         params->ifindex = dev->ifindex;
4078
4079         return 0;
4080 }
4081 #endif
4082
4083 #if IS_ENABLED(CONFIG_INET)
4084 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4085                                u32 flags, bool check_mtu)
4086 {
4087         struct in_device *in_dev;
4088         struct neighbour *neigh;
4089         struct net_device *dev;
4090         struct fib_result res;
4091         struct fib_nh *nh;
4092         struct flowi4 fl4;
4093         int err;
4094         u32 mtu;
4095
4096         dev = dev_get_by_index_rcu(net, params->ifindex);
4097         if (unlikely(!dev))
4098                 return -ENODEV;
4099
4100         /* verify forwarding is enabled on this interface */
4101         in_dev = __in_dev_get_rcu(dev);
4102         if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4103                 return BPF_FIB_LKUP_RET_FWD_DISABLED;
4104
4105         if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4106                 fl4.flowi4_iif = 1;
4107                 fl4.flowi4_oif = params->ifindex;
4108         } else {
4109                 fl4.flowi4_iif = params->ifindex;
4110                 fl4.flowi4_oif = 0;
4111         }
4112         fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4113         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4114         fl4.flowi4_flags = 0;
4115
4116         fl4.flowi4_proto = params->l4_protocol;
4117         fl4.daddr = params->ipv4_dst;
4118         fl4.saddr = params->ipv4_src;
4119         fl4.fl4_sport = params->sport;
4120         fl4.fl4_dport = params->dport;
4121
4122         if (flags & BPF_FIB_LOOKUP_DIRECT) {
4123                 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4124                 struct fib_table *tb;
4125
4126                 tb = fib_get_table(net, tbid);
4127                 if (unlikely(!tb))
4128                         return BPF_FIB_LKUP_RET_NOT_FWDED;
4129
4130                 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4131         } else {
4132                 fl4.flowi4_mark = 0;
4133                 fl4.flowi4_secid = 0;
4134                 fl4.flowi4_tun_key.tun_id = 0;
4135                 fl4.flowi4_uid = sock_net_uid(net, NULL);
4136
4137                 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4138         }
4139
4140         if (err) {
4141                 /* map fib lookup errors to BPF_FIB_LKUP_RET_* return codes */
4142                 if (err == -EINVAL)
4143                         return BPF_FIB_LKUP_RET_BLACKHOLE;
4144                 if (err == -EHOSTUNREACH)
4145                         return BPF_FIB_LKUP_RET_UNREACHABLE;
4146                 if (err == -EACCES)
4147                         return BPF_FIB_LKUP_RET_PROHIBIT;
4148
4149                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4150         }
4151
4152         if (res.type != RTN_UNICAST)
4153                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4154
4155         if (res.fi->fib_nhs > 1)
4156                 fib_select_path(net, &res, &fl4, NULL);
4157
4158         if (check_mtu) {
4159                 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
4160                 if (params->tot_len > mtu)
4161                         return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4162         }
4163
4164         nh = &res.fi->fib_nh[res.nh_sel];
4165
4166         /* do not handle lwt encaps right now */
4167         if (nh->nh_lwtstate)
4168                 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4169
4170         dev = nh->nh_dev;
4171         if (nh->nh_gw)
4172                 params->ipv4_dst = nh->nh_gw;
4173
4174         params->rt_metric = res.fi->fib_priority;
4175
4176         /* xdp and cls_bpf programs are run in RCU-bh so
4177          * rcu_read_lock_bh is not needed here
4178          */
4179         neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
4180         if (!neigh)
4181                 return BPF_FIB_LKUP_RET_NO_NEIGH;
4182
4183         return bpf_fib_set_fwd_params(params, neigh, dev);
4184 }
4185 #endif
4186
4187 #if IS_ENABLED(CONFIG_IPV6)
4188 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4189                                u32 flags, bool check_mtu)
4190 {
4191         struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4192         struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
4193         struct neighbour *neigh;
4194         struct net_device *dev;
4195         struct inet6_dev *idev;
4196         struct fib6_info *f6i;
4197         struct flowi6 fl6;
4198         int strict = 0;
4199         int oif;
4200         u32 mtu;
4201
4202         /* link local addresses are never forwarded */
4203         if (rt6_need_strict(dst) || rt6_need_strict(src))
4204                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4205
4206         dev = dev_get_by_index_rcu(net, params->ifindex);
4207         if (unlikely(!dev))
4208                 return -ENODEV;
4209
4210         idev = __in6_dev_get_safely(dev);
4211         if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
4212                 return BPF_FIB_LKUP_RET_FWD_DISABLED;
4213
4214         if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4215                 fl6.flowi6_iif = 1;
4216                 oif = fl6.flowi6_oif = params->ifindex;
4217         } else {
4218                 oif = fl6.flowi6_iif = params->ifindex;
4219                 fl6.flowi6_oif = 0;
4220                 strict = RT6_LOOKUP_F_HAS_SADDR;
4221         }
4222         fl6.flowlabel = params->flowinfo;
4223         fl6.flowi6_scope = 0;
4224         fl6.flowi6_flags = 0;
4225         fl6.mp_hash = 0;
4226
4227         fl6.flowi6_proto = params->l4_protocol;
4228         fl6.daddr = *dst;
4229         fl6.saddr = *src;
4230         fl6.fl6_sport = params->sport;
4231         fl6.fl6_dport = params->dport;
4232
4233         if (flags & BPF_FIB_LOOKUP_DIRECT) {
4234                 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4235                 struct fib6_table *tb;
4236
4237                 tb = ipv6_stub->fib6_get_table(net, tbid);
4238                 if (unlikely(!tb))
4239                         return BPF_FIB_LKUP_RET_NOT_FWDED;
4240
4241                 f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
4242         } else {
4243                 fl6.flowi6_mark = 0;
4244                 fl6.flowi6_secid = 0;
4245                 fl6.flowi6_tun_key.tun_id = 0;
4246                 fl6.flowi6_uid = sock_net_uid(net, NULL);
4247
4248                 f6i = ipv6_stub->fib6_lookup(net, oif, &fl6, strict);
4249         }
4250
4251         if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
4252                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4253
4254         if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
4255                 switch (f6i->fib6_type) {
4256                 case RTN_BLACKHOLE:
4257                         return BPF_FIB_LKUP_RET_BLACKHOLE;
4258                 case RTN_UNREACHABLE:
4259                         return BPF_FIB_LKUP_RET_UNREACHABLE;
4260                 case RTN_PROHIBIT:
4261                         return BPF_FIB_LKUP_RET_PROHIBIT;
4262                 default:
4263                         return BPF_FIB_LKUP_RET_NOT_FWDED;
4264                 }
4265         }
4266
4267         if (f6i->fib6_type != RTN_UNICAST)
4268                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4269
4270         if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
4271                 f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
4272                                                        fl6.flowi6_oif, NULL,
4273                                                        strict);
4274
4275         if (check_mtu) {
4276                 mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
4277                 if (params->tot_len > mtu)
4278                         return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4279         }
4280
4281         if (f6i->fib6_nh.nh_lwtstate)
4282                 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4283
4284         if (f6i->fib6_flags & RTF_GATEWAY)
4285                 *dst = f6i->fib6_nh.nh_gw;
4286
4287         dev = f6i->fib6_nh.nh_dev;
4288         params->rt_metric = f6i->fib6_metric;
4289
4290         /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
4291          * not needed here. We cannot use __ipv6_neigh_lookup_noref here
4292          * because nd_tbl must be obtained via the stub.
4293          */
4294         neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
4295                                       ndisc_hashfn, dst, dev);
4296         if (!neigh)
4297                 return BPF_FIB_LKUP_RET_NO_NEIGH;
4298
4299         return bpf_fib_set_fwd_params(params, neigh, dev);
4300 }
4301 #endif
4302
4303 BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4304            struct bpf_fib_lookup *, params, int, plen, u32, flags)
4305 {
4306         if (plen < sizeof(*params))
4307                 return -EINVAL;
4308
4309         if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4310                 return -EINVAL;
4311
4312         switch (params->family) {
4313 #if IS_ENABLED(CONFIG_INET)
4314         case AF_INET:
4315                 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4316                                            flags, true);
4317 #endif
4318 #if IS_ENABLED(CONFIG_IPV6)
4319         case AF_INET6:
4320                 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4321                                            flags, true);
4322 #endif
4323         }
4324         return -EAFNOSUPPORT;
4325 }
4326
4327 static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4328         .func           = bpf_xdp_fib_lookup,
4329         .gpl_only       = true,
4330         .ret_type       = RET_INTEGER,
4331         .arg1_type      = ARG_PTR_TO_CTX,
4332         .arg2_type      = ARG_PTR_TO_MEM,
4333         .arg3_type      = ARG_CONST_SIZE,
4334         .arg4_type      = ARG_ANYTHING,
4335 };
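
/* Usage sketch (illustrative, not part of this file): a pared-down XDP
 * forwarder in the style of samples/bpf/xdp_fwd_kern.c. On a successful
 * lookup the helper has rewritten params with the egress ifindex and
 * dmac/smac, so the program only swaps in the new MAC addresses and
 * redirects. A real program must also validate and decrement the TTL;
 * linux/if_ether.h, linux/ip.h and bpf_endian.h are assumed.
 *
 *      SEC("xdp")
 *      int xdp_fwd(struct xdp_md *ctx)
 *      {
 *              void *data_end = (void *)(long)ctx->data_end;
 *              void *data = (void *)(long)ctx->data;
 *              struct bpf_fib_lookup params = {};
 *              struct ethhdr *eth = data;
 *              struct iphdr *iph;
 *
 *              if (data + sizeof(*eth) + sizeof(*iph) > data_end)
 *                      return XDP_PASS;
 *              if (eth->h_proto != bpf_htons(ETH_P_IP))
 *                      return XDP_PASS;
 *              iph = data + sizeof(*eth);
 *
 *              params.family      = AF_INET;
 *              params.tos         = iph->tos;
 *              params.l4_protocol = iph->protocol;
 *              params.tot_len     = bpf_ntohs(iph->tot_len);
 *              params.ipv4_src    = iph->saddr;
 *              params.ipv4_dst    = iph->daddr;
 *              params.ifindex     = ctx->ingress_ifindex;
 *
 *              if (bpf_fib_lookup(ctx, &params, sizeof(params), 0) !=
 *                  BPF_FIB_LKUP_RET_SUCCESS)
 *                      return XDP_PASS;
 *              __builtin_memcpy(eth->h_dest, params.dmac, ETH_ALEN);
 *              __builtin_memcpy(eth->h_source, params.smac, ETH_ALEN);
 *              return bpf_redirect(params.ifindex, 0);
 *      }
 */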
4336
4337 BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4338            struct bpf_fib_lookup *, params, int, plen, u32, flags)
4339 {
4340         struct net *net = dev_net(skb->dev);
4341         int rc = -EAFNOSUPPORT;
4342
4343         if (plen < sizeof(*params))
4344                 return -EINVAL;
4345
4346         if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4347                 return -EINVAL;
4348
4349         switch (params->family) {
4350 #if IS_ENABLED(CONFIG_INET)
4351         case AF_INET:
4352                 rc = bpf_ipv4_fib_lookup(net, params, flags, false);
4353                 break;
4354 #endif
4355 #if IS_ENABLED(CONFIG_IPV6)
4356         case AF_INET6:
4357                 rc = bpf_ipv6_fib_lookup(net, params, flags, false);
4358                 break;
4359 #endif
4360         }
4361
4362         if (!rc) {
4363                 struct net_device *dev;
4364
4365                 dev = dev_get_by_index_rcu(net, params->ifindex);
4366                 if (!is_skb_forwardable(dev, skb))
4367                         rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
4368         }
4369
4370         return rc;
4371 }
4372
4373 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4374         .func           = bpf_skb_fib_lookup,
4375         .gpl_only       = true,
4376         .ret_type       = RET_INTEGER,
4377         .arg1_type      = ARG_PTR_TO_CTX,
4378         .arg2_type      = ARG_PTR_TO_MEM,
4379         .arg3_type      = ARG_CONST_SIZE,
4380         .arg4_type      = ARG_ANYTHING,
4381 };
4382
4383 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4384 static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
4385 {
4386         int err;
4387         struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
4388
4389         if (!seg6_validate_srh(srh, len))
4390                 return -EINVAL;
4391
4392         switch (type) {
4393         case BPF_LWT_ENCAP_SEG6_INLINE:
4394                 if (skb->protocol != htons(ETH_P_IPV6))
4395                         return -EBADMSG;
4396
4397                 err = seg6_do_srh_inline(skb, srh);
4398                 break;
4399         case BPF_LWT_ENCAP_SEG6:
4400                 skb_reset_inner_headers(skb);
4401                 skb->encapsulation = 1;
4402                 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
4403                 break;
4404         default:
4405                 return -EINVAL;
4406         }
4407
4408         bpf_compute_data_pointers(skb);
4409         if (err)
4410                 return err;
4411
4412         ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4413         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4414
4415         return seg6_lookup_nexthop(skb, NULL, 0);
4416 }
4417 #endif /* CONFIG_IPV6_SEG6_BPF */
4418
4419 BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
4420            u32, len)
4421 {
4422         switch (type) {
4423 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4424         case BPF_LWT_ENCAP_SEG6:
4425         case BPF_LWT_ENCAP_SEG6_INLINE:
4426                 return bpf_push_seg6_encap(skb, type, hdr, len);
4427 #endif
4428         default:
4429                 return -EINVAL;
4430         }
4431 }
4432
4433 static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
4434         .func           = bpf_lwt_push_encap,
4435         .gpl_only       = false,
4436         .ret_type       = RET_INTEGER,
4437         .arg1_type      = ARG_PTR_TO_CTX,
4438         .arg2_type      = ARG_ANYTHING,
4439         .arg3_type      = ARG_PTR_TO_MEM,
4440         .arg4_type      = ARG_CONST_SIZE
4441 };
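
/* Usage sketch (illustrative, not part of this file): building a
 * one-segment SRH on the stack and encapsulating the packet in an
 * outer IPv6 + SRH header from an lwt_in program (attached with
 * something like "ip -6 route add ... encap bpf in obj ..."). The SID
 * is hypothetical; struct ipv6_sr_hdr is assumed from linux/seg6.h.
 * hdrlen counts 8-byte units excluding the first 8 bytes, so 8 + 16
 * bytes of SRH gives hdrlen = 2, matching seg6_validate_srh() above.
 *
 *      SEC("lwt_in")
 *      int encap_srh(struct __sk_buff *skb)
 *      {
 *              char buf[sizeof(struct ipv6_sr_hdr) +
 *                       sizeof(struct in6_addr)] = {};
 *              struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)buf;
 *
 *              srh->hdrlen        = 2;
 *              srh->type          = 4;
 *              srh->segments_left = 0;
 *              srh->first_segment = 0;
 *              srh->segments[0].s6_addr[0] = 0xfc;
 *
 *              if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6, srh,
 *                                     sizeof(buf)))
 *                      return BPF_DROP;
 *              return BPF_OK;
 *      }
 */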
4442
4443 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
4444            const void *, from, u32, len)
4445 {
4446 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4447         struct seg6_bpf_srh_state *srh_state =
4448                 this_cpu_ptr(&seg6_bpf_srh_states);
4449         void *srh_tlvs, *srh_end, *ptr;
4450         struct ipv6_sr_hdr *srh;
4451         int srhoff = 0;
4452
4453         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
4454                 return -EINVAL;
4455
4456         srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
4457         srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
4458         srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
4459
4460         ptr = skb->data + offset;
4461         if (ptr >= srh_tlvs && ptr + len <= srh_end)
4462                 srh_state->valid = 0;
4463         else if (ptr < (void *)&srh->flags ||
4464                  ptr + len > (void *)&srh->segments)
4465                 return -EFAULT;
4466
4467         if (unlikely(bpf_try_make_writable(skb, offset + len)))
4468                 return -EFAULT;
4469
4470         memcpy(skb->data + offset, from, len);
4471         return 0;
4472 #else /* CONFIG_IPV6_SEG6_BPF */
4473         return -EOPNOTSUPP;
4474 #endif
4475 }
4476
4477 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
4478         .func           = bpf_lwt_seg6_store_bytes,
4479         .gpl_only       = false,
4480         .ret_type       = RET_INTEGER,
4481         .arg1_type      = ARG_PTR_TO_CTX,
4482         .arg2_type      = ARG_ANYTHING,
4483         .arg3_type      = ARG_PTR_TO_MEM,
4484         .arg4_type      = ARG_CONST_SIZE
4485 };
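
/* Usage sketch (illustrative only): overwrite a TLV inside the SRH from a
 * seg6local End.BPF program. tlv_off is assumed to have been computed by
 * walking the SRH starting at sizeof(struct ipv6hdr); the helper itself
 * rejects offsets outside the TLV area (or the flags/tag fields).
 *
 *	__u8 pad4[4] = {};	(e.g. a Pad4 TLV, zero-filled)
 *
 *	if (bpf_lwt_seg6_store_bytes(skb, tlv_off, pad4, sizeof(pad4)))
 *		return BPF_DROP;
 *	return BPF_OK;
 */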
4486
4487 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
4488            u32, action, void *, param, u32, param_len)
4489 {
4490 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4491         struct seg6_bpf_srh_state *srh_state =
4492                 this_cpu_ptr(&seg6_bpf_srh_states);
4493         struct ipv6_sr_hdr *srh;
4494         int srhoff = 0;
4495         int err;
4496
4497         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
4498                 return -EINVAL;
4499         srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
4500
4501         if (!srh_state->valid) {
4502                 if (unlikely((srh_state->hdrlen & 7) != 0))
4503                         return -EBADMSG;
4504
4505                 srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
4506                 if (unlikely(!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3)))
4507                         return -EBADMSG;
4508
4509                 srh_state->valid = 1;
4510         }
4511
4512         switch (action) {
4513         case SEG6_LOCAL_ACTION_END_X:
4514                 if (param_len != sizeof(struct in6_addr))
4515                         return -EINVAL;
4516                 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
4517         case SEG6_LOCAL_ACTION_END_T:
4518                 if (param_len != sizeof(int))
4519                         return -EINVAL;
4520                 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
4521         case SEG6_LOCAL_ACTION_END_B6:
4522                 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
4523                                           param, param_len);
4524                 if (!err)
4525                         srh_state->hdrlen =
4526                                 ((struct ipv6_sr_hdr *)param)->hdrlen << 3;
4527                 return err;
4528         case SEG6_LOCAL_ACTION_END_B6_ENCAP:
4529                 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
4530                                           param, param_len);
4531                 if (!err)
4532                         srh_state->hdrlen =
4533                                 ((struct ipv6_sr_hdr *)param)->hdrlen << 3;
4534                 return err;
4535         default:
4536                 return -EINVAL;
4537         }
4538 #else /* CONFIG_IPV6_SEG6_BPF */
4539         return -EOPNOTSUPP;
4540 #endif
4541 }
4542
4543 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
4544         .func           = bpf_lwt_seg6_action,
4545         .gpl_only       = false,
4546         .ret_type       = RET_INTEGER,
4547         .arg1_type      = ARG_PTR_TO_CTX,
4548         .arg2_type      = ARG_ANYTHING,
4549         .arg3_type      = ARG_PTR_TO_MEM,
4550         .arg4_type      = ARG_CONST_SIZE
4551 };
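
/* Usage sketch (illustrative only): an End.X behaviour implemented from a
 * seg6local End.BPF program. The next-hop address is an assumption for the
 * example; on success the helper has already installed the new dst entry,
 * so BPF_REDIRECT tells the seg6local code to skip its own nexthop lookup.
 *
 *	SEC("lwt_seg6local")
 *	int do_end_x(struct __sk_buff *skb)
 *	{
 *		struct in6_addr nh = { ... };	(layer-3 next hop, assumed)
 *
 *		if (bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
 *					&nh, sizeof(nh)))
 *			return BPF_DROP;
 *		return BPF_REDIRECT;
 *	}
 */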
4552
4553 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
4554            s32, len)
4555 {
4556 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4557         struct seg6_bpf_srh_state *srh_state =
4558                 this_cpu_ptr(&seg6_bpf_srh_states);
4559         void *srh_end, *srh_tlvs, *ptr;
4560         struct ipv6_sr_hdr *srh;
4561         struct ipv6hdr *hdr;
4562         int srhoff = 0;
4563         int ret;
4564
4565         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
4566                 return -EINVAL;
4567         srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
4568
4569         srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
4570                         ((srh->first_segment + 1) << 4));
4571         srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
4572                         srh_state->hdrlen);
4573         ptr = skb->data + offset;
4574
4575         if (unlikely(ptr < srh_tlvs || ptr > srh_end))
4576                 return -EFAULT;
4577         if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
4578                 return -EFAULT;
4579
4580         if (len > 0) {
4581                 ret = skb_cow_head(skb, len);
4582                 if (unlikely(ret < 0))
4583                         return ret;
4584
4585                 ret = bpf_skb_net_hdr_push(skb, offset, len);
4586         } else {
4587                 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
4588         }
4589
4590         bpf_compute_data_pointers(skb);
4591         if (unlikely(ret < 0))
4592                 return ret;
4593
4594         hdr = (struct ipv6hdr *)skb->data;
4595         hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4596
4597         srh_state->hdrlen += len;
4598         srh_state->valid = 0;
4599         return 0;
4600 #else /* CONFIG_IPV6_SEG6_BPF */
4601         return -EOPNOTSUPP;
4602 #endif
4603 }
4604
4605 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
4606         .func           = bpf_lwt_seg6_adjust_srh,
4607         .gpl_only       = false,
4608         .ret_type       = RET_INTEGER,
4609         .arg1_type      = ARG_PTR_TO_CTX,
4610         .arg2_type      = ARG_ANYTHING,
4611         .arg3_type      = ARG_ANYTHING,
4612 };
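
/* Usage sketch (illustrative only): grow the TLV area by 8 bytes and then
 * write a new TLV there, pairing this helper with
 * bpf_lwt_seg6_store_bytes(). tlv_off and the TLV contents are assumed.
 *
 *	if (bpf_lwt_seg6_adjust_srh(skb, tlv_off, 8))
 *		return BPF_DROP;
 *	if (bpf_lwt_seg6_store_bytes(skb, tlv_off, &tlv, 8))
 *		return BPF_DROP;
 *	return BPF_OK;
 */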
4613
4614 bool bpf_helper_changes_pkt_data(void *func)
4615 {
4616         if (func == bpf_skb_vlan_push ||
4617             func == bpf_skb_vlan_pop ||
4618             func == bpf_skb_store_bytes ||
4619             func == bpf_skb_change_proto ||
4620             func == bpf_skb_change_head ||
4621             func == bpf_skb_change_tail ||
4622             func == bpf_skb_adjust_room ||
4623             func == bpf_skb_pull_data ||
4624             func == bpf_clone_redirect ||
4625             func == bpf_l3_csum_replace ||
4626             func == bpf_l4_csum_replace ||
4627             func == bpf_xdp_adjust_head ||
4628             func == bpf_xdp_adjust_meta ||
4629             func == bpf_msg_pull_data ||
4630             func == bpf_xdp_adjust_tail ||
4631             func == bpf_lwt_push_encap ||
4632             func == bpf_lwt_seg6_store_bytes ||
4633             func == bpf_lwt_seg6_adjust_srh ||
4634             func == bpf_lwt_seg6_action)
4636                 return true;
4637
4638         return false;
4639 }
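
/* Consequence for program authors (illustrative sketch): after calling any
 * helper listed above, previously loaded packet pointers are invalidated by
 * the verifier and must be re-read and re-checked, e.g. in a TC program:
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	...
 *	bpf_skb_pull_data(skb, 0);		(may reallocate packet data)
 *	data     = (void *)(long)skb->data;	(stale pointers: reload)
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return TC_ACT_SHOT;
 */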
4640
4641 static const struct bpf_func_proto *
4642 bpf_base_func_proto(enum bpf_func_id func_id)
4643 {
4644         switch (func_id) {
4645         case BPF_FUNC_map_lookup_elem:
4646                 return &bpf_map_lookup_elem_proto;
4647         case BPF_FUNC_map_update_elem:
4648                 return &bpf_map_update_elem_proto;
4649         case BPF_FUNC_map_delete_elem:
4650                 return &bpf_map_delete_elem_proto;
4651         case BPF_FUNC_get_prandom_u32:
4652                 return &bpf_get_prandom_u32_proto;
4653         case BPF_FUNC_get_smp_processor_id:
4654                 return &bpf_get_raw_smp_processor_id_proto;
4655         case BPF_FUNC_get_numa_node_id:
4656                 return &bpf_get_numa_node_id_proto;
4657         case BPF_FUNC_tail_call:
4658                 return &bpf_tail_call_proto;
4659         case BPF_FUNC_ktime_get_ns:
4660                 return &bpf_ktime_get_ns_proto;
4661         case BPF_FUNC_trace_printk:
4662                 if (capable(CAP_SYS_ADMIN))
4663                         return bpf_get_trace_printk_proto();
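                /* !CAP_SYS_ADMIN: fall through to the default NULL below */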
4664         default:
4665                 return NULL;
4666         }
4667 }
4668
4669 static const struct bpf_func_proto *
4670 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4671 {
4672         switch (func_id) {
4673         /* inet and inet6 sockets are created in a process
4674          * context so there is always a valid uid/gid
4675          */
4676         case BPF_FUNC_get_current_uid_gid:
4677                 return &bpf_get_current_uid_gid_proto;
4678         default:
4679                 return bpf_base_func_proto(func_id);
4680         }
4681 }
4682
4683 static const struct bpf_func_proto *
4684 sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4685 {
4686         switch (func_id) {
4687         /* inet and inet6 sockets are created in a process
4688          * context so there is always a valid uid/gid
4689          */
4690         case BPF_FUNC_get_current_uid_gid:
4691                 return &bpf_get_current_uid_gid_proto;
4692         case BPF_FUNC_bind:
4693                 switch (prog->expected_attach_type) {
4694                 case BPF_CGROUP_INET4_CONNECT:
4695                 case BPF_CGROUP_INET6_CONNECT:
4696                         return &bpf_bind_proto;
4697                 default:
4698                         return NULL;
4699                 }
4700         default:
4701                 return bpf_base_func_proto(func_id);
4702         }
4703 }
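
/* Usage sketch (illustrative only): a cgroup connect4 program forcing a
 * local source address with bpf_bind(). The address is an assumption for
 * the example (bpf_htonl() comes from bpf_endian.h); returning 1 permits
 * the connect(), returning 0 rejects it with EPERM.
 *
 *	SEC("cgroup/connect4")
 *	int connect_v4(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {};
 *
 *		sa.sin_family = AF_INET;
 *		sa.sin_addr.s_addr = bpf_htonl(0x7f000001);	(127.0.0.1)
 *		if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *			return 0;
 *		return 1;
 *	}
 */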
4704
4705 static const struct bpf_func_proto *
4706 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4707 {
4708         switch (func_id) {
4709         case BPF_FUNC_skb_load_bytes:
4710                 return &bpf_skb_load_bytes_proto;
4711         case BPF_FUNC_skb_load_bytes_relative:
4712                 return &bpf_skb_load_bytes_relative_proto;
4713         case BPF_FUNC_get_socket_cookie:
4714                 return &bpf_get_socket_cookie_proto;
4715         case BPF_FUNC_get_socket_uid:
4716                 return &bpf_get_socket_uid_proto;
4717         default:
4718                 return bpf_base_func_proto(func_id);
4719         }
4720 }
4721
4722 static const struct bpf_func_proto *
4723 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4724 {
4725         switch (func_id) {
4726         case BPF_FUNC_skb_store_bytes:
4727                 return &bpf_skb_store_bytes_proto;
4728         case BPF_FUNC_skb_load_bytes:
4729                 return &bpf_skb_load_bytes_proto;
4730         case BPF_FUNC_skb_load_bytes_relative:
4731                 return &bpf_skb_load_bytes_relative_proto;
4732         case BPF_FUNC_skb_pull_data:
4733                 return &bpf_skb_pull_data_proto;
4734         case BPF_FUNC_csum_diff:
4735                 return &bpf_csum_diff_proto;
4736         case BPF_FUNC_csum_update:
4737                 return &bpf_csum_update_proto;
4738         case BPF_FUNC_l3_csum_replace:
4739                 return &bpf_l3_csum_replace_proto;
4740         case BPF_FUNC_l4_csum_replace:
4741                 return &bpf_l4_csum_replace_proto;
4742         case BPF_FUNC_clone_redirect:
4743                 return &bpf_clone_redirect_proto;
4744         case BPF_FUNC_get_cgroup_classid:
4745                 return &bpf_get_cgroup_classid_proto;
4746         case BPF_FUNC_skb_vlan_push:
4747                 return &bpf_skb_vlan_push_proto;
4748         case BPF_FUNC_skb_vlan_pop:
4749                 return &bpf_skb_vlan_pop_proto;
4750         case BPF_FUNC_skb_change_proto:
4751                 return &bpf_skb_change_proto_proto;
4752         case BPF_FUNC_skb_change_type:
4753                 return &bpf_skb_change_type_proto;
4754         case BPF_FUNC_skb_adjust_room:
4755                 return &bpf_skb_adjust_room_proto;
4756         case BPF_FUNC_skb_change_tail:
4757                 return &bpf_skb_change_tail_proto;
4758         case BPF_FUNC_skb_get_tunnel_key:
4759                 return &bpf_skb_get_tunnel_key_proto;
4760         case BPF_FUNC_skb_set_tunnel_key:
4761                 return bpf_get_skb_set_tunnel_proto(func_id);
4762         case BPF_FUNC_skb_get_tunnel_opt:
4763                 return &bpf_skb_get_tunnel_opt_proto;
4764         case BPF_FUNC_skb_set_tunnel_opt:
4765                 return bpf_get_skb_set_tunnel_proto(func_id);
4766         case BPF_FUNC_redirect:
4767                 return &bpf_redirect_proto;
4768         case BPF_FUNC_get_route_realm:
4769                 return &bpf_get_route_realm_proto;
4770         case BPF_FUNC_get_hash_recalc:
4771                 return &bpf_get_hash_recalc_proto;
4772         case BPF_FUNC_set_hash_invalid:
4773                 return &bpf_set_hash_invalid_proto;
4774         case BPF_FUNC_set_hash:
4775                 return &bpf_set_hash_proto;
4776         case BPF_FUNC_perf_event_output:
4777                 return &bpf_skb_event_output_proto;
4778         case BPF_FUNC_get_smp_processor_id:
4779                 return &bpf_get_smp_processor_id_proto;
4780         case BPF_FUNC_skb_under_cgroup:
4781                 return &bpf_skb_under_cgroup_proto;
4782         case BPF_FUNC_get_socket_cookie:
4783                 return &bpf_get_socket_cookie_proto;
4784         case BPF_FUNC_get_socket_uid:
4785                 return &bpf_get_socket_uid_proto;
4786         case BPF_FUNC_fib_lookup:
4787                 return &bpf_skb_fib_lookup_proto;
4788 #ifdef CONFIG_XFRM
4789         case BPF_FUNC_skb_get_xfrm_state:
4790                 return &bpf_skb_get_xfrm_state_proto;
4791 #endif
4792 #ifdef CONFIG_SOCK_CGROUP_DATA
4793         case BPF_FUNC_skb_cgroup_id:
4794                 return &bpf_skb_cgroup_id_proto;
4795 #endif
4796         default:
4797                 return bpf_base_func_proto(func_id);
4798         }
4799 }
4800
4801 static const struct bpf_func_proto *
4802 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4803 {
4804         switch (func_id) {
4805         case BPF_FUNC_perf_event_output:
4806                 return &bpf_xdp_event_output_proto;
4807         case BPF_FUNC_get_smp_processor_id:
4808                 return &bpf_get_smp_processor_id_proto;
4809         case BPF_FUNC_csum_diff:
4810                 return &bpf_csum_diff_proto;
4811         case BPF_FUNC_xdp_adjust_head:
4812                 return &bpf_xdp_adjust_head_proto;
4813         case BPF_FUNC_xdp_adjust_meta:
4814                 return &bpf_xdp_adjust_meta_proto;
4815         case BPF_FUNC_redirect:
4816                 return &bpf_xdp_redirect_proto;
4817         case BPF_FUNC_redirect_map:
4818                 return &bpf_xdp_redirect_map_proto;
4819         case BPF_FUNC_xdp_adjust_tail:
4820                 return &bpf_xdp_adjust_tail_proto;
4821         case BPF_FUNC_fib_lookup:
4822                 return &bpf_xdp_fib_lookup_proto;
4823         default:
4824                 return bpf_base_func_proto(func_id);
4825         }
4826 }
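
/* Usage sketch (illustrative only): an XDP program popping the Ethernet
 * header with bpf_xdp_adjust_head(); a positive delta moves the data
 * pointer forward, shrinking the packet from the front.
 *
 *	SEC("xdp")
 *	int pop_eth(struct xdp_md *ctx)
 *	{
 *		if (bpf_xdp_adjust_head(ctx, (int)sizeof(struct ethhdr)))
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */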
4827
4828 static const struct bpf_func_proto *
4829 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4830 {
4831         switch (func_id) {
4832         case BPF_FUNC_setsockopt:
4833                 return &bpf_setsockopt_proto;
4834         case BPF_FUNC_getsockopt:
4835                 return &bpf_getsockopt_proto;
4836         case BPF_FUNC_sock_ops_cb_flags_set:
4837                 return &bpf_sock_ops_cb_flags_set_proto;
4838         case BPF_FUNC_sock_map_update:
4839                 return &bpf_sock_map_update_proto;
4840         case BPF_FUNC_sock_hash_update:
4841                 return &bpf_sock_hash_update_proto;
4842         default:
4843                 return bpf_base_func_proto(func_id);
4844         }
4845 }
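
/* Usage sketch (illustrative only): a sock_ops program tuning a TCP socket
 * once the connection is established; the buffer size is an example value.
 *
 *	SEC("sockops")
 *	int set_bufs(struct bpf_sock_ops *skops)
 *	{
 *		int bufsize = 150000;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF,
 *				       &bufsize, sizeof(bufsize));
 *		return 1;
 *	}
 */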
4846
4847 static const struct bpf_func_proto *
4848 sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4849 {
4850         switch (func_id) {
4851         case BPF_FUNC_msg_redirect_map:
4852                 return &bpf_msg_redirect_map_proto;
4853         case BPF_FUNC_msg_redirect_hash:
4854                 return &bpf_msg_redirect_hash_proto;
4855         case BPF_FUNC_msg_apply_bytes:
4856                 return &bpf_msg_apply_bytes_proto;
4857         case BPF_FUNC_msg_cork_bytes:
4858                 return &bpf_msg_cork_bytes_proto;
4859         case BPF_FUNC_msg_pull_data:
4860                 return &bpf_msg_pull_data_proto;
4861         default:
4862                 return bpf_base_func_proto(func_id);
4863         }
4864 }
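
/* Usage sketch (illustrative only): an sk_msg verdict program; sock_map is
 * an assumed BPF_MAP_TYPE_SOCKMAP declared elsewhere, and the key 0 is
 * arbitrary for the example.
 *
 *	SEC("sk_msg")
 *	int msg_verdict(struct sk_msg_md *msg)
 *	{
 *		bpf_msg_apply_bytes(msg, 4096);	(verdict covers the next 4k)
 *		return bpf_msg_redirect_map(msg, &sock_map, 0, 0);
 *	}
 */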
4865
4866 static const struct bpf_func_proto *
4867 sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4868 {
4869         switch (func_id) {
4870         case BPF_FUNC_skb_store_bytes:
4871                 return &bpf_skb_store_bytes_proto;
4872         case BPF_FUNC_skb_load_bytes:
4873                 return &bpf_skb_load_bytes_proto;
4874         case BPF_FUNC_skb_pull_data:
4875                 return &bpf_skb_pull_data_proto;
4876         case BPF_FUNC_skb_change_tail:
4877                 return &bpf_skb_change_tail_proto;
4878         case BPF_FUNC_skb_change_head:
4879                 return &bpf_skb_change_head_proto;
4880         case BPF_FUNC_get_socket_cookie:
4881                 return &bpf_get_socket_cookie_proto;
4882         case BPF_FUNC_get_socket_uid:
4883                 return &bpf_get_socket_uid_proto;
4884         case BPF_FUNC_sk_redirect_map:
4885                 return &bpf_sk_redirect_map_proto;
4886         case BPF_FUNC_sk_redirect_hash:
4887                 return &bpf_sk_redirect_hash_proto;
4888         default:
4889                 return bpf_base_func_proto(func_id);
4890         }
4891 }
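
/* Usage sketch (illustrative only): an sk_skb verdict program redirecting
 * into an assumed BPF_MAP_TYPE_SOCKMAP named sock_map; the helper returns
 * SK_PASS or SK_DROP.
 *
 *	SEC("sk_skb")
 *	int skb_verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 */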
4892
4893 static const struct bpf_func_proto *
4894 lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4895 {
4896         switch (func_id) {
4897         case BPF_FUNC_skb_load_bytes:
4898                 return &bpf_skb_load_bytes_proto;
4899         case BPF_FUNC_skb_pull_data:
4900                 return &bpf_skb_pull_data_proto;
4901         case BPF_FUNC_csum_diff:
4902                 return &bpf_csum_diff_proto;
4903         case BPF_FUNC_get_cgroup_classid:
4904                 return &bpf_get_cgroup_classid_proto;
4905         case BPF_FUNC_get_route_realm:
4906                 return &bpf_get_route_realm_proto;
4907         case BPF_FUNC_get_hash_recalc:
4908                 return &bpf_get_hash_recalc_proto;
4909         case BPF_FUNC_perf_event_output:
4910                 return &bpf_skb_event_output_proto;
4911         case BPF_FUNC_get_smp_processor_id:
4912                 return &bpf_get_smp_processor_id_proto;
4913         case BPF_FUNC_skb_under_cgroup:
4914                 return &bpf_skb_under_cgroup_proto;
4915         default:
4916                 return bpf_base_func_proto(func_id);
4917         }
4918 }
4919
4920 static const struct bpf_func_proto *
4921 lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4922 {
4923         switch (func_id) {
4924         case BPF_FUNC_lwt_push_encap:
4925                 return &bpf_lwt_push_encap_proto;
4926         default:
4927                 return lwt_out_func_proto(func_id, prog);
4928         }
4929 }
4930
4931 static const struct bpf_func_proto *
4932 lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4933 {
4934         switch (func_id) {
4935         case BPF_FUNC_skb_get_tunnel_key:
4936                 return &bpf_skb_get_tunnel_key_proto;
4937         case BPF_FUNC_skb_set_tunnel_key:
4938                 return bpf_get_skb_set_tunnel_proto(func_id);
4939         case BPF_FUNC_skb_get_tunnel_opt:
4940                 return &bpf_skb_get_tunnel_opt_proto;
4941         case BPF_FUNC_skb_set_tunnel_opt:
4942                 return bpf_get_skb_set_tunnel_proto(func_id);
4943         case BPF_FUNC_redirect:
4944                 return &bpf_redirect_proto;
4945         case BPF_FUNC_clone_redirect:
4946                 return &bpf_clone_redirect_proto;
4947         case BPF_FUNC_skb_change_tail:
4948                 return &bpf_skb_change_tail_proto;
4949         case BPF_FUNC_skb_change_head:
4950                 return &bpf_skb_change_head_proto;
4951         case BPF_FUNC_skb_store_bytes:
4952                 return &bpf_skb_store_bytes_proto;
4953         case BPF_FUNC_csum_update:
4954                 return &bpf_csum_update_proto;
4955         case BPF_FUNC_l3_csum_replace:
4956                 return &bpf_l3_csum_replace_proto;
4957         case BPF_FUNC_l4_csum_replace:
4958                 return &bpf_l4_csum_replace_proto;
4959         case BPF_FUNC_set_hash_invalid:
4960                 return &bpf_set_hash_invalid_proto;
4961         default:
4962                 return lwt_out_func_proto(func_id, prog);
4963         }
4964 }
4965
4966 static const struct bpf_func_proto *
4967 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4968 {
4969         switch (func_id) {
4970         case BPF_FUNC_lwt_seg6_store_bytes:
4971                 return &bpf_lwt_seg6_store_bytes_proto;
4972         case BPF_FUNC_lwt_seg6_action:
4973                 return &bpf_lwt_seg6_action_proto;
4974         case BPF_FUNC_lwt_seg6_adjust_srh:
4975                 return &bpf_lwt_seg6_adjust_srh_proto;
4976         default:
4977                 return lwt_out_func_proto(func_id, prog);
4978         }
4979 }
4980
4981 static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
4982                                     const struct bpf_prog *prog,
4983                                     struct bpf_insn_access_aux *info)
4984 {
4985         const int size_default = sizeof(__u32);
4986
4987         if (off < 0 || off >= sizeof(struct __sk_buff))
4988                 return false;
4989
4990         /* The verifier guarantees that size > 0. */
4991         if (off % size != 0)
4992                 return false;
4993
4994         switch (off) {
4995         case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
4996                 if (off + size > offsetofend(struct __sk_buff, cb[4]))
4997                         return false;
4998                 break;
4999         case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
5000         case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
5001         case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
5002         case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
5003         case bpf_ctx_range(struct __sk_buff, data):
5004         case bpf_ctx_range(struct __sk_buff, data_meta):
5005         case bpf_ctx_range(struct __sk_buff, data_end):
5006                 if (size != size_default)
5007                         return false;
5008                 break;
5009         default:
5010                 /* Only narrow read access allowed for now. */
5011                 if (type == BPF_WRITE) {
5012                         if (size != size_default)
5013                                 return false;
5014                 } else {
5015                         bpf_ctx_record_field_size(info, size_default);
5016                         if (!bpf_ctx_narrow_access_ok(off, size, size_default))
5017                                 return false;
5018                 }
5019         }
5020
5021         return true;
5022 }
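
/* For instance (illustrative sketch): with a 4-byte field such as
 * __sk_buff's mark, a program may issue a narrow read of its first byte,
 *
 *	__u8 b = *(__u8 *)&skb->mark;
 *
 * while a 1-byte store to the same field is rejected, since writes must be
 * full width per the default case above.
 */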
5023
5024 static bool sk_filter_is_valid_access(int off, int size,
5025                                       enum bpf_access_type type,
5026                                       const struct bpf_prog *prog,
5027                                       struct bpf_insn_access_aux *info)
5028 {
5029         switch (off) {
5030         case bpf_ctx_range(struct __sk_buff, tc_classid):
5031         case bpf_ctx_range(struct __sk_buff, data):
5032         case bpf_ctx_range(struct __sk_buff, data_meta):
5033         case bpf_ctx_range(struct __sk_buff, data_end):
5034         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
5035                 return false;
5036         }
5037
5038         if (type == BPF_WRITE) {
5039                 switch (off) {
5040                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
5041                         break;
5042                 default:
5043                         return false;
5044                 }
5045         }
5046
5047         return bpf_skb_is_valid_access(off, size, type, prog, info);
5048 }
5049
5050 static bool lwt_is_valid_access(int off, int size,
5051                                 enum bpf_access_type type,
5052                                 const struct bpf_prog *prog,
5053                                 struct bpf_insn_access_aux *info)
5054 {
5055         switch (off) {
5056         case bpf_ctx_range(struct __sk_buff, tc_classid):
5057         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
5058         case bpf_ctx_range(struct __sk_buff, data_meta):
5059                 return false;
5060         }
5061
5062         if (type == BPF_WRITE) {
5063                 switch (off) {
5064                 case bpf_ctx_range(struct __sk_buff, mark):
5065                 case bpf_ctx_range(struct __sk_buff, priority):
5066                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
5067                         break;
5068                 default:
5069                         return false;
5070                 }
5071         }
5072
5073         switch (off) {
5074         case bpf_ctx_range(struct __sk_buff, data):
5075                 info->reg_type = PTR_TO_PACKET;
5076                 break;
5077         case bpf_ctx_range(struct __sk_buff, data_end):
5078                 info->reg_type = PTR_TO_PACKET_END;
5079                 break;
5080         }
5081
5082         return bpf_skb_is_valid_access(off, size, type, prog, info);
5083 }
5084
5085 /* Attach-type-specific accesses */
5086 static bool __sock_filter_check_attach_type(int off,
5087                                             enum bpf_access_type access_type,
5088                                             enum bpf_attach_type attach_type)
5089 {
5090         switch (off) {
5091         case offsetof(struct bpf_sock, bound_dev_if):
5092         case offsetof(struct bpf_sock, mark):
5093         case offsetof(struct bpf_sock, priority):
5094                 switch (attach_type) {
5095                 case BPF_CGROUP_INET_SOCK_CREATE:
5096                         goto full_access;
5097                 default:
5098                         return false;
5099                 }
5100         case bpf_ctx_range(struct bpf_sock, src_ip4):
5101                 switch (attach_type) {
5102                 case BPF_CGROUP_INET4_POST_BIND:
5103                         goto read_only;
5104                 default:
5105                         return false;
5106                 }
5107         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
5108                 switch (attach_type) {
5109                 case BPF_CGROUP_INET6_POST_BIND:
5110                         goto read_only;
5111                 default:
5112                         return false;
5113                 }
5114         case bpf_ctx_range(struct bpf_sock, src_port):
5115                 switch (attach_type) {
5116                 case BPF_CGROUP_INET4_POST_BIND:
5117                 case BPF_CGROUP_INET6_POST_BIND:
5118                         goto read_only;
5119                 default:
5120                         return false;
5121                 }
5122         }
5123 read_only:
5124         return access_type == BPF_READ;
5125 full_access:
5126         return true;
5127 }
5128
5129 static bool __sock_filter_check_size(int off, int size,
5130                                      struct bpf_insn_access_aux *info)
5131 {
5132         const int size_default = sizeof(__u32);
5133
5134         switch (off) {
5135         case bpf_ctx_range(struct bpf_sock, src_ip4):
5136         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
5137                 bpf_ctx_record_field_size(info, size_default);
5138                 return bpf_ctx_narrow_access_ok(off, size, size_default);
5139         }
5140
5141         return size == size_default;
5142 }
5143
5144 static bool sock_filter_is_valid_access(int off, int size,
5145                                         enum bpf_access_type type,
5146                                         const struct bpf_prog *prog,
5147                                         struct bpf_insn_access_aux *info)
5148 {
5149         if (off < 0 || off >= sizeof(struct bpf_sock))
5150                 return false;
5151         if (off % size != 0)
5152                 return false;
5153         if (!__sock_filter_check_attach_type(off, type,
5154                                              prog->expected_attach_type))
5155                 return false;
5156         if (!__sock_filter_check_size(off, size, info))
5157                 return false;
5158         return true;
5159 }
5160
5161 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
5162                                 const struct bpf_prog *prog, int drop_verdict)
5163 {
5164         struct bpf_insn *insn = insn_buf;
5165
5166         if (!direct_write)
5167                 return 0;
5168
5169         /* if (!skb->cloned)
5170          *       goto start;
5171          *
5172          * (Fast path: if the skb is known not to be cloned, skip ahead;
5173          *  otherwise assume it may be a clone and unclone it in the helper.)
5174          */
5175         *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
5176         *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
5177         *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
5178
5179         /* ret = bpf_skb_pull_data(skb, 0); */
5180         *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
5181         *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
5182         *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5183                                BPF_FUNC_skb_pull_data);
5184         /* if (!ret)
5185          *      goto restore;
5186          * return TC_ACT_SHOT;
5187          */
5188         *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
5189         *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
5190         *insn++ = BPF_EXIT_INSN();
5191
5192         /* restore: */
5193         *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
5194         /* start: */
5195         *insn++ = prog->insnsi[0];
5196
5197         return insn - insn_buf;
5198 }
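
/* In C terms (sketch), the prologue emitted above behaves like:
 *
 *	if (skb->cloned) {
 *		if (bpf_skb_pull_data(skb, 0))
 *			return drop_verdict;
 *	}
 *	(first instruction of the original program)
 */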
5199
5200 static int bpf_gen_ld_abs(const struct bpf_insn *orig,
5201                           struct bpf_insn *insn_buf)
5202 {
5203         bool indirect = BPF_MODE(orig->code) == BPF_IND;
5204         struct bpf_insn *insn = insn_buf;
5205
5206         /* We're guaranteed here that CTX is in R6. */
5207         *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
5208         if (!indirect) {
5209                 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
5210         } else {
5211                 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
5212                 if (orig->imm)
5213                         *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
5214         }
5215
5216         switch (BPF_SIZE(orig->code)) {
5217         case BPF_B:
5218                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
5219                 break;
5220         case BPF_H:
5221                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
5222                 break;
5223         case BPF_W:
5224                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
5225                 break;
5226         }
5227
5228         *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
5229         *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
5230         *insn++ = BPF_EXIT_INSN();
5231
5232         return insn - insn_buf;
5233 }
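
/* In C terms (sketch), a word-sized LD_ABS/LD_IND is rewritten into:
 *
 *	int ret = bpf_skb_load_helper_32_no_cache(skb, offset);
 *	if (ret < 0)
 *		return 0;	(out-of-bounds load terminates the program)
 *	(execution continues with ret in r0)
 */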
5234
5235 static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
5236                                const struct bpf_prog *prog)
5237 {
5238         return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
5239 }
5240
5241 static bool tc_cls_act_is_valid_access(int off, int size,
5242                                        enum bpf_access_type type,
5243                                        const struct bpf_prog *prog,
5244                                        struct bpf_insn_access_aux *info)
5245 {
5246         if (type == BPF_WRITE) {
5247                 switch (off) {
5248                 case bpf_ctx_range(struct __sk_buff, mark):
5249                 case bpf_ctx_range(struct __sk_buff, tc_index):
5250                 case bpf_ctx_range(struct __sk_buff, priority):
5251                 case bpf_ctx_range(struct __sk_buff, tc_classid):
5252                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
5253                         break;
5254                 default:
5255                         return false;
5256                 }
5257         }
5258
5259         switch (off) {
5260         case bpf_ctx_range(struct __sk_buff, data):
5261                 info->reg_type = PTR_TO_PACKET;
5262                 break;
5263         case bpf_ctx_range(struct __sk_buff, data_meta):
5264                 info->reg_type = PTR_TO_PACKET_META;
5265                 break;
5266         case bpf_ctx_range(struct __sk_buff, data_end):
5267                 info->reg_type = PTR_TO_PACKET_END;
5268                 break;
5269         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
5270                 return false;
5271         }
5272
5273         return bpf_skb_is_valid_access(off, size, type, prog, info);
5274 }
5275
5276 static bool __is_valid_xdp_access(int off, int size)
5277 {
5278         if (off < 0 || off >= sizeof(struct xdp_md))
5279                 return false;
5280         if (off % size != 0)
5281                 return false;
5282         if (size != sizeof(__u32))
5283                 return false;
5284
5285         return true;
5286 }
5287
5288 static bool xdp_is_valid_access(int off, int size,
5289                                 enum bpf_access_type type,
5290                                 const struct bpf_prog *prog,
5291                                 struct bpf_insn_access_aux *info)
5292 {
5293         if (type == BPF_WRITE) {
5294                 if (bpf_prog_is_dev_bound(prog->aux)) {
5295                         switch (off) {
5296                         case offsetof(struct xdp_md, rx_queue_index):
5297                                 return __is_valid_xdp_access(off, size);
5298                         }
5299                 }
5300                 return false;
5301         }
5302
5303         switch (off) {
5304         case offsetof(struct xdp_md, data):
5305                 info->reg_type = PTR_TO_PACKET;
5306                 break;
5307         case offsetof(struct xdp_md, data_meta):
5308                 info->reg_type = PTR_TO_PACKET_META;
5309                 break;
5310         case offsetof(struct xdp_md, data_end):
5311                 info->reg_type = PTR_TO_PACKET_END;
5312                 break;
5313         }
5314
5315         return __is_valid_xdp_access(off, size);
5316 }
5317
5318 void bpf_warn_invalid_xdp_action(u32 act)
5319 {
5320         const u32 act_max = XDP_REDIRECT;
5321
5322         WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
5323                   act > act_max ? "Illegal" : "Driver unsupported",
5324                   act);
5325 }
5326 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
5327
5328 static bool sock_addr_is_valid_access(int off, int size,
5329                                       enum bpf_access_type type,
5330                                       const struct bpf_prog *prog,
5331                                       struct bpf_insn_access_aux *info)
5332 {
5333         const int size_default = sizeof(__u32);
5334
5335         if (off < 0 || off >= sizeof(struct bpf_sock_addr))
5336                 return false;
5337         if (off % size != 0)
5338                 return false;
5339
5340         /* Disallow access to IPv6 fields from an IPv4 context and
5341          * vice versa.
5342          */
5343         switch (off) {
5344         case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
5345                 switch (prog->expected_attach_type) {
5346                 case BPF_CGROUP_INET4_BIND:
5347                 case BPF_CGROUP_INET4_CONNECT:
5348                 case BPF_CGROUP_UDP4_SENDMSG:
5349                         break;
5350                 default:
5351                         return false;
5352                 }
5353                 break;
5354         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
5355                 switch (prog->expected_attach_type) {
5356                 case BPF_CGROUP_INET6_BIND:
5357                 case BPF_CGROUP_INET6_CONNECT:
5358                 case BPF_CGROUP_UDP6_SENDMSG:
5359                         break;
5360                 default:
5361                         return false;
5362                 }
5363                 break;
5364         case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
5365                 switch (prog->expected_attach_type) {
5366                 case BPF_CGROUP_UDP4_SENDMSG:
5367                         break;
5368                 default:
5369                         return false;
5370                 }
5371                 break;
5372         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
5373                                 msg_src_ip6[3]):
5374                 switch (prog->expected_attach_type) {
5375                 case BPF_CGROUP_UDP6_SENDMSG:
5376                         break;
5377                 default:
5378                         return false;
5379                 }
5380                 break;
5381         }
5382
5383         switch (off) {
5384         case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
5385         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
5386         case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
5387         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
5388                                 msg_src_ip6[3]):
5389                 /* Only narrow read access allowed for now. */
5390                 if (type == BPF_READ) {
5391                         bpf_ctx_record_field_size(info, size_default);
5392                         if (!bpf_ctx_narrow_access_ok(off, size, size_default))
5393                                 return false;
5394                 } else {
5395                         if (size != size_default)
5396                                 return false;
5397                 }
5398                 break;
5399         case bpf_ctx_range(struct bpf_sock_addr, user_port):
5400                 if (size != size_default)
5401                         return false;
5402                 break;
5403         default:
5404                 if (type == BPF_READ) {
5405                         if (size != size_default)
5406                                 return false;
5407                 } else {
5408                         return false;
5409                 }
5410         }
5411
5412         return true;
5413 }
5414
5415 static bool sock_ops_is_valid_access(int off, int size,
5416                                      enum bpf_access_type type,
5417                                      const struct bpf_prog *prog,
5418                                      struct bpf_insn_access_aux *info)
5419 {
5420         const int size_default = sizeof(__u32);
5421
5422         if (off < 0 || off >= sizeof(struct bpf_sock_ops))
5423                 return false;
5424
5425         /* The verifier guarantees that size > 0. */
5426         if (off % size != 0)
5427                 return false;
5428
5429         if (type == BPF_WRITE) {
5430                 switch (off) {
5431                 case offsetof(struct bpf_sock_ops, reply):
5432                 case offsetof(struct bpf_sock_ops, sk_txhash):
5433                         if (size != size_default)
5434                                 return false;
5435                         break;
5436                 default:
5437                         return false;
5438                 }
5439         } else {
5440                 switch (off) {
5441                 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
5442                                         bytes_acked):
5443                         if (size != sizeof(__u64))
5444                                 return false;
5445                         break;
5446                 default:
5447                         if (size != size_default)
5448                                 return false;
5449                         break;
5450                 }
5451         }
5452
5453         return true;
5454 }
5455
5456 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
5457                            const struct bpf_prog *prog)
5458 {
5459         return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
5460 }
5461
5462 static bool sk_skb_is_valid_access(int off, int size,
5463                                    enum bpf_access_type type,
5464                                    const struct bpf_prog *prog,
5465                                    struct bpf_insn_access_aux *info)
5466 {
5467         switch (off) {
5468         case bpf_ctx_range(struct __sk_buff, tc_classid):
5469         case bpf_ctx_range(struct __sk_buff, data_meta):
5470                 return false;
5471         }
5472
5473         if (type == BPF_WRITE) {
5474                 switch (off) {
5475                 case bpf_ctx_range(struct __sk_buff, tc_index):
5476                 case bpf_ctx_range(struct __sk_buff, priority):
5477                         break;
5478                 default:
5479                         return false;
5480                 }
5481         }
5482
5483         switch (off) {
5484         case bpf_ctx_range(struct __sk_buff, mark):
5485                 return false;
5486         case bpf_ctx_range(struct __sk_buff, data):
5487                 info->reg_type = PTR_TO_PACKET;
5488                 break;
5489         case bpf_ctx_range(struct __sk_buff, data_end):
5490                 info->reg_type = PTR_TO_PACKET_END;
5491                 break;
5492         }
5493
5494         return bpf_skb_is_valid_access(off, size, type, prog, info);
5495 }
5496
5497 static bool sk_msg_is_valid_access(int off, int size,
5498                                    enum bpf_access_type type,
5499                                    const struct bpf_prog *prog,
5500                                    struct bpf_insn_access_aux *info)
5501 {
5502         if (type == BPF_WRITE)
5503                 return false;
5504
5505         switch (off) {
5506         case offsetof(struct sk_msg_md, data):
5507                 info->reg_type = PTR_TO_PACKET;
5508                 if (size != sizeof(__u64))
5509                         return false;
5510                 break;
5511         case offsetof(struct sk_msg_md, data_end):
5512                 info->reg_type = PTR_TO_PACKET_END;
5513                 if (size != sizeof(__u64))
5514                         return false;
5515                 break;
5516         default:
5517                 if (size != sizeof(__u32))
5518                         return false;
5519         }
5520
5521         if (off < 0 || off >= sizeof(struct sk_msg_md))
5522                 return false;
5523         if (off % size != 0)
5524                 return false;
5525
5526         return true;
5527 }
5528
5529 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
5530                                   const struct bpf_insn *si,
5531                                   struct bpf_insn *insn_buf,
5532                                   struct bpf_prog *prog, u32 *target_size)
5533 {
5534         struct bpf_insn *insn = insn_buf;
5535         int off;
5536
5537         switch (si->off) {
5538         case offsetof(struct __sk_buff, len):
5539                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5540                                       bpf_target_off(struct sk_buff, len, 4,
5541                                                      target_size));
5542                 break;
5543
5544         case offsetof(struct __sk_buff, protocol):
5545                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5546                                       bpf_target_off(struct sk_buff, protocol, 2,
5547                                                      target_size));
5548                 break;
5549
5550         case offsetof(struct __sk_buff, vlan_proto):
5551                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5552                                       bpf_target_off(struct sk_buff, vlan_proto, 2,
5553                                                      target_size));
5554                 break;
5555
5556         case offsetof(struct __sk_buff, priority):
5557                 if (type == BPF_WRITE)
5558                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5559                                               bpf_target_off(struct sk_buff, priority, 4,
5560                                                              target_size));
5561                 else
5562                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5563                                               bpf_target_off(struct sk_buff, priority, 4,
5564                                                              target_size));
5565                 break;
5566
5567         case offsetof(struct __sk_buff, ingress_ifindex):
5568                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5569                                       bpf_target_off(struct sk_buff, skb_iif, 4,
5570                                                      target_size));
5571                 break;
5572
5573         case offsetof(struct __sk_buff, ifindex):
5574                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
5575                                       si->dst_reg, si->src_reg,
5576                                       offsetof(struct sk_buff, dev));
5577                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
5578                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5579                                       bpf_target_off(struct net_device, ifindex, 4,
5580                                                      target_size));
5581                 break;
5582
5583         case offsetof(struct __sk_buff, hash):
5584                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5585                                       bpf_target_off(struct sk_buff, hash, 4,
5586                                                      target_size));
5587                 break;
5588
5589         case offsetof(struct __sk_buff, mark):
5590                 if (type == BPF_WRITE)
5591                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5592                                               bpf_target_off(struct sk_buff, mark, 4,
5593                                                              target_size));
5594                 else
5595                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5596                                               bpf_target_off(struct sk_buff, mark, 4,
5597                                                              target_size));
5598                 break;
5599
5600         case offsetof(struct __sk_buff, pkt_type):
5601                 *target_size = 1;
5602                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
5603                                       PKT_TYPE_OFFSET());
5604                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
5605 #ifdef __BIG_ENDIAN_BITFIELD
5606                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
5607 #endif
5608                 break;
5609
5610         case offsetof(struct __sk_buff, queue_mapping):
5611                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5612                                       bpf_target_off(struct sk_buff, queue_mapping, 2,
5613                                                      target_size));
5614                 break;
5615
5616         case offsetof(struct __sk_buff, vlan_present):
5617         case offsetof(struct __sk_buff, vlan_tci):
5618                 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
5619
5620                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5621                                       bpf_target_off(struct sk_buff, vlan_tci, 2,
5622                                                      target_size));
5623                 if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
5624                         *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
5625                                                 ~VLAN_TAG_PRESENT);
5626                 } else {
5627                         *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
5628                         *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
5629                 }
5630                 break;
5631
5632         case offsetof(struct __sk_buff, cb[0]) ...
5633              offsetofend(struct __sk_buff, cb[4]) - 1:
5634                 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
5635                 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
5636                               offsetof(struct qdisc_skb_cb, data)) %
5637                              sizeof(__u64));
5638
5639                 prog->cb_access = 1;
5640                 off  = si->off;
5641                 off -= offsetof(struct __sk_buff, cb[0]);
5642                 off += offsetof(struct sk_buff, cb);
5643                 off += offsetof(struct qdisc_skb_cb, data);
5644                 if (type == BPF_WRITE)
5645                         *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
5646                                               si->src_reg, off);
5647                 else
5648                         *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
5649                                               si->src_reg, off);
5650                 break;
5651
5652         case offsetof(struct __sk_buff, tc_classid):
5653                 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
5654
5655                 off  = si->off;
5656                 off -= offsetof(struct __sk_buff, tc_classid);
5657                 off += offsetof(struct sk_buff, cb);
5658                 off += offsetof(struct qdisc_skb_cb, tc_classid);
5659                 *target_size = 2;
5660                 if (type == BPF_WRITE)
5661                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
5662                                               si->src_reg, off);
5663                 else
5664                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
5665                                               si->src_reg, off);
5666                 break;
5667
5668         case offsetof(struct __sk_buff, data):
5669                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
5670                                       si->dst_reg, si->src_reg,
5671                                       offsetof(struct sk_buff, data));
5672                 break;
5673
5674         case offsetof(struct __sk_buff, data_meta):
5675                 off  = si->off;
5676                 off -= offsetof(struct __sk_buff, data_meta);
5677                 off += offsetof(struct sk_buff, cb);
5678                 off += offsetof(struct bpf_skb_data_end, data_meta);
5679                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
5680                                       si->src_reg, off);
5681                 break;
5682
5683         case offsetof(struct __sk_buff, data_end):
5684                 off  = si->off;
5685                 off -= offsetof(struct __sk_buff, data_end);
5686                 off += offsetof(struct sk_buff, cb);
5687                 off += offsetof(struct bpf_skb_data_end, data_end);
5688                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
5689                                       si->src_reg, off);
5690                 break;
5691
5692         case offsetof(struct __sk_buff, tc_index):
5693 #ifdef CONFIG_NET_SCHED
5694                 if (type == BPF_WRITE)
5695                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
5696                                               bpf_target_off(struct sk_buff, tc_index, 2,
5697                                                              target_size));
5698                 else
5699                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5700                                               bpf_target_off(struct sk_buff, tc_index, 2,
5701                                                              target_size));
5702 #else
5703                 *target_size = 2;
5704                 if (type == BPF_WRITE)
5705                         *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
5706                 else
5707                         *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
5708 #endif
5709                 break;
5710
5711         case offsetof(struct __sk_buff, napi_id):
5712 #if defined(CONFIG_NET_RX_BUSY_POLL)
5713                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5714                                       bpf_target_off(struct sk_buff, napi_id, 4,
5715                                                      target_size));
5716                 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
5717                 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
5718 #else
5719                 *target_size = 4;
5720                 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
5721 #endif
5722                 break;
5723         case offsetof(struct __sk_buff, family):
5724                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
5725
5726                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5727                                       si->dst_reg, si->src_reg,
5728                                       offsetof(struct sk_buff, sk));
5729                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5730                                       bpf_target_off(struct sock_common,
5731                                                      skc_family,
5732                                                      2, target_size));
5733                 break;
5734         case offsetof(struct __sk_buff, remote_ip4):
5735                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
5736
5737                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5738                                       si->dst_reg, si->src_reg,
5739                                       offsetof(struct sk_buff, sk));
5740                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5741                                       bpf_target_off(struct sock_common,
5742                                                      skc_daddr,
5743                                                      4, target_size));
5744                 break;
5745         case offsetof(struct __sk_buff, local_ip4):
5746                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5747                                           skc_rcv_saddr) != 4);
5748
5749                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5750                                       si->dst_reg, si->src_reg,
5751                                       offsetof(struct sk_buff, sk));
5752                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5753                                       bpf_target_off(struct sock_common,
5754                                                      skc_rcv_saddr,
5755                                                      4, target_size));
5756                 break;
5757         case offsetof(struct __sk_buff, remote_ip6[0]) ...
5758              offsetof(struct __sk_buff, remote_ip6[3]):
5759 #if IS_ENABLED(CONFIG_IPV6)
5760                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5761                                           skc_v6_daddr.s6_addr32[0]) != 4);
5762
5763                 off = si->off;
5764                 off -= offsetof(struct __sk_buff, remote_ip6[0]);
5765
5766                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5767                                       si->dst_reg, si->src_reg,
5768                                       offsetof(struct sk_buff, sk));
5769                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5770                                       offsetof(struct sock_common,
5771                                                skc_v6_daddr.s6_addr32[0]) +
5772                                       off);
5773 #else
5774                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5775 #endif
5776                 break;
5777         case offsetof(struct __sk_buff, local_ip6[0]) ...
5778              offsetof(struct __sk_buff, local_ip6[3]):
5779 #if IS_ENABLED(CONFIG_IPV6)
5780                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5781                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
5782
5783                 off = si->off;
5784                 off -= offsetof(struct __sk_buff, local_ip6[0]);
5785
5786                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5787                                       si->dst_reg, si->src_reg,
5788                                       offsetof(struct sk_buff, sk));
5789                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5790                                       offsetof(struct sock_common,
5791                                                skc_v6_rcv_saddr.s6_addr32[0]) +
5792                                       off);
5793 #else
5794                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5795 #endif
5796                 break;
5797
5798         case offsetof(struct __sk_buff, remote_port):
5799                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
5800
5801                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5802                                       si->dst_reg, si->src_reg,
5803                                       offsetof(struct sk_buff, sk));
5804                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5805                                       bpf_target_off(struct sock_common,
5806                                                      skc_dport,
5807                                                      2, target_size));
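                /* skc_dport is a __be16. On little-endian hosts the halfword
                 * is shifted into the upper 16 bits so that bpf_ntohl() of
                 * the 32-bit remote_port value yields the same port number
                 * regardless of host endianness.
                 */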
5808 #ifndef __BIG_ENDIAN_BITFIELD
5809                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
5810 #endif
5811                 break;
5812
5813         case offsetof(struct __sk_buff, local_port):
5814                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
5815
5816                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
5817                                       si->dst_reg, si->src_reg,
5818                                       offsetof(struct sk_buff, sk));
5819                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5820                                       bpf_target_off(struct sock_common,
5821                                                      skc_num, 2, target_size));
5822                 break;
5823         }
5824
5825         return insn - insn_buf;
5826 }
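
/* Illustrative sketch (not compiled): for a program reading
 * __sk_buff->family, the converter above rewrites the single context load
 * into two native loads; on a 64-bit kernel that is roughly:
 *
 *	dst_reg = *(u64 *)(src_reg + offsetof(struct sk_buff, sk));
 *	dst_reg = *(u16 *)(dst_reg + offsetof(struct sock_common, skc_family));
 *
 * i.e. the pointer chase skb->sk->skc_family is spelled out so the eBPF
 * program never dereferences kernel pointers itself.
 */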
5827
5828 static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
5829                                           const struct bpf_insn *si,
5830                                           struct bpf_insn *insn_buf,
5831                                           struct bpf_prog *prog, u32 *target_size)
5832 {
5833         struct bpf_insn *insn = insn_buf;
5834         int off;
5835
5836         switch (si->off) {
5837         case offsetof(struct bpf_sock, bound_dev_if):
5838                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
5839
5840                 if (type == BPF_WRITE)
5841                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5842                                         offsetof(struct sock, sk_bound_dev_if));
5843                 else
5844                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5845                                       offsetof(struct sock, sk_bound_dev_if));
5846                 break;
5847
5848         case offsetof(struct bpf_sock, mark):
5849                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
5850
5851                 if (type == BPF_WRITE)
5852                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5853                                         offsetof(struct sock, sk_mark));
5854                 else
5855                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5856                                       offsetof(struct sock, sk_mark));
5857                 break;
5858
5859         case offsetof(struct bpf_sock, priority):
5860                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
5861
5862                 if (type == BPF_WRITE)
5863                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5864                                         offsetof(struct sock, sk_priority));
5865                 else
5866                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5867                                       offsetof(struct sock, sk_priority));
5868                 break;
5869
5870         case offsetof(struct bpf_sock, family):
5871                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
5872
5873                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5874                                       offsetof(struct sock, sk_family));
5875                 break;
5876
5877         case offsetof(struct bpf_sock, type):
5878                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5879                                       offsetof(struct sock, __sk_flags_offset));
5880                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
5881                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
5882                 break;
5883
5884         case offsetof(struct bpf_sock, protocol):
5885                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5886                                       offsetof(struct sock, __sk_flags_offset));
5887                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
5888                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
5889                 break;
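
                /* Note: sk_type and sk_protocol are bitfields packed into
                 * one word, so there is no field offset to load from.
                 * Both cases above therefore load the whole word at
                 * __sk_flags_offset and recover the value with mask + shift,
                 * e.g. (flags & SK_FL_PROTO_MASK) >> SK_FL_PROTO_SHIFT.
                 */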
5890
5891         case offsetof(struct bpf_sock, src_ip4):
5892                 *insn++ = BPF_LDX_MEM(
5893                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
5894                         bpf_target_off(struct sock_common, skc_rcv_saddr,
5895                                        FIELD_SIZEOF(struct sock_common,
5896                                                     skc_rcv_saddr),
5897                                        target_size));
5898                 break;
5899
5900         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
5901 #if IS_ENABLED(CONFIG_IPV6)
5902                 off = si->off;
5903                 off -= offsetof(struct bpf_sock, src_ip6[0]);
5904                 *insn++ = BPF_LDX_MEM(
5905                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
5906                         bpf_target_off(
5907                                 struct sock_common,
5908                                 skc_v6_rcv_saddr.s6_addr32[0],
5909                                 FIELD_SIZEOF(struct sock_common,
5910                                              skc_v6_rcv_saddr.s6_addr32[0]),
5911                                 target_size) + off);
5912 #else
5913                 (void)off;
5914                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5915 #endif
5916                 break;
5917
5918         case offsetof(struct bpf_sock, src_port):
5919                 *insn++ = BPF_LDX_MEM(
5920                         BPF_FIELD_SIZEOF(struct sock_common, skc_num),
5921                         si->dst_reg, si->src_reg,
5922                         bpf_target_off(struct sock_common, skc_num,
5923                                        FIELD_SIZEOF(struct sock_common,
5924                                                     skc_num),
5925                                        target_size));
5926                 break;
5927         }
5928
5929         return insn - insn_buf;
5930 }
5931
5932 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
5933                                          const struct bpf_insn *si,
5934                                          struct bpf_insn *insn_buf,
5935                                          struct bpf_prog *prog, u32 *target_size)
5936 {
5937         struct bpf_insn *insn = insn_buf;
5938
5939         switch (si->off) {
5940         case offsetof(struct __sk_buff, ifindex):
5941                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
5942                                       si->dst_reg, si->src_reg,
5943                                       offsetof(struct sk_buff, dev));
5944                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5945                                       bpf_target_off(struct net_device, ifindex, 4,
5946                                                      target_size));
5947                 break;
5948         default:
5949                 return bpf_convert_ctx_access(type, si, insn_buf, prog,
5950                                               target_size);
5951         }
5952
5953         return insn - insn_buf;
5954 }
5955
5956 static u32 xdp_convert_ctx_access(enum bpf_access_type type,
5957                                   const struct bpf_insn *si,
5958                                   struct bpf_insn *insn_buf,
5959                                   struct bpf_prog *prog, u32 *target_size)
5960 {
5961         struct bpf_insn *insn = insn_buf;
5962
5963         switch (si->off) {
5964         case offsetof(struct xdp_md, data):
5965                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
5966                                       si->dst_reg, si->src_reg,
5967                                       offsetof(struct xdp_buff, data));
5968                 break;
5969         case offsetof(struct xdp_md, data_meta):
5970                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
5971                                       si->dst_reg, si->src_reg,
5972                                       offsetof(struct xdp_buff, data_meta));
5973                 break;
5974         case offsetof(struct xdp_md, data_end):
5975                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
5976                                       si->dst_reg, si->src_reg,
5977                                       offsetof(struct xdp_buff, data_end));
5978                 break;
5979         case offsetof(struct xdp_md, ingress_ifindex):
5980                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
5981                                       si->dst_reg, si->src_reg,
5982                                       offsetof(struct xdp_buff, rxq));
5983                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
5984                                       si->dst_reg, si->dst_reg,
5985                                       offsetof(struct xdp_rxq_info, dev));
5986                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5987                                       offsetof(struct net_device, ifindex));
5988                 break;
5989         case offsetof(struct xdp_md, rx_queue_index):
5990                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
5991                                       si->dst_reg, si->src_reg,
5992                                       offsetof(struct xdp_buff, rxq));
5993                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5994                                       offsetof(struct xdp_rxq_info,
5995                                                queue_index));
5996                 break;
5997         }
5998
5999         return insn - insn_buf;
6000 }
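
/* Illustrative sketch (not compiled): the ingress_ifindex case above is a
 * two-level pointer chase; on a 64-bit kernel the single ctx load becomes:
 *
 *	dst_reg = *(u64 *)(src_reg + offsetof(struct xdp_buff, rxq));
 *	dst_reg = *(u64 *)(dst_reg + offsetof(struct xdp_rxq_info, dev));
 *	dst_reg = *(u32 *)(dst_reg + offsetof(struct net_device, ifindex));
 */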
6001
6002 /* SOCK_ADDR_LOAD_NESTED_FIELD() loads the nested field S.F.NF, where S is
6003  * the type of the context structure, F is the field in the context structure
6004  * that contains a pointer to the nested structure of type NS, and NF is the
6005  * field of NS to load.
6006  *
6007  * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to
6008  * make sure that SIZE is not greater than the actual size of S.F.NF.
6009  *
6010  * If an offset OFF is provided, the load happens at OFF relative to NF.
6011  */
6012 #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)          \
6013         do {                                                                   \
6014                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg,     \
6015                                       si->src_reg, offsetof(S, F));            \
6016                 *insn++ = BPF_LDX_MEM(                                         \
6017                         SIZE, si->dst_reg, si->dst_reg,                        \
6018                         bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
6019                                        target_size)                            \
6020                                 + OFF);                                        \
6021         } while (0)
6022
6023 #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)                              \
6024         SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,                     \
6025                                              BPF_FIELD_SIZEOF(NS, NF), 0)
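
/* Illustrative sketch (not compiled): SOCK_ADDR_LOAD_NESTED_FIELD(struct
 * bpf_sock_addr_kern, struct sockaddr, uaddr, sa_family), as used for
 * user_family below, expands to roughly this pair of loads on a 64-bit
 * kernel:
 *
 *	dst_reg = *(u64 *)(src_reg + offsetof(struct bpf_sock_addr_kern, uaddr));
 *	dst_reg = *(u16 *)(dst_reg + offsetof(struct sockaddr, sa_family));
 */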
6026
6027 /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
6028  * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(), but for a store operation.
6029  *
6030  * It doesn't support a SIZE argument, though, since narrow stores are not
6031  * supported for now.
6032  *
6033  * In addition it uses the temporary field TF (a member of struct S) as a 3rd
6034  * "register", since the two registers available in convert_ctx_access are not
6035  * enough: we can overwrite neither SRC, since it contains the value to store,
6036  * nor DST, since it contains the pointer to the context that may be used by
6037  * later instructions. But we need a temporary place to save the pointer to
6038  * the nested structure whose field we want to store to.
6039  */
6040 #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF)                \
6041         do {                                                                   \
6042                 int tmp_reg = BPF_REG_9;                                       \
6043                 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
6044                         --tmp_reg;                                             \
6045                 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
6046                         --tmp_reg;                                             \
6047                 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,            \
6048                                       offsetof(S, TF));                        \
6049                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,         \
6050                                       si->dst_reg, offsetof(S, F));            \
6051                 *insn++ = BPF_STX_MEM(                                         \
6052                         BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg,        \
6053                         bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
6054                                        target_size)                            \
6055                                 + OFF);                                        \
6056                 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,            \
6057                                       offsetof(S, TF));                        \
6058         } while (0)
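
/* Note on the temporary: tmp_reg is chosen so it collides with neither
 * operand; e.g. with src_reg == BPF_REG_9 and dst_reg == BPF_REG_8 the two
 * decrements above settle on BPF_REG_7. Its previous value survives because
 * it is parked in S.TF around the emitted sequence.
 */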
6059
6060 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
6061                                                       TF)                      \
6062         do {                                                                   \
6063                 if (type == BPF_WRITE) {                                       \
6064                         SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF,    \
6065                                                          TF);                  \
6066                 } else {                                                       \
6067                         SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(                  \
6068                                 S, NS, F, NF, SIZE, OFF);                     \
6069                 }                                                              \
6070         } while (0)
6071
6072 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)                 \
6073         SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(                         \
6074                 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
6075
6076 static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
6077                                         const struct bpf_insn *si,
6078                                         struct bpf_insn *insn_buf,
6079                                         struct bpf_prog *prog, u32 *target_size)
6080 {
6081         struct bpf_insn *insn = insn_buf;
6082         int off;
6083
6084         switch (si->off) {
6085         case offsetof(struct bpf_sock_addr, user_family):
6086                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
6087                                             struct sockaddr, uaddr, sa_family);
6088                 break;
6089
6090         case offsetof(struct bpf_sock_addr, user_ip4):
6091                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6092                         struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
6093                         sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
6094                 break;
6095
6096         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6097                 off = si->off;
6098                 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
6099                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6100                         struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
6101                         sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
6102                         tmp_reg);
6103                 break;
6104
6105         case offsetof(struct bpf_sock_addr, user_port):
6106                 /* To get the port we would need to know sa_family first
6107                  * and then treat sockaddr as either sockaddr_in or
6108                  * sockaddr_in6. We can simplify, though, since the port
6109                  * field has the same offset and size in both structures.
6110                  * Here we check this invariant at build time and then use
6111                  * just one of the structures.
6112                  */
6113                 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
6114                              offsetof(struct sockaddr_in6, sin6_port));
6115                 BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
6116                              FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
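                /* Concretely: in both structures the 16-bit port immediately
                 * follows the 16-bit address family, i.e. sin_port and
                 * sin6_port both live at offset 2.
                 */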
6117                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
6118                                                      struct sockaddr_in6, uaddr,
6119                                                      sin6_port, tmp_reg);
6120                 break;
6121
6122         case offsetof(struct bpf_sock_addr, family):
6123                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
6124                                             struct sock, sk, sk_family);
6125                 break;
6126
6127         case offsetof(struct bpf_sock_addr, type):
6128                 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
6129                         struct bpf_sock_addr_kern, struct sock, sk,
6130                         __sk_flags_offset, BPF_W, 0);
6131                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
6132                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
6133                 break;
6134
6135         case offsetof(struct bpf_sock_addr, protocol):
6136                 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
6137                         struct bpf_sock_addr_kern, struct sock, sk,
6138                         __sk_flags_offset, BPF_W, 0);
6139                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
6140                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
6141                                         SK_FL_PROTO_SHIFT);
6142                 break;
6143
6144         case offsetof(struct bpf_sock_addr, msg_src_ip4):
6145                 /* Treat t_ctx as struct in_addr for msg_src_ip4. */
6146                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6147                         struct bpf_sock_addr_kern, struct in_addr, t_ctx,
6148                         s_addr, BPF_SIZE(si->code), 0, tmp_reg);
6149                 break;
6150
6151         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6152                                 msg_src_ip6[3]):
6153                 off = si->off;
6154                 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
6155                 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
6156                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
6157                         struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
6158                         s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
6159                 break;
6160         }
6161
6162         return insn - insn_buf;
6163 }
6164
6165 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
6166                                        const struct bpf_insn *si,
6167                                        struct bpf_insn *insn_buf,
6168                                        struct bpf_prog *prog,
6169                                        u32 *target_size)
6170 {
6171         struct bpf_insn *insn = insn_buf;
6172         int off;
6173
6174         switch (si->off) {
6175         case offsetof(struct bpf_sock_ops, op) ...
6176              offsetof(struct bpf_sock_ops, replylong[3]):
6177                 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
6178                              FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
6179                 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
6180                              FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
6181                 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
6182                              FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
6183                 off = si->off;
6184                 off -= offsetof(struct bpf_sock_ops, op);
6185                 off += offsetof(struct bpf_sock_ops_kern, op);
6186                 if (type == BPF_WRITE)
6187                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
6188                                               off);
6189                 else
6190                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
6191                                               off);
6192                 break;
6193
6194         case offsetof(struct bpf_sock_ops, family):
6195                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
6196
6197                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6198                                               struct bpf_sock_ops_kern, sk),
6199                                       si->dst_reg, si->src_reg,
6200                                       offsetof(struct bpf_sock_ops_kern, sk));
6201                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6202                                       offsetof(struct sock_common, skc_family));
6203                 break;
6204
6205         case offsetof(struct bpf_sock_ops, remote_ip4):
6206                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
6207
6208                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6209                                                 struct bpf_sock_ops_kern, sk),
6210                                       si->dst_reg, si->src_reg,
6211                                       offsetof(struct bpf_sock_ops_kern, sk));
6212                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6213                                       offsetof(struct sock_common, skc_daddr));
6214                 break;
6215
6216         case offsetof(struct bpf_sock_ops, local_ip4):
6217                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6218                                           skc_rcv_saddr) != 4);
6219
6220                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6221                                               struct bpf_sock_ops_kern, sk),
6222                                       si->dst_reg, si->src_reg,
6223                                       offsetof(struct bpf_sock_ops_kern, sk));
6224                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6225                                       offsetof(struct sock_common,
6226                                                skc_rcv_saddr));
6227                 break;
6228
6229         case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
6230              offsetof(struct bpf_sock_ops, remote_ip6[3]):
6231 #if IS_ENABLED(CONFIG_IPV6)
6232                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6233                                           skc_v6_daddr.s6_addr32[0]) != 4);
6234
6235                 off = si->off;
6236                 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
6237                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6238                                                 struct bpf_sock_ops_kern, sk),
6239                                       si->dst_reg, si->src_reg,
6240                                       offsetof(struct bpf_sock_ops_kern, sk));
6241                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6242                                       offsetof(struct sock_common,
6243                                                skc_v6_daddr.s6_addr32[0]) +
6244                                       off);
6245 #else
6246                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6247 #endif
6248                 break;
6249
6250         case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
6251              offsetof(struct bpf_sock_ops, local_ip6[3]):
6252 #if IS_ENABLED(CONFIG_IPV6)
6253                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6254                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
6255
6256                 off = si->off;
6257                 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
6258                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6259                                                 struct bpf_sock_ops_kern, sk),
6260                                       si->dst_reg, si->src_reg,
6261                                       offsetof(struct bpf_sock_ops_kern, sk));
6262                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6263                                       offsetof(struct sock_common,
6264                                                skc_v6_rcv_saddr.s6_addr32[0]) +
6265                                       off);
6266 #else
6267                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6268 #endif
6269                 break;
6270
6271         case offsetof(struct bpf_sock_ops, remote_port):
6272                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
6273
6274                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6275                                                 struct bpf_sock_ops_kern, sk),
6276                                       si->dst_reg, si->src_reg,
6277                                       offsetof(struct bpf_sock_ops_kern, sk));
6278                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6279                                       offsetof(struct sock_common, skc_dport));
6280 #ifndef __BIG_ENDIAN_BITFIELD
6281                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
6282 #endif
6283                 break;
6284
6285         case offsetof(struct bpf_sock_ops, local_port):
6286                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
6287
6288                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6289                                                 struct bpf_sock_ops_kern, sk),
6290                                       si->dst_reg, si->src_reg,
6291                                       offsetof(struct bpf_sock_ops_kern, sk));
6292                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6293                                       offsetof(struct sock_common, skc_num));
6294                 break;
6295
6296         case offsetof(struct bpf_sock_ops, is_fullsock):
6297                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6298                                                 struct bpf_sock_ops_kern,
6299                                                 is_fullsock),
6300                                       si->dst_reg, si->src_reg,
6301                                       offsetof(struct bpf_sock_ops_kern,
6302                                                is_fullsock));
6303                 break;
6304
6305         case offsetof(struct bpf_sock_ops, state):
6306                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
6307
6308                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6309                                                 struct bpf_sock_ops_kern, sk),
6310                                       si->dst_reg, si->src_reg,
6311                                       offsetof(struct bpf_sock_ops_kern, sk));
6312                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
6313                                       offsetof(struct sock_common, skc_state));
6314                 break;
6315
6316         case offsetof(struct bpf_sock_ops, rtt_min):
6317                 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
6318                              sizeof(struct minmax));
6319                 BUILD_BUG_ON(sizeof(struct minmax) <
6320                              sizeof(struct minmax_sample));
6321
6322                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6323                                                 struct bpf_sock_ops_kern, sk),
6324                                       si->dst_reg, si->src_reg,
6325                                       offsetof(struct bpf_sock_ops_kern, sk));
6326                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6327                                       offsetof(struct tcp_sock, rtt_min) +
6328                                       FIELD_SIZEOF(struct minmax_sample, t));
6329                 break;
6330
6331 /* Helper macro for adding read access to tcp_sock or sock fields. */
6332 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
6333         do {                                                                  \
6334                 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
6335                              FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
6336                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
6337                                                 struct bpf_sock_ops_kern,     \
6338                                                 is_fullsock),                 \
6339                                       si->dst_reg, si->src_reg,               \
6340                                       offsetof(struct bpf_sock_ops_kern,      \
6341                                                is_fullsock));                 \
6342                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);            \
6343                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
6344                                                 struct bpf_sock_ops_kern, sk),\
6345                                       si->dst_reg, si->src_reg,               \
6346                                       offsetof(struct bpf_sock_ops_kern, sk));\
6347                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,                   \
6348                                                        OBJ_FIELD),            \
6349                                       si->dst_reg, si->dst_reg,               \
6350                                       offsetof(OBJ, OBJ_FIELD));              \
6351         } while (0)
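
/* Illustrative sketch (not compiled): SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd,
 * struct tcp_sock), as used below, emits roughly the following on a 64-bit
 * kernel:
 *
 *	dst_reg = *(u32 *)(src_reg + offsetof(struct bpf_sock_ops_kern, is_fullsock));
 *	if (dst_reg == 0) goto +2;
 *	dst_reg = *(u64 *)(src_reg + offsetof(struct bpf_sock_ops_kern, sk));
 *	dst_reg = *(u32 *)(dst_reg + offsetof(struct tcp_sock, snd_cwnd));
 *
 * so tcp_sock fields read as 0 when the callback runs on a socket that is
 * not a full socket.
 */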
6352
6353 /* Helper macro for adding write access to tcp_sock or sock fields.
6354  * The macro is called with two registers: dst_reg, which contains a pointer
6355  * to ctx (context), and src_reg, which contains the value that should be
6356  * stored. However, we need an additional register since we cannot overwrite
6357  * dst_reg because it may be used later in the program.
6358  * Instead we "borrow" one of the other registers. We first save its value
6359  * into the (temp) field in bpf_sock_ops_kern, use it, and then restore
6360  * it at the end of the macro.
6361  */
6362 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
6363         do {                                                                  \
6364                 int reg = BPF_REG_9;                                          \
6365                 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
6366                              FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
6367                 if (si->dst_reg == reg || si->src_reg == reg)                 \
6368                         reg--;                                                \
6369                 if (si->dst_reg == reg || si->src_reg == reg)                 \
6370                         reg--;                                                \
6371                 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,               \
6372                                       offsetof(struct bpf_sock_ops_kern,      \
6373                                                temp));                        \
6374                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
6375                                                 struct bpf_sock_ops_kern,     \
6376                                                 is_fullsock),                 \
6377                                       reg, si->dst_reg,                       \
6378                                       offsetof(struct bpf_sock_ops_kern,      \
6379                                                is_fullsock));                 \
6380                 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
6381                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
6382                                                 struct bpf_sock_ops_kern, sk),\
6383                                       reg, si->dst_reg,                       \
6384                                       offsetof(struct bpf_sock_ops_kern, sk));\
6385                 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),       \
6386                                       reg, si->src_reg,                       \
6387                                       offsetof(OBJ, OBJ_FIELD));              \
6388                 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,               \
6389                                       offsetof(struct bpf_sock_ops_kern,      \
6390                                                temp));                        \
6391         } while (0)
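
/* The emitted write sequence mirrors the read one: park the borrowed
 * register in bpf_sock_ops_kern.temp, load is_fullsock into it, skip the
 * store for non-full sockets, otherwise chase sk and store src_reg into
 * OBJ.OBJ_FIELD, then restore the borrowed register from temp.
 */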
6392
6393 #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)            \
6394         do {                                                                  \
6395                 if (TYPE == BPF_WRITE)                                        \
6396                         SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
6397                 else                                                          \
6398                         SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
6399         } while (0)
6400
6401         case offsetof(struct bpf_sock_ops, snd_cwnd):
6402                 SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
6403                 break;
6404
6405         case offsetof(struct bpf_sock_ops, srtt_us):
6406                 SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
6407                 break;
6408
6409         case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
6410                 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
6411                                    struct tcp_sock);
6412                 break;
6413
6414         case offsetof(struct bpf_sock_ops, snd_ssthresh):
6415                 SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
6416                 break;
6417
6418         case offsetof(struct bpf_sock_ops, rcv_nxt):
6419                 SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
6420                 break;
6421
6422         case offsetof(struct bpf_sock_ops, snd_nxt):
6423                 SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
6424                 break;
6425
6426         case offsetof(struct bpf_sock_ops, snd_una):
6427                 SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
6428                 break;
6429
6430         case offsetof(struct bpf_sock_ops, mss_cache):
6431                 SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
6432                 break;
6433
6434         case offsetof(struct bpf_sock_ops, ecn_flags):
6435                 SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
6436                 break;
6437
6438         case offsetof(struct bpf_sock_ops, rate_delivered):
6439                 SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
6440                                    struct tcp_sock);
6441                 break;
6442
6443         case offsetof(struct bpf_sock_ops, rate_interval_us):
6444                 SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
6445                                    struct tcp_sock);
6446                 break;
6447
6448         case offsetof(struct bpf_sock_ops, packets_out):
6449                 SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
6450                 break;
6451
6452         case offsetof(struct bpf_sock_ops, retrans_out):
6453                 SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
6454                 break;
6455
6456         case offsetof(struct bpf_sock_ops, total_retrans):
6457                 SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
6458                                    struct tcp_sock);
6459                 break;
6460
6461         case offsetof(struct bpf_sock_ops, segs_in):
6462                 SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
6463                 break;
6464
6465         case offsetof(struct bpf_sock_ops, data_segs_in):
6466                 SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
6467                 break;
6468
6469         case offsetof(struct bpf_sock_ops, segs_out):
6470                 SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
6471                 break;
6472
6473         case offsetof(struct bpf_sock_ops, data_segs_out):
6474                 SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
6475                                    struct tcp_sock);
6476                 break;
6477
6478         case offsetof(struct bpf_sock_ops, lost_out):
6479                 SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
6480                 break;
6481
6482         case offsetof(struct bpf_sock_ops, sacked_out):
6483                 SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
6484                 break;
6485
6486         case offsetof(struct bpf_sock_ops, sk_txhash):
6487                 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
6488                                           struct sock, type);
6489                 break;
6490
6491         case offsetof(struct bpf_sock_ops, bytes_received):
6492                 SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
6493                                    struct tcp_sock);
6494                 break;
6495
6496         case offsetof(struct bpf_sock_ops, bytes_acked):
6497                 SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
6498                 break;
6499
6500         }
6501         return insn - insn_buf;
6502 }
6503
6504 static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
6505                                      const struct bpf_insn *si,
6506                                      struct bpf_insn *insn_buf,
6507                                      struct bpf_prog *prog, u32 *target_size)
6508 {
6509         struct bpf_insn *insn = insn_buf;
6510         int off;
6511
6512         switch (si->off) {
6513         case offsetof(struct __sk_buff, data_end):
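                /* For sk_skb programs data_end is not a real sk_buff member;
                 * it is maintained in the skb control buffer at
                 * tcp_skb_cb.bpf.data_end, so compute that cb offset and load
                 * the pointer from there.
                 */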
6514                 off  = si->off;
6515                 off -= offsetof(struct __sk_buff, data_end);
6516                 off += offsetof(struct sk_buff, cb);
6517                 off += offsetof(struct tcp_skb_cb, bpf.data_end);
6518                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
6519                                       si->src_reg, off);
6520                 break;
6521         default:
6522                 return bpf_convert_ctx_access(type, si, insn_buf, prog,
6523                                               target_size);
6524         }
6525
6526         return insn - insn_buf;
6527 }
6528
6529 static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
6530                                      const struct bpf_insn *si,
6531                                      struct bpf_insn *insn_buf,
6532                                      struct bpf_prog *prog, u32 *target_size)
6533 {
6534         struct bpf_insn *insn = insn_buf;
6535 #if IS_ENABLED(CONFIG_IPV6)
6536         int off;
6537 #endif
6538
6539         switch (si->off) {
6540         case offsetof(struct sk_msg_md, data):
6541                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data),
6542                                       si->dst_reg, si->src_reg,
6543                                       offsetof(struct sk_msg_buff, data));
6544                 break;
6545         case offsetof(struct sk_msg_md, data_end):
6546                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end),
6547                                       si->dst_reg, si->src_reg,
6548                                       offsetof(struct sk_msg_buff, data_end));
6549                 break;
6550         case offsetof(struct sk_msg_md, family):
6551                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
6552
6553                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6554                                               struct sk_msg_buff, sk),
6555                                       si->dst_reg, si->src_reg,
6556                                       offsetof(struct sk_msg_buff, sk));
6557                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6558                                       offsetof(struct sock_common, skc_family));
6559                 break;
6560
6561         case offsetof(struct sk_msg_md, remote_ip4):
6562                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
6563
6564                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6565                                                 struct sk_msg_buff, sk),
6566                                       si->dst_reg, si->src_reg,
6567                                       offsetof(struct sk_msg_buff, sk));
6568                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6569                                       offsetof(struct sock_common, skc_daddr));
6570                 break;
6571
6572         case offsetof(struct sk_msg_md, local_ip4):
6573                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6574                                           skc_rcv_saddr) != 4);
6575
6576                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6577                                               struct sk_msg_buff, sk),
6578                                       si->dst_reg, si->src_reg,
6579                                       offsetof(struct sk_msg_buff, sk));
6580                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6581                                       offsetof(struct sock_common,
6582                                                skc_rcv_saddr));
6583                 break;
6584
6585         case offsetof(struct sk_msg_md, remote_ip6[0]) ...
6586              offsetof(struct sk_msg_md, remote_ip6[3]):
6587 #if IS_ENABLED(CONFIG_IPV6)
6588                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6589                                           skc_v6_daddr.s6_addr32[0]) != 4);
6590
6591                 off = si->off;
6592                 off -= offsetof(struct sk_msg_md, remote_ip6[0]);
6593                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6594                                                 struct sk_msg_buff, sk),
6595                                       si->dst_reg, si->src_reg,
6596                                       offsetof(struct sk_msg_buff, sk));
6597                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6598                                       offsetof(struct sock_common,
6599                                                skc_v6_daddr.s6_addr32[0]) +
6600                                       off);
6601 #else
6602                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6603 #endif
6604                 break;
6605
6606         case offsetof(struct sk_msg_md, local_ip6[0]) ...
6607              offsetof(struct sk_msg_md, local_ip6[3]):
6608 #if IS_ENABLED(CONFIG_IPV6)
6609                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6610                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
6611
6612                 off = si->off;
6613                 off -= offsetof(struct sk_msg_md, local_ip6[0]);
6614                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6615                                                 struct sk_msg_buff, sk),
6616                                       si->dst_reg, si->src_reg,
6617                                       offsetof(struct sk_msg_buff, sk));
6618                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6619                                       offsetof(struct sock_common,
6620                                                skc_v6_rcv_saddr.s6_addr32[0]) +
6621                                       off);
6622 #else
6623                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
6624 #endif
6625                 break;
6626
6627         case offsetof(struct sk_msg_md, remote_port):
6628                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
6629
6630                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6631                                                 struct sk_msg_buff, sk),
6632                                       si->dst_reg, si->src_reg,
6633                                       offsetof(struct sk_msg_buff, sk));
6634                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6635                                       offsetof(struct sock_common, skc_dport));
6636 #ifndef __BIG_ENDIAN_BITFIELD
6637                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
6638 #endif
6639                 break;
6640
6641         case offsetof(struct sk_msg_md, local_port):
6642                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
6643
6644                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
6645                                                 struct sk_msg_buff, sk),
6646                                       si->dst_reg, si->src_reg,
6647                                       offsetof(struct sk_msg_buff, sk));
6648                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6649                                       offsetof(struct sock_common, skc_num));
6650                 break;
6651         }
6652
6653         return insn - insn_buf;
6654 }
6655
6656 const struct bpf_verifier_ops sk_filter_verifier_ops = {
6657         .get_func_proto         = sk_filter_func_proto,
6658         .is_valid_access        = sk_filter_is_valid_access,
6659         .convert_ctx_access     = bpf_convert_ctx_access,
6660         .gen_ld_abs             = bpf_gen_ld_abs,
6661 };
6662
6663 const struct bpf_prog_ops sk_filter_prog_ops = {
6664         .test_run               = bpf_prog_test_run_skb,
6665 };
6666
6667 const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
6668         .get_func_proto         = tc_cls_act_func_proto,
6669         .is_valid_access        = tc_cls_act_is_valid_access,
6670         .convert_ctx_access     = tc_cls_act_convert_ctx_access,
6671         .gen_prologue           = tc_cls_act_prologue,
6672         .gen_ld_abs             = bpf_gen_ld_abs,
6673 };
6674
6675 const struct bpf_prog_ops tc_cls_act_prog_ops = {
6676         .test_run               = bpf_prog_test_run_skb,
6677 };
6678
6679 const struct bpf_verifier_ops xdp_verifier_ops = {
6680         .get_func_proto         = xdp_func_proto,
6681         .is_valid_access        = xdp_is_valid_access,
6682         .convert_ctx_access     = xdp_convert_ctx_access,
6683 };
6684
6685 const struct bpf_prog_ops xdp_prog_ops = {
6686         .test_run               = bpf_prog_test_run_xdp,
6687 };
6688
6689 const struct bpf_verifier_ops cg_skb_verifier_ops = {
6690         .get_func_proto         = sk_filter_func_proto,
6691         .is_valid_access        = sk_filter_is_valid_access,
6692         .convert_ctx_access     = bpf_convert_ctx_access,
6693 };
6694
6695 const struct bpf_prog_ops cg_skb_prog_ops = {
6696         .test_run               = bpf_prog_test_run_skb,
6697 };
6698
6699 const struct bpf_verifier_ops lwt_in_verifier_ops = {
6700         .get_func_proto         = lwt_in_func_proto,
6701         .is_valid_access        = lwt_is_valid_access,
6702         .convert_ctx_access     = bpf_convert_ctx_access,
6703 };
6704
6705 const struct bpf_prog_ops lwt_in_prog_ops = {
6706         .test_run               = bpf_prog_test_run_skb,
6707 };
6708
6709 const struct bpf_verifier_ops lwt_out_verifier_ops = {
6710         .get_func_proto         = lwt_out_func_proto,
6711         .is_valid_access        = lwt_is_valid_access,
6712         .convert_ctx_access     = bpf_convert_ctx_access,
6713 };
6714
6715 const struct bpf_prog_ops lwt_out_prog_ops = {
6716         .test_run               = bpf_prog_test_run_skb,
6717 };
6718
6719 const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
6720         .get_func_proto         = lwt_xmit_func_proto,
6721         .is_valid_access        = lwt_is_valid_access,
6722         .convert_ctx_access     = bpf_convert_ctx_access,
6723         .gen_prologue           = tc_cls_act_prologue,
6724 };
6725
6726 const struct bpf_prog_ops lwt_xmit_prog_ops = {
6727         .test_run               = bpf_prog_test_run_skb,
6728 };
6729
6730 const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
6731         .get_func_proto         = lwt_seg6local_func_proto,
6732         .is_valid_access        = lwt_is_valid_access,
6733         .convert_ctx_access     = bpf_convert_ctx_access,
6734 };
6735
6736 const struct bpf_prog_ops lwt_seg6local_prog_ops = {
6737         .test_run               = bpf_prog_test_run_skb,
6738 };
6739
6740 const struct bpf_verifier_ops cg_sock_verifier_ops = {
6741         .get_func_proto         = sock_filter_func_proto,
6742         .is_valid_access        = sock_filter_is_valid_access,
6743         .convert_ctx_access     = sock_filter_convert_ctx_access,
6744 };
6745
6746 const struct bpf_prog_ops cg_sock_prog_ops = {
6747 };
6748
6749 const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
6750         .get_func_proto         = sock_addr_func_proto,
6751         .is_valid_access        = sock_addr_is_valid_access,
6752         .convert_ctx_access     = sock_addr_convert_ctx_access,
6753 };
6754
6755 const struct bpf_prog_ops cg_sock_addr_prog_ops = {
6756 };
6757
6758 const struct bpf_verifier_ops sock_ops_verifier_ops = {
6759         .get_func_proto         = sock_ops_func_proto,
6760         .is_valid_access        = sock_ops_is_valid_access,
6761         .convert_ctx_access     = sock_ops_convert_ctx_access,
6762 };
6763
6764 const struct bpf_prog_ops sock_ops_prog_ops = {
6765 };
6766
6767 const struct bpf_verifier_ops sk_skb_verifier_ops = {
6768         .get_func_proto         = sk_skb_func_proto,
6769         .is_valid_access        = sk_skb_is_valid_access,
6770         .convert_ctx_access     = sk_skb_convert_ctx_access,
6771         .gen_prologue           = sk_skb_prologue,
6772 };
6773
6774 const struct bpf_prog_ops sk_skb_prog_ops = {
6775 };
6776
6777 const struct bpf_verifier_ops sk_msg_verifier_ops = {
6778         .get_func_proto         = sk_msg_func_proto,
6779         .is_valid_access        = sk_msg_is_valid_access,
6780         .convert_ctx_access     = sk_msg_convert_ctx_access,
6781 };
6782
6783 const struct bpf_prog_ops sk_msg_prog_ops = {
6784 };
6785
6786 int sk_detach_filter(struct sock *sk)
6787 {
6788         int ret = -ENOENT;
6789         struct sk_filter *filter;
6790
6791         if (sock_flag(sk, SOCK_FILTER_LOCKED))
6792                 return -EPERM;
6793
6794         filter = rcu_dereference_protected(sk->sk_filter,
6795                                            lockdep_sock_is_held(sk));
6796         if (filter) {
6797                 RCU_INIT_POINTER(sk->sk_filter, NULL);
6798                 sk_filter_uncharge(sk, filter);
6799                 ret = 0;
6800         }
6801
6802         return ret;
6803 }
6804 EXPORT_SYMBOL_GPL(sk_detach_filter);
6805
6806 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
6807                   unsigned int len)
6808 {
6809         struct sock_fprog_kern *fprog;
6810         struct sk_filter *filter;
6811         int ret = 0;
6812
6813         lock_sock(sk);
6814         filter = rcu_dereference_protected(sk->sk_filter,
6815                                            lockdep_sock_is_held(sk));
6816         if (!filter)
6817                 goto out;
6818
6819         /* We're copying the filter that was originally attached,
6820          * so no conversion/decoding is needed anymore. eBPF programs
6821          * that have no original program cannot be dumped through this.
6822          */
6823         ret = -EACCES;
6824         fprog = filter->prog->orig_prog;
6825         if (!fprog)
6826                 goto out;
6827
6828         ret = fprog->len;
6829         if (!len)
6830                 /* User space only enquires about the number of filter blocks. */
6831                 goto out;
6832
6833         ret = -EINVAL;
6834         if (len < fprog->len)
6835                 goto out;
6836
6837         ret = -EFAULT;
6838         if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
6839                 goto out;
6840
6841         /* Instead of bytes, the API expects us to return the
6842          * number of filter blocks.
6843          */
6844         ret = fprog->len;
6845 out:
6846         release_sock(sk);
6847         return ret;
6848 }
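
/* Illustrative userspace sketch (an assumption of this note: SO_GET_FILTER
 * reaches sk_get_filter() via sock_getsockopt()). Classic filters are dumped
 * with two calls: first with a zero length to learn the block count, then
 * with a buffer of that size:
 *
 *	socklen_t len = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len);
 *	struct sock_filter *prog = calloc(len, sizeof(*prog));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, prog, &len);
 */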