bpf: dynamically allocate digest scratch buffer
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7ba644626553274120d9b34467a931ab6c4bf051..70231425379751021b57326eec85e70d881d3f42 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -14,6 +14,7 @@
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <linux/capability.h>
+#include <linux/cryptohash.h>
 
 #include <net/sch_generic.h>
 
@@ -402,10 +403,12 @@ struct bpf_prog {
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
-                               dst_needed:1;   /* Do we need dst entry? */
+                               dst_needed:1,   /* Do we need dst entry? */
+                               xdp_adjust_head:1; /* Adjusting pkt head? */
        kmemcheck_bitfield_end(meta);
-       u32                     len;            /* Number of filter blocks */
        enum bpf_prog_type      type;           /* Type of BPF program */
+       u32                     len;            /* Number of filter blocks */
+       u32                     digest[SHA_DIGEST_WORDS]; /* Program digest */
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        unsigned int            (*bpf_func)(const void *ctx,
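With SHA_DIGEST_WORDS at 5, the new digest field holds a 20-byte SHA-1 over the instruction stream. A sketch of how it could be rendered for debugging; bpf_prog_dump_digest is a hypothetical helper name, not part of this patch:

/* Hypothetical: format prog->digest as a 40-char hex string.
 * buf must hold 2 * SHA_DIGEST_WORDS * sizeof(u32) + 1 bytes.
 */
static void bpf_prog_dump_digest(const struct bpf_prog *prog, char *buf)
{
	int i;

	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		buf += sprintf(buf, "%08x", prog->digest[i]);
}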
@@ -435,6 +438,7 @@ struct bpf_skb_data_end {
 struct xdp_buff {
        void *data;
        void *data_end;
+       void *data_hard_start;
 };
 
 /* compute the linear packet data range [data, data_end) which
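data_hard_start records where the buffer (including headroom) actually begins, so a program flagged with the new xdp_adjust_head bit can later grow the packet towards the front. A sketch of how a driver RX path might fill the structure; rx_run_xdp, page, headroom and len are illustrative stand-ins for the driver's own ring state:

/* Illustrative driver RX fragment, not from this patch.
 * Caller is assumed to hold rcu_read_lock() (see bpf_prog_run_xdp below).
 */
static u32 rx_run_xdp(const struct bpf_prog *prog, struct page *page,
		      unsigned int headroom, unsigned int len)
{
	struct xdp_buff xdp;

	xdp.data_hard_start = page_address(page);	/* start of headroom */
	xdp.data = xdp.data_hard_start + headroom;	/* first packet byte */
	xdp.data_end = xdp.data + len;			/* one past the last byte */

	return bpf_prog_run_xdp(prog, &xdp);
}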
@@ -498,16 +502,27 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
        return BPF_PROG_RUN(prog, skb);
 }
 
-static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
-                                  struct xdp_buff *xdp)
+static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+                                           struct xdp_buff *xdp)
 {
-       u32 ret;
+       /* Caller needs to hold rcu_read_lock() (!), otherwise program
+        * can be released while still running, or map elements could be
+        * freed early while still having concurrent users. XDP fastpath
+        * already takes rcu_read_lock() when fetching the program, so
+        * it's not necessary here anymore.
+        */
+       return BPF_PROG_RUN(prog, xdp);
+}
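With the lock/unlock pair gone from the helper, any caller outside a driver's XDP fastpath has to supply the RCU read side itself. A sketch of the expected calling convention; rxq is an illustrative driver structure:

	/* Illustrative caller: the RCU read side now lives with the caller. */
	rcu_read_lock();
	prog = rcu_dereference(rxq->xdp_prog);
	if (prog)
		act = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();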
 
-       rcu_read_lock();
-       ret = BPF_PROG_RUN(prog, xdp);
-       rcu_read_unlock();
+static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
+{
+       return prog->len * sizeof(struct bpf_insn);
+}
 
-       return ret;
+static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+{
+       return round_up(bpf_prog_insn_size(prog) +
+                       sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
 }
 
 static inline unsigned int bpf_prog_size(unsigned int proglen)
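bpf_prog_digest_scratch_size() is what gives the commit its subject: it sizes a scratch buffer big enough for the raw instructions plus SHA-1 padding, i.e. the 0x80 terminator byte and a trailing __be64 bit-length, rounded up to the 64-byte block size, so the buffer can be vmalloc'ed instead of living on the stack. Roughly, the consumer in kernel/bpf/core.c works as below; this sketch is simplified (the in-tree version also zeroes out unstable map fds in BPF_LD_IMM64 instructions before hashing):

/* Simplified sketch of the digest computation using the scratch buffer. */
static int bpf_prog_calc_digest(struct bpf_prog *fp)
{
	u32 raw_size = bpf_prog_digest_scratch_size(fp);
	u32 psize = bpf_prog_insn_size(fp);
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, blocks;
	__be64 *bits;
	u8 *raw;

	raw = vmalloc(raw_size);		/* too big for the stack */
	if (!raw)
		return -ENOMEM;

	sha_init(fp->digest);
	memset(ws, 0, sizeof(ws));

	memcpy(raw, fp->insnsi, psize);		/* message: the insn stream */
	raw[psize++] = 0x80;			/* SHA-1 padding marker */
	memset(raw + psize, 0, raw_size - psize);

	/* message length in bits occupies the last 8 bytes of the buffer */
	bits = (__be64 *)(raw + raw_size - sizeof(__be64));
	*bits = cpu_to_be64((u64)bpf_prog_insn_size(fp) << 3);

	for (i = 0, blocks = raw_size / SHA_MESSAGE_BYTES; i < blocks; i++)
		sha_transform(fp->digest, raw + i * SHA_MESSAGE_BYTES, ws);

	vfree(raw);
	return 0;
}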
@@ -590,11 +605,12 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
-bool bpf_helper_changes_skb_data(void *func);
+bool bpf_helper_changes_pkt_data(void *func);
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 void bpf_warn_invalid_xdp_action(u32 act);
+void bpf_warn_invalid_xdp_buffer(void);
 
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;