]> asedeno.scripts.mit.edu Git - linux.git/blobdiff - kernel/bpf/verifier.c
bpf: Fix and simplifications on inline map lookup
[linux.git] / kernel / bpf / verifier.c
index 3fc6e39b223e2cb4dc2985d33696a858a116d8d0..9bf82267f2f9795c992043ab2d2360e924fffa2f 100644 (file)
@@ -33,7 +33,7 @@
  * - out of bounds or malformed jumps
  * The second pass is all possible path descent from the 1st insn.
  * Since it's analyzing all paths through the program, the length of the
- * analysis is limited to 32k insn, which may be hit even if total number of
+ * analysis is limited to 64k insn, which may be hit even if total number of
  * insn is less than 4K, but there are too many branches that change stack/regs.
  * Number of 'branches to be analyzed' is limited to 1k
  *
@@ -143,6 +143,8 @@ struct bpf_verifier_stack_elem {
 #define BPF_COMPLEXITY_LIMIT_INSNS     65536
 #define BPF_COMPLEXITY_LIMIT_STACK     1024
 
+#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
+
 struct bpf_call_arg_meta {
        struct bpf_map *map_ptr;
        bool raw_mode;
@@ -1273,7 +1275,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
        }
 }
 
-static int check_call(struct bpf_verifier_env *env, int func_id)
+static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
        struct bpf_verifier_state *state = &env->cur_state;
        const struct bpf_func_proto *fn = NULL;
@@ -1357,6 +1359,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
        } else if (fn->ret_type == RET_VOID) {
                regs[BPF_REG_0].type = NOT_INIT;
        } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
+               struct bpf_insn_aux_data *insn_aux;
+
                regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
                regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0;
                /* remember map_ptr, so that check_map_access()
@@ -1369,6 +1373,11 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
                }
                regs[BPF_REG_0].map_ptr = meta.map_ptr;
                regs[BPF_REG_0].id = ++env->id_gen;
+               insn_aux = &env->insn_aux_data[insn_idx];
+               if (!insn_aux->map_ptr)
+                       insn_aux->map_ptr = meta.map_ptr;
+               else if (insn_aux->map_ptr != meta.map_ptr)
+                       insn_aux->map_ptr = BPF_MAP_PTR_POISON;
        } else {
                verbose("unknown return type %d of func %s#%d\n",
                        fn->ret_type, func_id_name(func_id), func_id);
@@ -2940,7 +2949,7 @@ static int do_check(struct bpf_verifier_env *env)
                                        return -EINVAL;
                                }
 
-                               err = check_call(env, insn->imm);
+                               err = check_call(env, insn->imm, insn_idx);
                                if (err)
                                        return err;
 
@@ -3162,6 +3171,41 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
                        insn->src_reg = 0;
 }
 
+/* A single env->prog->insnsi[off] instruction was replaced with the range
+ * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
+ * [0, off) and [off, end) to new locations, so the patched range stays zero.
+ */
+static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
+                               u32 off, u32 cnt)
+{
+       struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+
+       /* a 1-for-1 patch does not shift any insn index; keep the old array */
+       if (cnt == 1)
+               return 0;
+       /* prog_len is the length of the already-patched program */
+       new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
+       if (!new_data)
+               return -ENOMEM;
+       /* insns before the patch point keep their aux data unchanged */
+       memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
+       /* insns at and after the patch point shift up by cnt - 1 slots;
+        * the cnt - 1 freshly inserted slots stay zeroed (vzalloc above)
+        */
+       memcpy(new_data + off + cnt - 1, old_data + off,
+              sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+       env->insn_aux_data = new_data;
+       vfree(old_data);
+       return 0;
+}
+
+/* Patch env->prog at instruction 'off' with the 'len'-insn sequence 'patch'
+ * and keep env->insn_aux_data[] in sync with the new instruction indices.
+ * Returns the new program, or NULL on allocation failure (callers report
+ * -ENOMEM).
+ */
+static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
+                                           const struct bpf_insn *patch, u32 len)
+{
+       struct bpf_prog *new_prog;
+
+       new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
+       if (!new_prog)
+               return NULL;
+       /* NOTE(review): if adjust_insn_aux_data() fails here, new_prog is not
+        * freed on this path — verify teardown elsewhere reclaims it.
+        */
+       if (adjust_insn_aux_data(env, new_prog->len, off, len))
+               return NULL;
+       return new_prog;
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -3181,10 +3225,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                        verbose("bpf verifier is misconfigured\n");
                        return -EINVAL;
                } else if (cnt) {
-                       new_prog = bpf_patch_insn_single(env->prog, 0,
-                                                        insn_buf, cnt);
+                       new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
                        if (!new_prog)
                                return -ENOMEM;
+
                        env->prog = new_prog;
                        delta += cnt - 1;
                }
@@ -3209,7 +3253,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                else
                        continue;
 
-               if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
+               if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
                        continue;
 
                cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
@@ -3218,8 +3262,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                        return -EINVAL;
                }
 
-               new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
-                                                cnt);
+               new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                if (!new_prog)
                        return -ENOMEM;
 
@@ -3233,6 +3276,84 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
        return 0;
 }
 
+/* fixup insn->imm field of bpf_call instructions
+ * and inline eligible helpers as explicit sequence of BPF instructions
+ *
+ * this function is called after eBPF program passed verification
+ */
+static int fixup_bpf_calls(struct bpf_verifier_env *env)
+{
+       struct bpf_prog *prog = env->prog;
+       struct bpf_insn *insn = prog->insnsi;
+       const struct bpf_func_proto *fn;
+       const int insn_cnt = prog->len;
+       struct bpf_insn insn_buf[16];
+       struct bpf_prog *new_prog;
+       struct bpf_map *map_ptr;
+       /* delta tracks how many extra insns earlier patches inserted, so
+        * i + delta is the index of the current insn in the patched program
+        * while i still walks the original insn_cnt-sized range
+        */
+       int i, cnt, delta = 0;
+
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               if (insn->code != (BPF_JMP | BPF_CALL))
+                       continue;
+
+               /* set per-program flags required by specific helpers */
+               if (insn->imm == BPF_FUNC_get_route_realm)
+                       prog->dst_needed = 1;
+               if (insn->imm == BPF_FUNC_get_prandom_u32)
+                       bpf_user_rnd_init_once();
+               if (insn->imm == BPF_FUNC_xdp_adjust_head)
+                       prog->xdp_adjust_head = 1;
+               if (insn->imm == BPF_FUNC_tail_call) {
+                       /* mark bpf_tail_call as different opcode to avoid
+                        * conditional branch in the interpreter for every normal
+                        * call and to prevent accidental JITing by JIT compiler
+                        * that doesn't support bpf_tail_call yet
+                        */
+                       insn->imm = 0;
+                       insn->code |= BPF_X;
+                       continue;
+               }
+
+               /* inline map lookups when the JIT is enabled and the map type
+                * provides a gen_lookup sequence; a poisoned map_ptr means the
+                * verifier saw more than one map reach this call site
+                */
+               if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
+                       map_ptr = env->insn_aux_data[i + delta].map_ptr;
+                       if (map_ptr == BPF_MAP_PTR_POISON ||
+                           !map_ptr->ops->map_gen_lookup)
+                               goto patch_call_imm;
+
+                       cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
+                       if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+                               verbose("bpf verifier is misconfigured\n");
+                               return -EINVAL;
+                       }
+
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
+                                                      cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta += cnt - 1;
+
+                       /* keep walking new program and skip insns we just inserted */
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
+patch_call_imm:
+               fn = prog->aux->ops->get_func_proto(insn->imm);
+               /* all functions that have prototype and verifier allowed
+                * programs to call them, must be real in-kernel functions
+                */
+               if (!fn->func) {
+                       verbose("kernel subsystem misconfigured func %s#%d\n",
+                               func_id_name(insn->imm), insn->imm);
+                       return -EFAULT;
+               }
+               /* rewrite imm from helper id to offset usable by the interpreter */
+               insn->imm = fn->func - __bpf_call_base;
+       }
+
+       return 0;
+}
+
 static void free_states(struct bpf_verifier_env *env)
 {
        struct bpf_verifier_state_list *sl, *sln;
@@ -3328,6 +3449,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
                /* program is valid, convert *(u32*)(ctx + off) accesses */
                ret = convert_ctx_accesses(env);
 
+       if (ret == 0)
+               ret = fixup_bpf_calls(env);
+
        if (log_level && log_len >= log_size - 1) {
                BUG_ON(log_len >= log_size);
                /* verifier log exceeded user supplied buffer */