| From foo@baz Fri Dec 22 16:57:35 CET 2017 |
| From: Daniel Borkmann <daniel@iogearbox.net> |
| Date: Fri, 22 Dec 2017 16:29:03 +0100 |
| Subject: bpf: fix branch pruning logic |
| To: gregkh@linuxfoundation.org |
| Cc: ast@kernel.org, daniel@iogearbox.net, jannh@google.com, stable@vger.kernel.org, Alexei Starovoitov <ast@fb.com> |
| Message-ID: <20171222152905.3455-3-daniel@iogearbox.net> |
| |
| From: Daniel Borkmann <daniel@iogearbox.net> |
| |
| |
| From: Alexei Starovoitov <ast@fb.com> |
| |
| [ Upstream commit c131187db2d3fa2f8bf32fdf4e9a4ef805168467 ] |
| |
| When the verifier detects that a register contains a runtime constant |
| and it's compared with another constant it will prune exploration |
| of the branch that is guaranteed not to be taken at runtime. |
| This is all correct, but a malicious program may be constructed |
| in such a way that it always has a constant comparison and |
| the other branch is never taken under any conditions. |
| In this case such path through the program will not be explored |
| by the verifier. It won't be taken at run-time either, but since |
| all instructions are JITed the malicious program may cause JITs |
| to complain about using reserved fields, etc. |
| To fix the issue we have to track the instructions explored by |
| the verifier and sanitize instructions that are dead at run time |
| with NOPs. We cannot reject such dead code, since llvm generates |
| it for valid C code, as it doesn't do as much data flow |
| analysis as the verifier does. |
| |
| Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)") |
| Signed-off-by: Alexei Starovoitov <ast@kernel.org> |
| Acked-by: Daniel Borkmann <daniel@iogearbox.net> |
| Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| include/linux/bpf_verifier.h | 1 + |
| kernel/bpf/verifier.c | 27 +++++++++++++++++++++++++++ |
| 2 files changed, 28 insertions(+) |
| |
| --- a/include/linux/bpf_verifier.h |
| +++ b/include/linux/bpf_verifier.h |
| @@ -68,6 +68,7 @@ struct bpf_verifier_state_list { |
| |
| struct bpf_insn_aux_data { |
| enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ |
| + bool seen; /* this insn was processed by the verifier */ |
| }; |
| |
| #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ |
| --- a/kernel/bpf/verifier.c |
| +++ b/kernel/bpf/verifier.c |
| @@ -2862,6 +2862,7 @@ static int do_check(struct bpf_verifier_ |
| if (err) |
| return err; |
| |
| + env->insn_aux_data[insn_idx].seen = true; |
| if (class == BPF_ALU || class == BPF_ALU64) { |
| err = check_alu_op(env, insn); |
| if (err) |
| @@ -3059,6 +3060,7 @@ process_bpf_exit: |
| return err; |
| |
| insn_idx++; |
| + env->insn_aux_data[insn_idx].seen = true; |
| } else { |
| verbose("invalid BPF_LD mode\n"); |
| return -EINVAL; |
| @@ -3218,6 +3220,7 @@ static int adjust_insn_aux_data(struct b |
| u32 off, u32 cnt) |
| { |
| struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; |
| + int i; |
| |
| if (cnt == 1) |
| return 0; |
| @@ -3227,6 +3230,8 @@ static int adjust_insn_aux_data(struct b |
| memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); |
| memcpy(new_data + off + cnt - 1, old_data + off, |
| sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); |
| + for (i = off; i < off + cnt - 1; i++) |
| + new_data[i].seen = true; |
| env->insn_aux_data = new_data; |
| vfree(old_data); |
| return 0; |
| @@ -3245,6 +3250,25 @@ static struct bpf_prog *bpf_patch_insn_d |
| return new_prog; |
| } |
| |
| +/* The verifier does more data flow analysis than llvm and will not explore |
| + * branches that are dead at run time. Malicious programs can have dead code |
| + * too. Therefore replace all dead at-run-time code with nops. |
| + */ |
| +static void sanitize_dead_code(struct bpf_verifier_env *env) |
| +{ |
| + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; |
| + struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); |
| + struct bpf_insn *insn = env->prog->insnsi; |
| + const int insn_cnt = env->prog->len; |
| + int i; |
| + |
| + for (i = 0; i < insn_cnt; i++) { |
| + if (aux_data[i].seen) |
| + continue; |
| + memcpy(insn + i, &nop, sizeof(nop)); |
| + } |
| +} |
| + |
| /* convert load instructions that access fields of 'struct __sk_buff' |
| * into sequence of instructions that access fields of 'struct sk_buff' |
| */ |
| @@ -3407,6 +3431,9 @@ skip_full_check: |
| free_states(env); |
| |
| if (ret == 0) |
| + sanitize_dead_code(env); |
| + |
| + if (ret == 0) |
| /* program is valid, convert *(u32*)(ctx + off) accesses */ |
| ret = convert_ctx_accesses(env); |
| |