Diffstat (limited to 'kernel/bpf/verifier.c')
-rw-r--r--	kernel/bpf/verifier.c	62
1 file changed, 49 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index dfb138b46488..5fb69a85d967 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4981,6 +4981,13 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 next_insn:
 			insn++;
 			i++;
+			continue;
+		}
+
+		/* Basic sanity check before we invest more work here. */
+		if (!bpf_opcode_in_insntable(insn->code)) {
+			verbose(env, "unknown opcode %02x\n", insn->code);
+			return -EINVAL;
 		}
 	}
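Editor's note: the new check is a plain table lookup. bpf_opcode_in_insntable() lives in kernel/bpf/core.c and consults a 256-entry table derived from the interpreter's instruction map, so any opcode byte outside that table is rejected before the verifier does further work. A minimal userspace sketch of the idea; the table entries and the names insntable/opcode_in_insntable here are hand-picked for illustration, not the kernel's generated table:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical membership table, indexed by the raw opcode byte.
 * The kernel derives the real one from its instruction map.
 */
static const bool insntable[256] = {
	[0x07] = true,	/* BPF_ALU64 | BPF_ADD | BPF_K */
	[0xb7] = true,	/* BPF_ALU64 | BPF_MOV | BPF_K */
	[0x95] = true,	/* BPF_JMP   | BPF_EXIT */
};

static bool opcode_in_insntable(uint8_t code)
{
	return insntable[code];	/* O(1) reject of unknown opcodes */
}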
@@ -5064,14 +5071,21 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
 	return new_prog;
 }
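For context on the helper the later hunk calls: bpf_patch_insn_data() replaces the single instruction at a given offset with a longer patch and returns a reallocated program; the kernel version also fixes up branch offsets and per-instruction verifier bookkeeping. A minimal userspace model of just the splice, under the assumption that only the copy logic matters here; patch_insns and its signature are hypothetical, not a kernel API:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bpf.h>	/* struct bpf_insn */

/* Replace the one instruction at off with len new instructions. */
static struct bpf_insn *patch_insns(const struct bpf_insn *prog, uint32_t cnt,
				    uint32_t off, const struct bpf_insn *patch,
				    uint32_t len, uint32_t *new_cnt)
{
	struct bpf_insn *out;

	*new_cnt = cnt + len - 1;	/* one insn becomes len insns */
	out = calloc(*new_cnt, sizeof(*out));
	if (!out)
		return NULL;
	memcpy(out, prog, off * sizeof(*out));			/* prefix   */
	memcpy(out + off, patch, len * sizeof(*patch));		/* patchlet */
	memcpy(out + off + len, prog + off + 1,			/* suffix   */
	       (cnt - off - 1) * sizeof(*prog));
	return out;
}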
-/* The verifier does more data flow analysis than llvm and will not explore
- * branches that are dead at run time. Malicious programs can have dead code
- * too. Therefore replace all dead at-run-time code with nops.
+/* The verifier does more data flow analysis than llvm and will not
+ * explore branches that are dead at run time. Malicious programs can
+ * have dead code too. Therefore replace all dead at-run-time code
+ * with 'ja -1'.
+ *
+ * Just nops are not optimal, e.g. if they would sit at the end of the
+ * program and through another bug we would manage to jump there, then
+ * we'd execute beyond program memory otherwise. Returning exception
+ * code also wouldn't work since we can have subprogs where the dead
+ * code could be located.
  */
 static void sanitize_dead_code(struct bpf_verifier_env *env)
 {
 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
-	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
 	struct bpf_insn *insn = env->prog->insnsi;
 	const int insn_cnt = env->prog->len;
 	int i;
@@ -5079,7 +5093,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
 	for (i = 0; i < insn_cnt; i++) {
 		if (aux_data[i].seen)
 			continue;
-		memcpy(insn + i, &trap, sizeof(trap));
+		memcpy(insn + i, &trap, sizeof(trap));
 	}
 }
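Why 'ja -1' traps in place: a BPF jump lands at insn + off + 1, so an offset of -1 branches back to the same instruction. Even if a stray jump reaches sanitized dead code, execution spins on one instruction instead of running past the end of the program. A sketch of that target arithmetic, simplified from the interpreter; ja_target is a hypothetical name:

#include <linux/bpf.h>	/* struct bpf_insn */

/* Next pc after an unconditional BPF_JA jump: pc + off + 1.
 * For BPF_JMP_IMM(BPF_JA, 0, 0, -1) this is pc itself, so the
 * trap instruction loops on itself rather than falling through.
 */
static const struct bpf_insn *ja_target(const struct bpf_insn *pc)
{
	return pc + pc->off + 1;
}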
@@ -5386,15 +5400,37 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 	int i, cnt, delta = 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
+		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
-			/* due to JIT bugs clear upper 32-bits of src register
-			 * before div/mod operation
-			 */
-			insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
-			insn_buf[1] = *insn;
-			cnt = 2;
-			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+			struct bpf_insn mask_and_div[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				/* Rx div 0 -> 0 */
+				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
+				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+				*insn,
+			};
+			struct bpf_insn mask_and_mod[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				/* Rx mod 0 -> Rx */
+				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+				*insn,
+			};
+			struct bpf_insn *patchlet;
+
+			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+				patchlet = mask_and_div + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+			} else {
+				patchlet = mask_and_mod + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+			}
+
+			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
 			if (!new_prog)
 				return -ENOMEM;
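Written as plain C, the 32-bit mask_and_div patchlet computes the following at run time. The leading BPF_MOV32_REG masks the upper half of the source register to guard against JIT bugs, which is also why the 64-bit variants start one instruction into the array (is64 skips the mask); the branch then implements the diff's own "Rx div 0 -> 0" rule, while mask_and_mod analogously skips the operation when the divisor is zero, leaving Rx unchanged. A sketch only; div32_patched is a hypothetical name, not kernel code:

#include <stdint.h>

/* Run-time effect of the 32-bit mask_and_div sequence. */
static uint64_t div32_patched(uint64_t dst, uint64_t src)
{
	uint32_t s = (uint32_t)src;	/* BPF_MOV32_REG: mask upper 32 bits */

	if (s == 0)			/* BPF_JNE src, 0 falls through */
		return 0;		/* BPF_XOR dst, dst: Rx div 0 -> 0 */
	return (uint32_t)dst / s;	/* original BPF_ALU | BPF_DIV | BPF_X */
}

Note that the 32-bit XOR and divide both zero-extend into the 64-bit destination register, so returning the 32-bit quotient as a uint64_t matches the BPF semantics.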