
Commit 565c32b

Alexei Starovoitov authored and shiloong committed
bpf: track spill/fill of constants
OpenAnolis Bug Tracker: 0000429

commit f7cf25b upstream.

Compilers often spill induction variables into the stack, hence it is
necessary for the verifier to track scalar values of the registers
through stack slots.

Also, a few bpf programs were incorrectly rejected in the past, since the
verifier was not able to track such constants while they were used to
compute offsets into packet headers.

Tracking constants through the stack significantly decreases the chances
of state pruning, since two different constants are considered to be
different by state equivalency. The end result is that cilium tests suffer
a serious degradation in the number of states processed and a
corresponding increase in verification time:

                     before   after
bpf_lb-DLB_L3.o        1838    6441
bpf_lb-DLB_L4.o        3218    5908
bpf_lb-DUNKNOWN.o      1064    1064
bpf_lxc-DDROP_ALL.o   26935   93790
bpf_lxc-DUNKNOWN.o    34439  123886
bpf_netdev.o           9721   31413
bpf_overlay.o          6184   18561
bpf_lxc_jit.o         39389  359445

Further debugging showed that the cilium progs are also hurt by clang due
to the same constant tracking issue. Newer clang generates better code by
spilling less to the stack. Instead it keeps more constants in registers,
which hurts state pruning, since the verifier already tracks constants in
registers:

                   old clang  new clang
                      (no spill/fill tracking introduced by this patch)
bpf_lb-DLB_L3.o        1838       1923
bpf_lb-DLB_L4.o        3218       3077
bpf_lb-DUNKNOWN.o      1064       1062
bpf_lxc-DDROP_ALL.o   26935     166729
bpf_lxc-DUNKNOWN.o    34439     174607
bpf_netdev.o           9721       8407
bpf_overlay.o          6184       5420
bpf_lxc_jit.o         39389      39389

The final table is depressing:

                   old clang  old clang         new clang  new clang
                              const spill/fill             const spill/fill
bpf_lb-DLB_L3.o        1838       6441              1923       8128
bpf_lb-DLB_L4.o        3218       5908              3077       6707
bpf_lb-DUNKNOWN.o      1064       1064              1062       1062
bpf_lxc-DDROP_ALL.o   26935      93790            166729     380712
bpf_lxc-DUNKNOWN.o    34439     123886            174607     440652
bpf_netdev.o           9721      31413              8407      31904
bpf_overlay.o          6184      18561              5420      23569
bpf_lxc_jit.o         39389     359445             39389     359445

Tracking constants in registers already hurts state pruning. Adding
tracking of constants through the stack hurts pruning even more. A later
patch addresses this general constant tracking issue with coarse/precise
logic.

Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
[OP: - drop verbose_linfo() calls, as the function is not implemented in 4.19
     - adjust mark_reg_read() calls to match the prototype in 4.19]
Signed-off-by: Ovidiu Panait <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Fixes: CVE-2021-34556, CVE-2021-35477
Signed-off-by: Shile Zhang <[email protected]>
Acked-by: Mao Wenan <[email protected]>
1 parent 77d2789 commit 565c32b
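
To make the pattern concrete, below is a minimal, hypothetical XDP program of the kind the commit message describes: a constant header offset that the compiler may spill to the stack before it is used to bound a packet access. It is not taken from the cilium objects measured above; the section name, header sizes, and the volatile hint used to encourage a spill are assumptions, it relies on libbpf's bpf_helpers.h for SEC(), and whether clang actually spills the constant depends on the compiler version and optimization level.

/* Hypothetical illustration: a constant offset that may round-trip
 * through the stack before it is used to bound a packet access.
 * Before this patch, a constant that went through a stack slot came
 * back as an unknown scalar, and accesses computed from it could be
 * rejected as potentially out of bounds.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int parse_eth(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	/* volatile only to encourage a spill/fill; real programs hit this
	 * through compiler-spilled induction variables.
	 */
	volatile int off = 14;	/* assumed Ethernet header length */
	__u8 first;

	if (data + off + 1 > data_end)
		return XDP_DROP;
	first = *(__u8 *)(data + off);	/* offset may have gone through the stack */

	return first ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";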

File tree: 1 file changed (+63 -23 lines changed)

kernel/bpf/verifier.c

Lines changed: 63 additions & 23 deletions
@@ -964,6 +964,23 @@ static bool register_is_null(struct bpf_reg_state *reg)
 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
 }
 
+static bool register_is_const(struct bpf_reg_state *reg)
+{
+	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
+}
+
+static void save_register_state(struct bpf_func_state *state,
+				int spi, struct bpf_reg_state *reg)
+{
+	int i;
+
+	state->stack[spi].spilled_ptr = *reg;
+	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+	for (i = 0; i < BPF_REG_SIZE; i++)
+		state->stack[spi].slot_type[i] = STACK_SPILL;
+}
+
 /* check_stack_read/write functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
@@ -973,7 +990,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
 {
 	struct bpf_func_state *cur; /* state of the current function */
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
-	enum bpf_reg_type type;
+	struct bpf_reg_state *reg = NULL;
 
 	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
				 true);
@@ -990,27 +1007,36 @@ static int check_stack_write(struct bpf_verifier_env *env,
 	}
 
 	cur = env->cur_state->frame[env->cur_state->curframe];
-	if (value_regno >= 0 &&
-	    is_spillable_regtype((type = cur->regs[value_regno].type))) {
+	if (value_regno >= 0)
+		reg = &cur->regs[value_regno];
 
+	if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
+	    !register_is_null(reg) && env->allow_ptr_leaks) {
+		save_register_state(state, spi, reg);
+	} else if (reg && is_spillable_regtype(reg->type)) {
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
 			verbose(env, "invalid size of register spill\n");
 			return -EACCES;
 		}
 
-		if (state != cur && type == PTR_TO_STACK) {
+		if (state != cur && reg->type == PTR_TO_STACK) {
 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
 			return -EINVAL;
 		}
 
-		/* save register state */
-		state->stack[spi].spilled_ptr = cur->regs[value_regno];
-		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+		if (!env->allow_ptr_leaks) {
+			bool sanitize = false;
 
-		for (i = 0; i < BPF_REG_SIZE; i++) {
-			if (state->stack[spi].slot_type[i] == STACK_MISC &&
-			    !env->allow_ptr_leaks) {
+			if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+			    register_is_const(&state->stack[spi].spilled_ptr))
+				sanitize = true;
+			for (i = 0; i < BPF_REG_SIZE; i++)
+				if (state->stack[spi].slot_type[i] == STACK_MISC) {
+					sanitize = true;
+					break;
+				}
+			if (sanitize) {
 				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
 				int soff = (-spi - 1) * BPF_REG_SIZE;
 
@@ -1033,8 +1059,8 @@ static int check_stack_write(struct bpf_verifier_env *env,
 				}
 				*poff = soff;
 			}
-			state->stack[spi].slot_type[i] = STACK_SPILL;
 		}
+		save_register_state(state, spi, reg);
 	} else {
 		u8 type = STACK_MISC;
 
@@ -1057,8 +1083,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
 		/* when we zero initialize stack slots mark them as such */
-		if (value_regno >= 0 &&
-		    register_is_null(&cur->regs[value_regno]))
+		if (reg && register_is_null(reg))
 			type = STACK_ZERO;
 
 		/* Mark slots affected by this stack write. */
@@ -1076,6 +1101,7 @@ static int check_stack_read(struct bpf_verifier_env *env,
 	struct bpf_verifier_state *vstate = env->cur_state;
 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
+	struct bpf_reg_state *reg;
 	u8 *stype;
 
 	if (reg_state->allocated_stack <= slot) {
@@ -1084,11 +1110,20 @@ static int check_stack_read(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 	stype = reg_state->stack[spi].slot_type;
+	reg = &reg_state->stack[spi].spilled_ptr;
 
 	if (stype[0] == STACK_SPILL) {
 		if (size != BPF_REG_SIZE) {
-			verbose(env, "invalid size of register spill\n");
-			return -EACCES;
+			if (reg->type != SCALAR_VALUE) {
+				verbose(env, "invalid size of register fill\n");
+				return -EACCES;
+			}
+			if (value_regno >= 0) {
+				mark_reg_unknown(env, state->regs, value_regno);
+				state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+			}
+			mark_reg_read(env, reg, reg->parent);
+			return 0;
 		}
 		for (i = 1; i < BPF_REG_SIZE; i++) {
 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
@@ -1099,16 +1134,14 @@ static int check_stack_read(struct bpf_verifier_env *env,
 
 		if (value_regno >= 0) {
 			/* restore register state from stack */
-			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
+			state->regs[value_regno] = *reg;
 			/* mark reg as written since spilled pointer state likely
 			 * has its liveness marks cleared by is_state_visited()
 			 * which resets stack/reg liveness for state transitions
 			 */
 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
 		}
-		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
-			      reg_state->stack[spi].spilled_ptr.parent);
-		return 0;
+		mark_reg_read(env, reg, reg->parent);
 	} else {
 		int zeros = 0;
 
@@ -1123,8 +1156,7 @@ static int check_stack_read(struct bpf_verifier_env *env,
 				off, i, size);
 			return -EACCES;
 		}
-		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
-			      reg_state->stack[spi].spilled_ptr.parent);
+		mark_reg_read(env, reg, reg->parent);
 		if (value_regno >= 0) {
 			if (zeros == size) {
 				/* any size read into register is zero extended,
@@ -1137,8 +1169,8 @@ static int check_stack_read(struct bpf_verifier_env *env,
 			}
 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
 		}
-		return 0;
 	}
+	return 0;
 }
 
 static int check_stack_access(struct bpf_verifier_env *env,
@@ -1791,7 +1823,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 {
 	struct bpf_reg_state *reg = cur_regs(env) + regno;
 	struct bpf_func_state *state = func(env, reg);
-	int err, min_off, max_off, i, slot, spi;
+	int err, min_off, max_off, i, j, slot, spi;
 
 	if (reg->type != PTR_TO_STACK) {
 		/* Allow zero-byte read from NULL, regardless of pointer type */
@@ -1879,6 +1911,14 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 			*stype = STACK_MISC;
 			goto mark;
 		}
+		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+		    state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
+			__mark_reg_unknown(&state->stack[spi].spilled_ptr);
+			for (j = 0; j < BPF_REG_SIZE; j++)
+				state->stack[spi].slot_type[j] = STACK_MISC;
+			goto mark;
+		}
+
 err:
 		if (tnum_is_const(reg->var_off)) {
 			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
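To see the effect of save_register_state() and the new fill path in isolation, here is a small, self-contained user-space sketch of the idea. The types and names (reg_model, slot_model, spill, fill) are invented for illustration only; the real verifier tracks a full bpf_reg_state with tnums, bounds, and liveness per spilled slot, none of which is modeled here.

/* Simplified model of "track spill/fill of constants".  All types and
 * names below are invented for illustration and are not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define BPF_REG_SIZE 8

enum slot_type { SLOT_INVALID, SLOT_MISC, SLOT_SPILL };

struct reg_model {
	bool known_const;	/* models register_is_const() */
	long value;		/* the constant, if known */
};

struct slot_model {
	enum slot_type type[BPF_REG_SIZE];
	struct reg_model spilled;	/* models stack[spi].spilled_ptr */
};

/* Spill: with constant tracking (this patch), remember the register state
 * as save_register_state() does; without it, a spilled scalar only marks
 * the slot as "misc" and its value is forgotten.
 */
static void spill(struct slot_model *slot, const struct reg_model *reg,
		  bool track_consts)
{
	int i;

	if (track_consts && reg->known_const) {
		slot->spilled = *reg;
		for (i = 0; i < BPF_REG_SIZE; i++)
			slot->type[i] = SLOT_SPILL;
	} else {
		for (i = 0; i < BPF_REG_SIZE; i++)
			slot->type[i] = SLOT_MISC;
	}
}

/* Fill: a spill slot restores the saved state; a misc slot yields an
 * unknown scalar.
 */
static struct reg_model fill(const struct slot_model *slot)
{
	if (slot->type[0] == SLOT_SPILL)
		return slot->spilled;
	return (struct reg_model){ .known_const = false, .value = 0 };
}

int main(void)
{
	struct slot_model slot = { 0 };
	struct reg_model r1 = { .known_const = true, .value = 14 };
	struct reg_model out;

	spill(&slot, &r1, false);	/* pre-patch: value is lost */
	out = fill(&slot);
	printf("without tracking: known_const=%d\n", out.known_const);

	spill(&slot, &r1, true);	/* post-patch: value survives */
	out = fill(&slot);
	printf("with tracking:    known_const=%d value=%ld\n",
	       out.known_const, out.value);
	return 0;
}

As the tables in the commit message show, keeping the exact constant across the spill/fill round trip makes fewer states compare as equivalent; that pruning cost is what the later coarse/precise tracking work addresses.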