bpf: fix mixed signed/unsigned derived min/max value bounds
index a8a725697bed693e8e77f225eea5dcc7db46dad0..af9e84a4944e60fdd617c0c69ecbc46d53e230ac 100644 (file)
@@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
 {
        regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
        regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+       regs[regno].value_from_signed = false;
        regs[regno].min_align = 0;
 }
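
The value_from_signed flag itself is declared in struct bpf_reg_state in include/linux/bpf_verifier.h, outside this diff; a minimal sketch of the assumed layout, showing only the fields touched here:

struct bpf_reg_state {
	enum bpf_reg_type type;
	/* ... union with imm, reg off/range, map_ptr elided ... */
	u32 id;
	s64 min_value;		/* BPF_REGISTER_MIN_RANGE when unbounded */
	u64 max_value;		/* BPF_REGISTER_MAX_RANGE when unbounded */
	u32 min_align;
	bool value_from_signed;	/* new: bounds derived from a signed compare */
};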
 
@@ -546,20 +547,6 @@ static int check_reg_arg(struct bpf_reg_state *regs, u32 regno,
        return 0;
 }
 
-static int bpf_size_to_bytes(int bpf_size)
-{
-       if (bpf_size == BPF_W)
-               return 4;
-       else if (bpf_size == BPF_H)
-               return 2;
-       else if (bpf_size == BPF_B)
-               return 1;
-       else if (bpf_size == BPF_DW)
-               return 8;
-       else
-               return -EINVAL;
-}
-
 static bool is_spillable_regtype(enum bpf_reg_type type)
 {
        switch (type) {
@@ -758,15 +745,29 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 }
 
 /* check access to 'struct bpf_context' fields */
-static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
+static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
                            enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
+       struct bpf_insn_access_aux info = {
+               .reg_type = *reg_type,
+       };
+
        /* for the analyzer, ctx accesses are already validated and converted */
        if (env->analyzer_ops)
                return 0;
 
        if (env->prog->aux->ops->is_valid_access &&
-           env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
+           env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
+               /* A non-zero info.ctx_field_size indicates that this field is a
+                * candidate for later verifier transformation to load the whole
+                * field and then apply a mask when accessed with a narrower
+                * access than the actual ctx access size. A zero
+                * info.ctx_field_size only allows whole-field access and
+                * rejects any other narrower access.
+                */
+               env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+               *reg_type = info.reg_type;
+
                /* remember the offset of last byte accessed in ctx */
                if (env->prog->aux->max_ctx_offset < off + size)
                        env->prog->aux->max_ctx_offset = off + size;
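
For context, is_valid_access now reports through struct bpf_insn_access_aux rather than a bare reg_type pointer. A hedged sketch of what a program type's callback might look like under the new contract; example_ctx, its fields, and the offsets are hypothetical:

/* Hypothetical context and callback, illustrating the new contract only. */
struct example_ctx {
	__u32 data;	/* whole-field access only */
	__u32 len;	/* narrower loads permitted */
};

static bool example_is_valid_access(int off, int size, enum bpf_access_type t,
				    struct bpf_insn_access_aux *info)
{
	if (t != BPF_READ || off < 0 || off + size > sizeof(struct example_ctx))
		return false;

	switch (off) {
	case offsetof(struct example_ctx, len):
		/* 4-byte field; allow 1/2-byte reads at the field's base
		 * offset and let the verifier mask the loaded value.
		 */
		info->ctx_field_size = 4;
		return size <= 4;
	default:
		/* leaving ctx_field_size at 0 rejects narrower access */
		return size == sizeof(__u32);
	}
}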
@@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
        return -EACCES;
 }
 
-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+static bool __is_pointer_value(bool allow_ptr_leaks,
+                              const struct bpf_reg_state *reg)
 {
-       if (env->allow_ptr_leaks)
+       if (allow_ptr_leaks)
                return false;
 
-       switch (env->cur_state.regs[regno].type) {
+       switch (reg->type) {
        case UNKNOWN_VALUE:
        case CONST_IMM:
                return false;
@@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
        }
 }
 
+static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+{
+       return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
+}
+
 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
                                   int off, int size, bool strict)
 {
@@ -868,7 +875,7 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
                            int bpf_size, enum bpf_access_type t,
                            int value_regno)
 {
@@ -911,7 +918,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
                        verbose("R%d leaks addr into ctx\n", value_regno);
                        return -EACCES;
                }
-               err = check_ctx_access(env, off, size, t, &reg_type);
+               err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
                if (!err && t == BPF_READ && value_regno >= 0) {
                        mark_reg_unknown_value_and_range(state->regs,
                                                         value_regno);
@@ -926,6 +933,10 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
                        verbose("invalid stack off=%d size=%d\n", off, size);
                        return -EACCES;
                }
+
+               if (env->prog->aux->stack_depth < -off)
+                       env->prog->aux->stack_depth = -off;
+
                if (t == BPF_WRITE) {
                        if (!env->allow_ptr_leaks &&
                            state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
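
Stack offsets are negative relative to the frame pointer r10, so -off is how far below it the program reaches; the verifier keeps the maximum in aux->stack_depth. A small worked example with illustrative offsets:

/* Illustrative: two stores and the resulting tracked depth.
 *
 *	*(u64 *)(r10 - 8)  = r1;	// off = -8,  stack_depth -> 8
 *	*(u32 *)(r10 - 64) = r2;	// off = -64, stack_depth -> 64
 *
 * The interpreter or a JIT can then size the frame to stack_depth bytes
 * instead of assuming the full MAX_BPF_STACK (512) for every program.
 */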
@@ -968,7 +979,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
        return err;
 }
 
-static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
+static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
        struct bpf_reg_state *regs = env->cur_state.regs;
        int err;
@@ -995,13 +1006,13 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
        }
 
        /* check whether atomic_add can read the memory */
-       err = check_mem_access(env, insn->dst_reg, insn->off,
+       err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
        if (err)
                return err;
 
        /* check whether atomic_add can write into the same memory */
-       return check_mem_access(env, insn->dst_reg, insn->off,
+       return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                BPF_SIZE(insn->code), BPF_WRITE, -1);
 }
 
@@ -1037,6 +1048,9 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                return -EACCES;
        }
 
+       if (env->prog->aux->stack_depth < -off)
+               env->prog->aux->stack_depth = -off;
+
        if (meta && meta->raw_mode) {
                meta->access_size = access_size;
                meta->regno = regno;
@@ -1344,8 +1358,8 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
                if (reg->type != PTR_TO_PACKET &&
                    reg->type != PTR_TO_PACKET_END)
                        continue;
-               reg->type = UNKNOWN_VALUE;
-               reg->imm = 0;
+               __mark_reg_unknown_value(state->spilled_regs,
+                                        i / BPF_REG_SIZE);
        }
 }
 
@@ -1414,7 +1428,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
         * is inferred from register state.
         */
        for (i = 0; i < meta.access_size; i++) {
-               err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
+               err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
                if (err)
                        return err;
        }
@@ -1650,6 +1664,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
        return 0;
 }
 
+static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
+                                       struct bpf_insn *insn)
+{
+       struct bpf_reg_state *regs = env->cur_state.regs;
+       struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
+       struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+       u8 opcode = BPF_OP(insn->code);
+       s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
+
+       /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
+       if (src_reg->imm > 0 && dst_reg->imm) {
+               switch (opcode) {
+               case BPF_ADD:
+                       /* dreg += sreg
+                        * where both have zero upper bits. Adding them
+                        * can only result in making one more bit non-zero
+                        * in the larger value.
+                        * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
+                        *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
+                        */
+                       dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
+                       dst_reg->imm--;
+                       break;
+               case BPF_AND:
+                       /* dreg &= sreg
+                        * AND cannot extend zero bits, only shrink them.
+                        * Ex.  0x00..00ffffff
+                        *    & 0x0f..ffffffff
+                        *     ----------------
+                        *      0x00..00ffffff
+                        */
+                       dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
+                       break;
+               case BPF_OR:
+                       /* dreg |= sreg
+                        * OR can only extend zero bits
+                        * Ex.  0x00..00ffffff
+                        *    | 0x0f..ffffffff
+                        *     ----------------
+                        *      0x0f..00ffffff
+                        */
+                       dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
+                       break;
+               case BPF_SUB:
+               case BPF_MUL:
+               case BPF_RSH:
+               case BPF_LSH:
+                       /* These may be flushed out later */
+               default:
+                       mark_reg_unknown_value(regs, insn->dst_reg);
+               }
+       } else {
+               mark_reg_unknown_value(regs, insn->dst_reg);
+       }
+
+       dst_reg->type = UNKNOWN_VALUE;
+       return 0;
+}
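
In this path reg->imm counts guaranteed-zero upper bits rather than holding a constant (0xffff has 48, 1 has 63, 0x10000 has 47). A standalone restatement of the bookkeeping above, as a hypothetical helper:

/* Hypothetical helper mirroring the ADD/AND/OR rules above: dst is a
 * known constant, src carries a count of guaranteed-zero high bits.
 */
static s64 zero_bits_after_alu(u8 op, u64 dst_const, s64 src_zero_bits)
{
	/* 63 - ilog2(c) = guaranteed-zero high bits of constant c */
	s64 dst_zero_bits = 63 - __ilog2_u64(dst_const);

	switch (op) {
	case BPF_ADD:	/* a sum can set at most one new higher bit */
		return min(src_zero_bits, dst_zero_bits) - 1;
	case BPF_AND:	/* AND never clears a known-zero bit */
		return max(src_zero_bits, dst_zero_bits);
	case BPF_OR:	/* a bit stays zero only if zero in both operands */
		return min(src_zero_bits, dst_zero_bits);
	default:	/* SUB/MUL/shifts: give up, value fully unknown */
		return 0;
	}
}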
+
 static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
                                struct bpf_insn *insn)
 {
@@ -1659,6 +1732,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
        u8 opcode = BPF_OP(insn->code);
        u64 dst_imm = dst_reg->imm;
 
+       if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
+               return evaluate_reg_imm_alu_unknown(env, insn);
+
        /* dst_reg->type == CONST_IMM here. Simulate execution of insns
         * containing ALU ops. Don't care about overflow or negative
         * values, just add/sub/... them; registers are in u64.
@@ -1763,10 +1839,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
        dst_align = dst_reg->min_align;
 
        /* We don't know anything about what was done to this register, mark it
-        * as unknown.
+        * as unknown. Also, if both derived bounds came from signed/unsigned
+        * mixed compares and one side is unbounded, we cannot really do anything
+        * with them as the boundaries cannot be trusted. Thus, arithmetic on
+        * two such regs will invalidate the bounds on the dst side.
         */
-       if (min_val == BPF_REGISTER_MIN_RANGE &&
-           max_val == BPF_REGISTER_MAX_RANGE) {
+       if ((min_val == BPF_REGISTER_MIN_RANGE &&
+            max_val == BPF_REGISTER_MAX_RANGE) ||
+           (BPF_SRC(insn->code) == BPF_X &&
+            ((min_val != BPF_REGISTER_MIN_RANGE &&
+              max_val == BPF_REGISTER_MAX_RANGE) ||
+             (min_val == BPF_REGISTER_MIN_RANGE &&
+              max_val != BPF_REGISTER_MAX_RANGE) ||
+             (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
+              dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
+             (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
+              dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
+            regs[insn->dst_reg].value_from_signed !=
+            regs[insn->src_reg].value_from_signed)) {
                reset_reg_range_values(regs, insn->dst_reg);
                return;
        }
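
The condition is dense, so an equivalent reading (with a hypothetical helper, not part of the patch): drop the bounds when the source is fully unbounded, or when a reg-reg op combines operands whose bounds came from compares of different signedness while either operand is only half-bounded.

/* Hypothetical helper: a register is "half-bounded" when exactly one of
 * its bounds still sits at the unbounded sentinel.
 */
static bool half_bounded(s64 min_value, u64 max_value)
{
	return (min_value == BPF_REGISTER_MIN_RANGE) !=
	       (max_value == BPF_REGISTER_MAX_RANGE);
}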
@@ -1950,9 +2040,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                         */
                        regs[insn->dst_reg].type = CONST_IMM;
                        regs[insn->dst_reg].imm = insn->imm;
+                       regs[insn->dst_reg].id = 0;
                        regs[insn->dst_reg].max_value = insn->imm;
                        regs[insn->dst_reg].min_value = insn->imm;
                        regs[insn->dst_reg].min_align = calc_align(insn->imm);
+                       regs[insn->dst_reg].value_from_signed = false;
                }
 
        } else if (opcode > BPF_END) {
@@ -2128,40 +2220,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
                            struct bpf_reg_state *false_reg, u64 val,
                            u8 opcode)
 {
+       bool value_from_signed = true;
+       bool is_range = true;
+
        switch (opcode) {
        case BPF_JEQ:
                /* If this is false then we know nothing Jon Snow, but if it is
                 * true then we know for sure.
                 */
                true_reg->max_value = true_reg->min_value = val;
+               is_range = false;
                break;
        case BPF_JNE:
                /* If this is true we know nothing Jon Snow, but if it is false
                 * we know the value for sure.
                 */
                false_reg->max_value = false_reg->min_value = val;
+               is_range = false;
                break;
        case BPF_JGT:
-               /* Unsigned comparison, the minimum value is 0. */
-               false_reg->min_value = 0;
+               value_from_signed = false;
                /* fallthrough */
        case BPF_JSGT:
+               if (true_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(true_reg, 0);
+               if (false_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(false_reg, 0);
+               if (opcode == BPF_JGT) {
+                       /* Unsigned comparison, the minimum value is 0. */
+                       false_reg->min_value = 0;
+               }
                /* If this is false then we know the maximum value is val,
                 * otherwise we know the minimum value is val + 1.
                 */
                false_reg->max_value = val;
+               false_reg->value_from_signed = value_from_signed;
                true_reg->min_value = val + 1;
+               true_reg->value_from_signed = value_from_signed;
                break;
        case BPF_JGE:
-               /* Unsigned comparison, the minimum value is 0. */
-               false_reg->min_value = 0;
+               value_from_signed = false;
                /* fallthrough */
        case BPF_JSGE:
+               if (true_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(true_reg, 0);
+               if (false_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(false_reg, 0);
+               if (opcode == BPF_JGE) {
+                       /* Unsigned comparison, the minimum value is 0. */
+                       false_reg->min_value = 0;
+               }
                /* If this is false then we know the maximum value is val - 1,
                 * otherwise we know the minimum value is val.
                 */
                false_reg->max_value = val - 1;
+               false_reg->value_from_signed = value_from_signed;
                true_reg->min_value = val;
+               true_reg->value_from_signed = value_from_signed;
                break;
        default:
                break;
@@ -2169,6 +2284,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 
        check_reg_overflow(false_reg);
        check_reg_overflow(true_reg);
+       if (is_range) {
+               if (__is_pointer_value(false, false_reg))
+                       reset_reg_range_values(false_reg, 0);
+               if (__is_pointer_value(false, true_reg))
+                       reset_reg_range_values(true_reg, 0);
+       }
 }
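
A brief worked example of the unsigned JGT case above:

/* Illustrative: "if r1 > 10 goto l_true" with BPF_JGT (unsigned, val = 10):
 *
 *	l_true:    r1.min_value = 11, value_from_signed = false
 *	fallthru:  r1.min_value = 0,  r1.max_value = 10
 *
 * If r1's previous bounds came from a signed compare, they are reset
 * before the new ones apply, so signed and unsigned bounds never mix;
 * the trailing is_range check also drops ranges learned on pointers.
 */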
 
 /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
@@ -2178,41 +2299,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
                                struct bpf_reg_state *false_reg, u64 val,
                                u8 opcode)
 {
+       bool value_from_signed = true;
+       bool is_range = true;
+
        switch (opcode) {
        case BPF_JEQ:
                /* If this is false then we know nothing Jon Snow, but if it is
                 * true then we know for sure.
                 */
                true_reg->max_value = true_reg->min_value = val;
+               is_range = false;
                break;
        case BPF_JNE:
                /* If this is true we know nothing Jon Snow, but if it is false
                 * we know the value for sure.
                 */
                false_reg->max_value = false_reg->min_value = val;
+               is_range = false;
                break;
        case BPF_JGT:
-               /* Unsigned comparison, the minimum value is 0. */
-               true_reg->min_value = 0;
+               value_from_signed = false;
                /* fallthrough */
        case BPF_JSGT:
+               if (true_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(true_reg, 0);
+               if (false_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(false_reg, 0);
+               if (opcode == BPF_JGT) {
+                       /* Unsigned comparison, the minimum value is 0. */
+                       true_reg->min_value = 0;
+               }
                /* If this is false then val <= the register; if it is true
                 * then the register < val.
                 */
                false_reg->min_value = val;
+               false_reg->value_from_signed = value_from_signed;
                true_reg->max_value = val - 1;
+               true_reg->value_from_signed = value_from_signed;
                break;
        case BPF_JGE:
-               /* Unsigned comparison, the minimum value is 0. */
-               true_reg->min_value = 0;
+               value_from_signed = false;
                /* fallthrough */
        case BPF_JSGE:
+               if (true_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(true_reg, 0);
+               if (false_reg->value_from_signed != value_from_signed)
+                       reset_reg_range_values(false_reg, 0);
+               if (opcode == BPF_JGE) {
+                       /* Unsigned comparison, the minimum value is 0. */
+                       true_reg->min_value = 0;
+               }
                /* If this is false then constant < register; if it is true
                 * then the register <= constant.
                 */
                false_reg->min_value = val + 1;
+               false_reg->value_from_signed = value_from_signed;
                true_reg->max_value = val;
+               true_reg->value_from_signed = value_from_signed;
                break;
        default:
                break;
@@ -2220,6 +2364,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 
        check_reg_overflow(false_reg);
        check_reg_overflow(true_reg);
+       if (is_range) {
+               if (__is_pointer_value(false, false_reg))
+                       reset_reg_range_values(false_reg, 0);
+               if (__is_pointer_value(false, true_reg))
+                       reset_reg_range_values(true_reg, 0);
+       }
 }
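
And the mirrored constant-on-the-left case:

/* Illustrative: "if 10 > r1 goto l_true" (dst_reg is CONST_IMM, val = 10):
 *
 *	l_true:    r1.max_value = 9   (r1 < 10)
 *	fallthru:  r1.min_value = 10  (r1 >= 10)
 *
 * with the same signedness bookkeeping and pointer reset as above.
 */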
 
 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
@@ -2407,6 +2557,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
                regs[insn->dst_reg].type = CONST_IMM;
                regs[insn->dst_reg].imm = imm;
+               regs[insn->dst_reg].id = 0;
                return 0;
        }
 
@@ -2826,6 +2977,8 @@ static bool states_equal(struct bpf_verifier_env *env,
                        return false;
                if (i % BPF_REG_SIZE)
                        continue;
+               if (old->stack_slot_type[i] != STACK_SPILL)
+                       continue;
                if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
                           &cur->spilled_regs[i / BPF_REG_SIZE],
                           sizeof(old->spilled_regs[0])))
@@ -2987,18 +3140,12 @@ static int do_check(struct bpf_verifier_env *env)
                        /* check that memory (src_reg + off) is readable,
                         * the state of dst_reg will be updated by this func
                         */
-                       err = check_mem_access(env, insn->src_reg, insn->off,
+                       err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_READ,
                                               insn->dst_reg);
                        if (err)
                                return err;
 
-                       if (BPF_SIZE(insn->code) != BPF_W &&
-                           BPF_SIZE(insn->code) != BPF_DW) {
-                               insn_idx++;
-                               continue;
-                       }
-
                        prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
 
                        if (*prev_src_type == NOT_INIT) {
@@ -3026,7 +3173,7 @@ static int do_check(struct bpf_verifier_env *env)
                        enum bpf_reg_type *prev_dst_type, dst_reg_type;
 
                        if (BPF_MODE(insn->code) == BPF_XADD) {
-                               err = check_xadd(env, insn);
+                               err = check_xadd(env, insn_idx, insn);
                                if (err)
                                        return err;
                                insn_idx++;
@@ -3045,7 +3192,7 @@ static int do_check(struct bpf_verifier_env *env)
                        dst_reg_type = regs[insn->dst_reg].type;
 
                        /* check that memory (dst_reg + off) is writeable */
-                       err = check_mem_access(env, insn->dst_reg, insn->off,
+                       err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
                                               insn->src_reg);
                        if (err)
@@ -3074,7 +3221,7 @@ static int do_check(struct bpf_verifier_env *env)
                                return err;
 
                        /* check that memory (dst_reg + off) is writeable */
-                       err = check_mem_access(env, insn->dst_reg, insn->off,
+                       err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
                                               -1);
                        if (err)
@@ -3172,7 +3319,8 @@ process_bpf_exit:
                insn_idx++;
        }
 
-       verbose("processed %d insns\n", insn_processed);
+       verbose("processed %d insns, stack depth %d\n",
+               insn_processed, env->prog->aux->stack_depth);
        return 0;
 }
 
@@ -3372,11 +3520,13 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
 static int convert_ctx_accesses(struct bpf_verifier_env *env)
 {
        const struct bpf_verifier_ops *ops = env->prog->aux->ops;
+       int i, cnt, size, ctx_field_size, delta = 0;
        const int insn_cnt = env->prog->len;
        struct bpf_insn insn_buf[16], *insn;
        struct bpf_prog *new_prog;
        enum bpf_access_type type;
-       int i, cnt, delta = 0;
+       bool is_narrower_load;
+       u32 target_size;
 
        if (ops->gen_prologue) {
                cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -3416,12 +3566,52 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
                        continue;
 
-               cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
-               if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+               ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
+               size = BPF_LDST_BYTES(insn);
+
+               /* If the read access is a narrower load of the field,
+                * convert it to a 4/8-byte load, to minimize program type
+                * specific convert_ctx_access changes. If the conversion is
+                * successful, we will apply the proper mask to the result.
+                */
+               is_narrower_load = size < ctx_field_size;
+               if (is_narrower_load) {
+                       u32 off = insn->off;
+                       u8 size_code;
+
+                       if (type == BPF_WRITE) {
+                               verbose("bpf verifier narrow ctx access misconfigured\n");
+                               return -EINVAL;
+                       }
+
+                       size_code = BPF_H;
+                       if (ctx_field_size == 4)
+                               size_code = BPF_W;
+                       else if (ctx_field_size == 8)
+                               size_code = BPF_DW;
+
+                       insn->off = off & ~(ctx_field_size - 1);
+                       insn->code = BPF_LDX | BPF_MEM | size_code;
+               }
+
+               target_size = 0;
+               cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
+                                             &target_size);
+               if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
+                   (ctx_field_size && !target_size)) {
                        verbose("bpf verifier is misconfigured\n");
                        return -EINVAL;
                }
 
+               if (is_narrower_load && size < target_size) {
+                       if (ctx_field_size <= 4)
+                               insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
+                                                               (1 << size * 8) - 1);
+                       else
+                               insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+                                                               (1ULL << size * 8) - 1);
+               }
+
                new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                if (!new_prog)
                        return -ENOMEM;
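
The net effect of the rewrite, for a hypothetical 1-byte read of a 4-byte context field on a little-endian host (offset illustrative):

/* Illustrative, little-endian:
 *
 *	before:  r0 = *(u8 *)(r1 + 64)	// 4-byte ctx field at offset 64
 *
 * is rewritten to a full-width load of the aligned field plus a mask:
 *
 *	after:   r0 = *(u32 *)(r1 + 64)	// off & ~(ctx_field_size - 1)
 *	         r0 &= 0xff		// (1 << 1 * 8) - 1
 *
 * The AND is skipped when convert_ctx_access already produced a load of
 * the requested size (size >= target_size).
 */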
@@ -3467,6 +3657,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         * the program array.
                         */
                        prog->cb_access = 1;
+                       env->prog->aux->stack_depth = MAX_BPF_STACK;
 
                        /* mark bpf_tail_call as different opcode to avoid
                         * conditional branch in the interpreter for every normal
@@ -3474,7 +3665,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         * that doesn't support bpf_tail_call yet
                         */
                        insn->imm = 0;
-                       insn->code |= BPF_X;
+                       insn->code = BPF_JMP | BPF_TAIL_CALL;
                        continue;
                }