As we need to add further flags to the bpf_prog structure, let's migrate
both bools to a bitfield representation. The size of the base structure
(excluding insns) remains unchanged at 40 bytes.
Also add bitfield annotations for kmemcheck, so that it doesn't throw
false positives. Even in case gcc would generate suboptimal code, this
field is not being accessed in performance critical paths.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
set_memory_ro((unsigned long)header, header->pages);
fp->bpf_func = (void *)ctx.target;
set_memory_ro((unsigned long)header, header->pages);
fp->bpf_func = (void *)ctx.target;
out:
kfree(ctx.offsets);
return;
out:
kfree(ctx.offsets);
return;
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)ctx.image;
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)ctx.image;
out:
kfree(ctx.offset);
}
out:
kfree(ctx.offset);
}
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
fp->bpf_func = (void *)ctx.target;
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
fp->bpf_func = (void *)ctx.target;
((u64 *)image)[1] = local_paca->kernel_toc;
#endif
fp->bpf_func = (void *)image;
((u64 *)image)[1] = local_paca->kernel_toc;
#endif
fp->bpf_func = (void *)image;
if (jit.prg_buf) {
set_memory_ro((unsigned long)header, header->pages);
fp->bpf_func = (void *) jit.prg_buf;
if (jit.prg_buf) {
set_memory_ro((unsigned long)header, header->pages);
fp->bpf_func = (void *) jit.prg_buf;
}
free_addrs:
kfree(jit.addrs);
}
free_addrs:
kfree(jit.addrs);
if (image) {
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
if (image) {
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
bpf_flush_icache(header, image + proglen);
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)image;
bpf_flush_icache(header, image + proglen);
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)image;
struct bpf_prog {
u16 pages; /* Number of allocated pages */
struct bpf_prog {
u16 pages; /* Number of allocated pages */
- bool jited; /* Is our filter JIT'ed? */
- bool gpl_compatible; /* Is our filter GPL compatible? */
+ kmemcheck_bitfield_begin(meta);
+ u16 jited:1, /* Is our filter JIT'ed? */
+ gpl_compatible:1; /* Is filter GPL compatible? */
+ kmemcheck_bitfield_end(meta);
u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
if (fp == NULL)
return NULL;
if (fp == NULL)
return NULL;
+ kmemcheck_annotate_bitfield(fp, meta);
+
aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
if (aux == NULL) {
vfree(fp);
aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
if (aux == NULL) {
vfree(fp);
fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
if (fp != NULL) {
fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
if (fp != NULL) {
+ kmemcheck_annotate_bitfield(fp, meta);
+
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
fp->pages = size / PAGE_SIZE;
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
fp->pages = size / PAGE_SIZE;
goto free_prog;
prog->orig_prog = NULL;
goto free_prog;
prog->orig_prog = NULL;
atomic_set(&prog->aux->refcnt, 1);
atomic_set(&prog->aux->refcnt, 1);
- prog->gpl_compatible = is_gpl;
+ prog->gpl_compatible = is_gpl ? 1 : 0;
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
int err;
fp->bpf_func = NULL;
int err;
fp->bpf_func = NULL;
err = bpf_check_classic(fp->insns, fp->len);
if (err) {
err = bpf_check_classic(fp->insns, fp->len);
if (err) {