 * Testsuite for eBPF verifier
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.

#include <linux/bpf.h>
#include <linux/unistd.h>
#include <linux/filter.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))

        struct bpf_insn insns[MAX_INSNS];
        enum bpf_prog_type prog_type;
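/* Table of verifier test programs.  Each entry pairs a raw instruction
 * sequence with the expected verdict and, for programs that must be
 * rejected, a substring of the expected verifier error message.
 */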
static struct bpf_test tests[] = {
        BPF_MOV64_IMM(BPF_REG_1, 1),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
        BPF_MOV64_IMM(BPF_REG_2, 3),
        BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
        BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
        .errstr = "unreachable",
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 0),
        .errstr = "unreachable",
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
        .errstr = "jump out of range",
        BPF_JMP_IMM(BPF_JA, 0, 0, -2),
        .errstr = "jump out of range",
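        /* BPF_LD_IMM64 occupies two instruction slots; the conditional
         * jump of +1 below lands in the middle of it, which the verifier
         * must reject.
         */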
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_LD_IMM64(BPF_REG_0, 0),
        BPF_LD_IMM64(BPF_REG_0, 0),
        BPF_LD_IMM64(BPF_REG_0, 1),
        BPF_LD_IMM64(BPF_REG_0, 1),
        BPF_MOV64_IMM(BPF_REG_0, 2),
        .errstr = "invalid BPF_LD_IMM insn",
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_LD_IMM64(BPF_REG_0, 0),
        BPF_LD_IMM64(BPF_REG_0, 0),
        BPF_LD_IMM64(BPF_REG_0, 1),
        BPF_LD_IMM64(BPF_REG_0, 1),
        .errstr = "invalid BPF_LD_IMM insn",
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
        BPF_LD_IMM64(BPF_REG_0, 0),
        BPF_LD_IMM64(BPF_REG_0, 0),
        BPF_LD_IMM64(BPF_REG_0, 1),
        BPF_LD_IMM64(BPF_REG_0, 1),
        .errstr = "invalid bpf_ld_imm64 insn",
        BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
        .errstr = "invalid bpf_ld_imm64 insn",
        BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
        .errstr = "invalid bpf_ld_imm64 insn",
        BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
        .errstr = "jump out of range",
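        /* Backward jumps create loops; the verifier requires the control
         * flow graph to be a DAG and rejects any back-edge.
         */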
        BPF_JMP_IMM(BPF_JA, 0, 0, -1),
        .errstr = "back-edge",
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
        BPF_JMP_IMM(BPF_JA, 0, 0, -4),
        .errstr = "back-edge",
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
        .errstr = "back-edge",
        "read uninitialized register",
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
        .errstr = "R2 !read_ok",
        "read invalid register",
        BPF_MOV64_REG(BPF_REG_0, -1),
        .errstr = "R15 is invalid",
        "program doesn't init R0 before exit",
        BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
        .errstr = "R0 !read_ok",
        "program doesn't init R0 before exit in all branches",
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
        .errstr = "R0 !read_ok",
        "stack out of bounds",
        BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
        .errstr = "invalid stack",
        "invalid call insn1",
        BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
        .errstr = "BPF_CALL uses reserved",
        "invalid call insn2",
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
        .errstr = "BPF_CALL uses reserved",
        "invalid function call",
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
        .errstr = "invalid func 1234567",
        "uninitialized stack1",
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        .errstr = "invalid indirect read from stack",
        "uninitialized stack2",
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
        .errstr = "invalid read from stack",
        "check valid spill/fill",
        /* spill R1(ctx) into stack */
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
        /* fill it back into R2 */
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
        /* should be able to access R0 = *(R2 + 8) */
        /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
        BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
        "check corrupted spill/fill",
        /* spill R1(ctx) into stack */
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
        /* mess up the spilled R1 pointer: overwrite one byte of it */
        BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
        /* filling it back into R0 should now fail */
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
        .errstr = "corrupted spill",
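        /* Register numbers live in 4-bit fields of struct bpf_insn;
         * anything outside R0..R10 must be rejected before other checks.
         */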
        "invalid src register in STX",
        BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
        .errstr = "R15 is invalid",
        "invalid dst register in STX",
        BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
        .errstr = "R14 is invalid",
        "invalid dst register in ST",
        BPF_ST_MEM(BPF_B, 14, -1, -1),
        .errstr = "R14 is invalid",
        "invalid src register in LDX",
        BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
        .errstr = "R12 is invalid",
        "invalid dst register in LDX",
        BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
        .errstr = "R11 is invalid",
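        /* Hand-encoded opcodes with invalid or reserved instruction
         * encodings.
         */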
        BPF_RAW_INSN(0, 0, 0, 0, 0),
        .errstr = "invalid BPF_LD_IMM",
        BPF_RAW_INSN(1, 0, 0, 0, 0),
        .errstr = "BPF_LDX uses reserved fields",
        BPF_RAW_INSN(-1, 0, 0, 0, 0),
        .errstr = "invalid BPF_ALU opcode f0",
        BPF_RAW_INSN(-1, -1, -1, -1, -1),
        .errstr = "invalid BPF_ALU opcode f0",
        BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
        .errstr = "BPF_ALU uses reserved fields",
        "misaligned read from stack",
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
        .errstr = "misaligned access",
        "invalid map_fd for function call",
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
        .errstr = "fd 0 is not pointing to valid bpf_map",
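        /* bpf_map_lookup_elem() returns either a pointer to a map value or
         * NULL; R0 must be NULL-checked before it may be dereferenced.
         */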
        "don't check return value before access",
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
        .errstr = "R0 invalid mem access 'map_value_or_null'",
        "access memory with incorrect alignment",
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
        .errstr = "misaligned access",
        "sometimes access memory with incorrect alignment",
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
        BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
        .errstr = "R0 invalid mem access",
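        /* Conditional stores through a copy of the frame pointer at
         * several stack offsets; every path stays inside the stack frame.
         */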
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
        BPF_JMP_IMM(BPF_JA, 0, 0, 14),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
        BPF_JMP_IMM(BPF_JA, 0, 0, 11),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
        BPF_JMP_IMM(BPF_JA, 0, 0, 8),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
        BPF_JMP_IMM(BPF_JA, 0, 0, 5),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
        BPF_JMP_IMM(BPF_JA, 0, 0, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 19),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
        BPF_JMP_IMM(BPF_JA, 0, 0, 15),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
        BPF_JMP_IMM(BPF_JA, 0, 0, 11),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
        BPF_JMP_IMM(BPF_JA, 0, 0, 7),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
        BPF_JMP_IMM(BPF_JA, 0, 0, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
        BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
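        /* Long chains of forward conditional jumps; exercises branch
         * offset handling in the verifier and the JIT.
         */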
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
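        /* The same conditional store repeated down a chain of branches;
         * the verifier must track register state along every path.
         */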
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 2),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
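        /* struct __sk_buff is the user-visible view of the socket buffer;
         * the verifier checks every ctx access against the fields each
         * program type is allowed to read or write.
         */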
        "access skb fields ok",
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, len)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, mark)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, pkt_type)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, queue_mapping)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, protocol)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, vlan_present)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, vlan_tci)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
        "access skb fields bad1",
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
        .errstr = "invalid bpf_context access",
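        /* Depending on the branch taken, R1 holds either the context
         * pointer or a map value pointer; the verifier refuses ctx field
         * access through a register whose pointer type differs between
         * paths ("different pointers").
         */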
        "access skb fields bad2",
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, pkt_type)),
        .errstr = "different pointers",
        "access skb fields bad3",
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, pkt_type)),
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JA, 0, 0, -12),
        .errstr = "different pointers",
        "access skb fields bad4",
        BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
                    offsetof(struct __sk_buff, len)),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JA, 0, 0, -13),
        .errstr = "different pointers",
        "check skb->mark is not writeable by sockets",
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
                    offsetof(struct __sk_buff, mark)),
        .errstr = "invalid bpf_context access",
        "check skb->tc_index is not writeable by sockets",
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
                    offsetof(struct __sk_buff, tc_index)),
        .errstr = "invalid bpf_context access",
        "check non-u32 access to cb",
        BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
                    offsetof(struct __sk_buff, cb[0])),
        .errstr = "invalid bpf_context access",
        "check out of range skb->cb access",
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, cb[60])),
        .errstr = "invalid bpf_context access",
        .prog_type = BPF_PROG_TYPE_SCHED_ACT,
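        /* Socket filters may write only cb[]; mark and tc_index become
         * writeable only for tc (SCHED_CLS/SCHED_ACT) programs.
         */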
        "write skb fields from socket prog",
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, cb[4])),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, mark)),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, tc_index)),
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
                    offsetof(struct __sk_buff, cb[0])),
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
                    offsetof(struct __sk_buff, cb[2])),
        "write skb fields from tc_cls_act prog",
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, cb[0])),
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
                    offsetof(struct __sk_buff, mark)),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                    offsetof(struct __sk_buff, tc_index)),
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
                    offsetof(struct __sk_buff, tc_index)),
        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
                    offsetof(struct __sk_buff, cb[3])),
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
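/* Test programs are zero-padded out to MAX_INSNS; scan backwards for the
 * last non-zero instruction to recover the real program length.
 */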
static int probe_filter_length(struct bpf_insn *fp)
        for (len = MAX_INSNS - 1; len > 0; --len)
                if (fp[len].code != 0 || fp[len].imm != 0)
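/* Create a small hash map so that tests which call map helpers can be
 * patched up with a real map fd (see the fixup handling below).
 */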
static int create_map(void)
        long long key, value = 0;
        map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
                                1024);
        printf("failed to create map '%s'\n", strerror(errno));
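/* Load every test program through bpf_prog_load() and compare the
 * verifier's verdict, and its error string for rejected programs,
 * against the expected result.
 */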
static int test(void)
        int prog_fd, i, pass_cnt = 0, err_cnt = 0;
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                struct bpf_insn *prog = tests[i].insns;
                int prog_type = tests[i].prog_type;
                int prog_len = probe_filter_length(prog);
                int *fixup = tests[i].fixup;
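                /* Tests that call map helpers use fd 0 as a placeholder in
                 * BPF_LD_MAP_FD(); patch in the fd of a real map before
                 * loading.
                 */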
                map_fd = create_map();
                prog[*fixup].imm = map_fd;
                printf("#%d %s ", i, tests[i].descr);
                prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
                                        prog, prog_len * sizeof(struct bpf_insn),
                if (tests[i].result == ACCEPT) {
                        printf("FAIL\nfailed to load prog '%s'\n",
                        printf("%s", bpf_log_buf);
                        printf("FAIL\nunexpected success to load\n");
                        printf("%s", bpf_log_buf);
                        if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
                                printf("FAIL\nunexpected error message: %s",
        printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);