// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define OFF	insn->off
#define IMM	insn->imm
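/* Illustrative example (not part of the original file): with the macros
 * above, an interpreter statement such as "DST += SRC;" for the insn
 * { .code = BPF_ALU64 | BPF_ADD | BPF_X, .dst_reg = BPF_REG_1,
 *   .src_reg = BPF_REG_2 } expands to
 *
 *	regs[insn->dst_reg] += regs[insn->src_reg];
 *
 * i.e. R1 += R2 on the u64 pseudo-register array backing BPF_R0..BPF_R10.
 */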
struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

/* Tell BPF programs that include vmlinux.h what the kernel's PAGE_SIZE is. */
enum page_size_enum {
	__PAGE_SIZE = PAGE_SIZE
};

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, __PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}
	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (!fp->active) {
		vfree(fp);
		kfree(aux);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();
	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
#ifdef CONFIG_FINEIBT
	INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
#endif
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->dst_mutex);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->stats) {
		free_percpu(prog->active);
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
					  sizeof(*prog->aux->jited_linfo),
					  bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo &&
	    (!prog->jited || !prog->aux->jited_linfo[0])) {
		kvfree(prog->aux->jited_linfo);
		prog->aux->jited_linfo = NULL;
	}

	kfree(prog->aux->kfunc_tab);
	prog->aux->kfunc_tab = NULL;
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog. The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		fp_old->stats = NULL;
		fp_old->active = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	free_percpu(fp->stats);
	free_percpu(fp->active);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA1_DIGEST_WORDS];
	u32 ws[SHA1_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha1_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
	blocks = bsize / SHA1_BLOCK_SIZE;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha1_transform(digest, todo, ws);
		todo += SHA1_BLOCK_SIZE;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
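/* Illustrative note (not part of the original file): fp->tag is the
 * truncated SHA-1 over the normalized instruction image computed above.
 * It is what userspace tooling reports for a loaded program, e.g.
 * "bpftool prog" prints a line such as
 *
 *	tag 2f142349caf6bc36
 *
 * (tag value made up for illustration), and bpf_prog_ksym_set_name()
 * below embeds the same hex digits into the bpf_prog_<tag>_<name> symbol.
 */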
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	s64 off_min, off_max, off;
	s32 delta = end_new - end_old;

	if (insn->code == (BPF_JMP32 | BPF_JA)) {
		off = insn->imm;
		off_min = S32_MIN;
		off_max = S32_MAX;
	} else {
		off = insn->off;
		off_min = S16_MIN;
		off_max = S16_MAX;
	}

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass) {
		if (insn->code == (BPF_JMP32 | BPF_JA))
			insn->imm = off;
		else
			insn->off = off;
	}
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		if (bpf_pseudo_func(insn)) {
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
			if (ret)
				return ret;
			continue;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed and we cannot reverse to the original
	 * state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}
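/* Illustrative example (not part of the original file): patching one insn
 * with a three-insn patchlet at offset 5 of a 10-insn program,
 *
 *	struct bpf_prog *new;
 *
 *	new = bpf_patch_insn_single(prog, 5, patch, 3);
 *
 * grows prog->len from 10 to 12 (insn_delta = len - 1 = 2), shifts insns
 * 6..9 up by two slots, and rewrites any jump or BPF_PSEUDO_CALL crossing
 * offset 5 via bpf_adj_branches(). On success the returned program must be
 * used instead of the old one, which may have been reallocated and freed.
 */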
static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->real_func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
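/* Illustrative example (not part of the original file): for a program
 * named "xdp_pass", the function above produces a kallsyms entry such as
 *
 *	bpf_prog_6deef7357e7b4530_xdp_pass
 *
 * i.e. "bpf_prog_" + 16 hex digits of prog->tag + "_" + the BTF function
 * name (or prog->aux->name as a fallback). The tag shown is made up for
 * illustration.
 */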
static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_token_capable(fp->aux->token, CAP_BPF))
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);

#ifdef CONFIG_FINEIBT
	/*
	 * With FineIBT, code in the __cfi_foo() symbols can get executed
	 * and hence the unwinder needs help.
	 */
	if (cfi_mode != CFI_FINEIBT)
		return;

	snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
		 "__cfi_%s", fp->aux->ksym.name);

	fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
	fp->aux->ksym_prefix.end   = (unsigned long) fp->bpf_func;

	bpf_ksym_add(&fp->aux->ksym_prefix);
#endif
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);

#ifdef CONFIG_FINEIBT
	if (cfi_mode != CFI_FINEIBT)
		return;
	bpf_ksym_del(&fp->aux->ksym_prefix);
#endif
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	char *ret = NULL;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;
		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

/* BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
	struct list_head list;
	void *ptr;
	unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
	memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
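/* Illustrative example (not part of the original file): chunks are 64 bytes
 * (1 << BPF_PROG_CHUNK_SHIFT), so a 100-byte image needs
 *
 *	BPF_PROG_SIZE_TO_NBITS(100) = round_up(100, 64) / 64 = 2
 *
 * bitmap bits, i.e. two contiguous 64-byte chunks inside a pack.
 */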
static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE	(SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE	PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT	(BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_prog_pack *pack;

	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
		       GFP_KERNEL);
	if (!pack)
		return NULL;
	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
	if (!pack->ptr) {
		kfree(pack);
		return NULL;
	}
	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
	list_add_tail(&pack->list, &pack_list);

	set_vm_flush_reset_perms(pack->ptr);
	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
	return pack;
}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
	struct bpf_prog_pack *pack;
	unsigned long pos;
	void *ptr = NULL;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		size = round_up(size, PAGE_SIZE);
		ptr = bpf_jit_alloc_exec(size);
		if (ptr) {
			bpf_fill_ill_insns(ptr, size);
			set_vm_flush_reset_perms(ptr);
			set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
		}
		goto out;
	}
	list_for_each_entry(pack, &pack_list, list) {
		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
						 nbits, 0);
		if (pos < BPF_PROG_CHUNK_COUNT)
			goto found_free_area;
	}

	pack = alloc_new_pack(bpf_fill_ill_insns);
	if (!pack)
		goto out;

	pos = 0;

found_free_area:
	bitmap_set(pack->bitmap, pos, nbits);
	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
	mutex_unlock(&pack_mutex);
	return ptr;
}

void bpf_prog_pack_free(void *ptr, u32 size)
{
	struct bpf_prog_pack *pack = NULL, *tmp;
	unsigned int nbits;
	unsigned long pos;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		bpf_jit_free_exec(ptr);
		goto out;
	}

	list_for_each_entry(tmp, &pack_list, list) {
		if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
			pack = tmp;
			break;
		}
	}

	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
		goto out;

	nbits = BPF_PROG_SIZE_TO_NBITS(size);
	pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

	WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

	bitmap_clear(pack->bitmap, pos, nbits);
	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
		list_del(&pack->list);
		bpf_jit_free_exec(pack->ptr);
		kfree(pack);
	}
out:
	mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
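/* Illustrative example (not part of the original file): the default JIT
 * memory limit derived above is half of the executable region, rounded up
 * to a page. With a hypothetical 1 GiB module area:
 *
 *	bpf_jit_limit_max = 1UL << 30;
 *	bpf_jit_limit     = round_up((1UL << 30) >> 1, PAGE_SIZE); // 512 MiB
 *
 * bpf_jit_charge_modmem() below then refuses further allocations by
 * unprivileged users once bpf_jit_current exceeds this limit.
 */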
int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
		if (!bpf_capable()) {
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
	atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->size = size;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 size = hdr->size;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(size);
}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, an RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_header,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *ro_header;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* add 16 bytes for a random section of illegal instructions */
	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
	if (!ro_header) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	*rw_header = kvmalloc(size, GFP_KERNEL);
	if (!*rw_header) {
		bpf_prog_pack_free(ro_header, size);
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(*rw_header, size);
	(*rw_header)->size = size;

	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	*image_ptr = &ro_header->image[start];
	*rw_image = &(*rw_header)->image[start];

	return ro_header;
}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
				 struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header)
{
	void *ptr;

	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);

	kvfree(rw_header);

	if (IS_ERR(ptr)) {
		bpf_prog_pack_free(ro_header, ro_header->size);
		return PTR_ERR(ptr);
	}
	return 0;
}

/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed after the JIT completed normally;
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 * For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when jit fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header)
{
	u32 size = ro_header->size;

	bpf_prog_pack_free(ro_header, size);
	kvfree(rw_header);
	bpf_jit_uncharge_modmem(size);
}

struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & BPF_PROG_CHUNK_MASK;
	return (void *)addr;
}
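/* Illustrative JIT-side usage sketch (not part of the original file; arch
 * JITs follow roughly this pattern):
 *
 *	struct bpf_binary_header *ro_header, *rw_header;
 *	u8 *image, *rw_image;
 *
 *	ro_header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					      &rw_header, &rw_image,
 *					      jit_fill_hole);
 *	// emit insns into rw_image, computing offsets against image
 *	if (bpf_jit_binary_pack_finalize(prog, ro_header, rw_header))
 *		goto out_free;
 *	prog->bpf_func = (void *)image;
 *
 * The RW buffer is freed by the finalize step; on JIT failure both halves
 * go through bpf_jit_binary_pack_free() instead.
 */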
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & PAGE_MASK;
	return (void *)addr;
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;
	int err;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->real_func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
		   bpf_jit_supports_far_kfunc_call()) {
		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
		if (err)
			return err;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_u32();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
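/* Illustrative example (not part of the original file): with a random
 * value imm_rnd = 0x12345678, the single instruction
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1)           // r1 += 0x1
 *
 * is rewritten by bpf_jit_blind_insn() into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x12345679)   // ax = rnd ^ imm
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x12345678)   // ax ^= rnd -> 0x1
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)    // r1 += ax
 *
 * so an attacker-chosen constant never appears verbatim in the JITed
 * image.
 */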
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since the
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	fp->stats = NULL;
	fp->active = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!prog->blinding_requested || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* ld_imm64 with an address of bpf subprog is not
			 * a user controlled constant. Don't randomize it,
			 * since it will conflict with jit_subprogs() logic.
			 */
			insn++;
			i++;
			continue;
		}

		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU, ADD,  X),			\
	INSN_3(ALU, SUB,  X),			\
	INSN_3(ALU, AND,  X),			\
	INSN_3(ALU, OR,   X),			\
	INSN_3(ALU, LSH,  X),			\
	INSN_3(ALU, RSH,  X),			\
	INSN_3(ALU, XOR,  X),			\
	INSN_3(ALU, MUL,  X),			\
	INSN_3(ALU, MOV,  X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV,  X),			\
	INSN_3(ALU, MOD,  X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/*   Immediate based. */		\
	INSN_3(ALU, ADD,  K),			\
	INSN_3(ALU, SUB,  K),			\
	INSN_3(ALU, AND,  K),			\
	INSN_3(ALU, OR,   K),			\
	INSN_3(ALU, LSH,  K),			\
	INSN_3(ALU, RSH,  K),			\
	INSN_3(ALU, XOR,  K),			\
	INSN_3(ALU, MUL,  K),			\
	INSN_3(ALU, MOV,  K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV,  K),			\
	INSN_3(ALU, MOD,  K),			\
	/* 64 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU64, ADD,  X),			\
	INSN_3(ALU64, SUB,  X),			\
	INSN_3(ALU64, AND,  X),			\
	INSN_3(ALU64, OR,   X),			\
	INSN_3(ALU64, LSH,  X),			\
	INSN_3(ALU64, RSH,  X),			\
	INSN_3(ALU64, XOR,  X),			\
	INSN_3(ALU64, MUL,  X),			\
	INSN_3(ALU64, MOV,  X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV,  X),			\
	INSN_3(ALU64, MOD,  X),			\
	INSN_2(ALU64, NEG),			\
	INSN_3(ALU64, END, TO_LE),		\
	/*   Immediate based. */		\
	INSN_3(ALU64, ADD,  K),			\
	INSN_3(ALU64, SUB,  K),			\
	INSN_3(ALU64, AND,  K),			\
	INSN_3(ALU64, OR,   K),			\
	INSN_3(ALU64, LSH,  K),			\
	INSN_3(ALU64, RSH,  K),			\
	INSN_3(ALU64, XOR,  K),			\
	INSN_3(ALU64, MUL,  K),			\
	INSN_3(ALU64, MOV,  K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV,  K),			\
	INSN_3(ALU64, MOD,  K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP32, JEQ,  X),			\
	INSN_3(JMP32, JNE,  X),			\
	INSN_3(JMP32, JGT,  X),			\
	INSN_3(JMP32, JLT,  X),			\
	INSN_3(JMP32, JGE,  X),			\
	INSN_3(JMP32, JLE,  X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP32, JEQ,  K),			\
	INSN_3(JMP32, JNE,  K),			\
	INSN_3(JMP32, JGT,  K),			\
	INSN_3(JMP32, JLT,  K),			\
	INSN_3(JMP32, JGE,  K),			\
	INSN_3(JMP32, JLE,  K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP, JEQ,  X),			\
	INSN_3(JMP, JNE,  X),			\
	INSN_3(JMP, JGT,  X),			\
	INSN_3(JMP, JLT,  X),			\
	INSN_3(JMP, JGE,  X),			\
	INSN_3(JMP, JLE,  X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP, JEQ,  K),			\
	INSN_3(JMP, JNE,  K),			\
	INSN_3(JMP, JGT,  K),			\
	INSN_3(JMP, JLT,  K),			\
	INSN_3(JMP, JGE,  K),			\
	INSN_3(JMP, JLE,  K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	INSN_2(JMP32, JA),			\
	/* Store instructions. */		\
	/*   Register based. */			\
	INSN_3(STX, MEM,  B),			\
	INSN_3(STX, MEM,  H),			\
	INSN_3(STX, MEM,  W),			\
	INSN_3(STX, MEM,  DW),			\
	INSN_3(STX, ATOMIC, W),			\
	INSN_3(STX, ATOMIC, DW),		\
	/*   Immediate based. */		\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/*   Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	INSN_3(LDX, MEMSX, B),			\
	INSN_3(LDX, MEMSX, H),			\
	INSN_3(LDX, MEMSX, W),			\
	/*   Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
		[BPF_JMP | BPF_JCOND] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}
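/* Illustrative example (not part of the original file):
 *
 *	bpf_opcode_in_insntable(BPF_ALU64 | BPF_ADD | BPF_X)  // true
 *	bpf_opcode_in_insntable(0xff)                         // false
 *
 * The verifier uses this table to reject any opcode outside the UAPI set
 * before a program ever reaches the interpreter or a JIT.
 */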
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 *
 * Return: whatever value is in %BPF_R0 at program exit
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* Explicitly mask the register-based shift amounts with 63 or 31
	 * to avoid undefined behavior. Normally this won't affect the
	 * generated code, for example, in case of native 64 bit archs such
	 * as x86-64 or arm64, the compiler is optimizing the AND away for
	 * the interpreter. In case of JITs, each of the JIT backends compiles
	 * the BPF shift operations to machine instructions which produce
	 * implementation-defined results in such a case; the resulting
	 * contents of the register may be arbitrary, but program behaviour
	 * as a whole remains defined. In other words, in case of JIT backends,
	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
	 */
	/* ALU (shifts) */
#define SHT(OPCODE, OP)					\
	ALU64_##OPCODE##_X:				\
		DST = DST OP (SRC & 63);		\
		CONT;					\
	ALU_##OPCODE##_X:				\
		DST = (u32) DST OP ((u32) SRC & 31);	\
		CONT;					\
	ALU64_##OPCODE##_K:				\
		DST = DST OP IMM;			\
		CONT;					\
	ALU_##OPCODE##_K:				\
		DST = (u32) DST OP (u32) IMM;		\
		CONT;
	/* ALU (rest) */
#define ALU(OPCODE, OP)					\
	ALU64_##OPCODE##_X:				\
		DST = DST OP SRC;			\
		CONT;					\
	ALU_##OPCODE##_X:				\
		DST = (u32) DST OP (u32) SRC;		\
		CONT;					\
	ALU64_##OPCODE##_K:				\
		DST = DST OP IMM;			\
		CONT;					\
	ALU_##OPCODE##_K:				\
		DST = (u32) DST OP (u32) IMM;		\
		CONT;
	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(XOR,  ^)
	ALU(MUL,  *)
	SHT(LSH, <<)
	SHT(RSH, >>)
#undef SHT
#undef ALU
	ALU_MOV_X:
		switch (OFF) {
		case 0:
			DST = (u32) SRC;
			break;
		case 8:
			DST = (u32)(s8) SRC;
			break;
		case 16:
			DST = (u32)(s16) SRC;
			break;
		}
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= (SRC & 63);
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		switch (OFF) {
		case 0:
			div64_u64_rem(DST, SRC, &AX);
			DST = AX;
			break;
		case 1:
			AX = div64_s64(DST, SRC);
			DST = DST - AX * SRC;
			break;
		}
		CONT;
	ALU_MOD_X:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			DST = do_div(AX, (u32) SRC);
			break;
		case 1:
			AX = abs((s32)DST);
			AX = do_div(AX, abs((s32)SRC));
			if ((s32)DST < 0)
				DST = (u32)-AX;
			else
				DST = (u32)AX;
			break;
		}
		CONT;
	ALU64_MOD_K:
		switch (OFF) {
		case 0:
			div64_u64_rem(DST, IMM, &AX);
			DST = AX;
			break;
		case 1:
			AX = div64_s64(DST, IMM);
			DST = DST - AX * IMM;
			break;
		}
		CONT;
	ALU_MOD_K:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			DST = do_div(AX, (u32) IMM);
			break;
		case 1:
			AX = abs((s32)DST);
			AX = do_div(AX, abs((s32)IMM));
			if ((s32)DST < 0)
				DST = (u32)-AX;
			else
				DST = (u32)AX;
			break;
		}
		CONT;
	ALU64_DIV_X:
		switch (OFF) {
		case 0:
			DST = div64_u64(DST, SRC);
			break;
		case 1:
			DST = div64_s64(DST, SRC);
			break;
		}
		CONT;
	ALU_DIV_X:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			do_div(AX, (u32) SRC);
			DST = (u32) AX;
			break;
		case 1:
			AX = abs((s32)DST);
			do_div(AX, abs((s32)SRC));
			if (((s32)DST < 0) == ((s32)SRC < 0))
				DST = (u32) AX;
			else
				DST = (u32) -AX;
			break;
		}
		CONT;
	ALU64_DIV_K:
		switch (OFF) {
		case 0:
			DST = div64_u64(DST, IMM);
			break;
		case 1:
			DST = div64_s64(DST, IMM);
			break;
		}
		CONT;
	ALU_DIV_K:
		switch (OFF) {
		case 0:
			AX = (u32) DST;
			do_div(AX, (u32) IMM);
			DST = (u32) AX;
			break;
		case 1:
			AX = abs((s32)DST);
			do_div(AX, abs((s32)IMM));
			if (((s32)DST < 0) == ((s32)IMM < 0))
				DST = (u32) AX;
			else
				DST = (u32) -AX;
			break;
		}
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
	ALU64_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) __swab16(DST);
			break;
		case 32:
			DST = (__force u32) __swab32(DST);
			break;
		case 64:
			DST = (__force u64) __swab64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP32_JA:
		insn += insn->imm;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
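	/* Illustrative note (not part of the original file): e.g.
	 * COND_JMP(u, JEQ, ==) above expands to the four labels
	 * JMP_JEQ_X, JMP32_JEQ_X, JMP_JEQ_K and JMP32_JEQ_K, each of
	 * the form
	 *
	 *	if ((u64) DST == (u64) SRC) {
	 *		insn += insn->off;
	 *		CONT_JMP;
	 *	}
	 *	CONT;
	 *
	 * with the 32-bit variants comparing the (u32)-truncated values.
	 */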
	/* ST, STX and LDX */
	ST_NOSPEC:
		/* Speculation barrier for mitigating Speculative Store Bypass.
		 * In case of arm64, we rely on the firmware mitigation as
		 * controlled via the ssbd kernel parameter. Whenever the
		 * mitigation is enabled, it works for all of the kernel code
		 * with no need to provide any additional instructions here.
		 * In case of x86, we use 'lfence' insn for mitigation. We
		 * reuse preexisting logic from Spectre v1 mitigation that
		 * happens to produce the required code on x86 for v4 as well.
		 */
		barrier_nospec();
		CONT;
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;							\
	LDX_PROBE_MEM_##SIZEOP:						\
		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
			      (const void *)(long) (SRC + insn->off));	\
		DST = *((SIZE *)&DST);					\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST

#define LDSX(SIZEOP, SIZE)						\
	LDX_MEMSX_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;							\
	LDX_PROBE_MEMSX_##SIZEOP:					\
		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
			      (const void *)(long) (SRC + insn->off));	\
		DST = *((SIZE *)&DST);					\
		CONT;

	LDSX(B,   s8)
	LDSX(H,  s16)
	LDSX(W,  s32)
#undef LDSX

#define ATOMIC_ALU_OP(BOP, KOP)					\
		case BOP:					\
			if (BPF_SIZE(insn->code) == BPF_W)	\
				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
					     (DST + insn->off));	\
			else					\
				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
					       (DST + insn->off));	\
			break;					\
		case BOP | BPF_FETCH:				\
			if (BPF_SIZE(insn->code) == BPF_W)	\
				SRC = (u32) atomic_fetch_##KOP(	\
					(u32) SRC,		\
					(atomic_t *)(unsigned long) (DST + insn->off)); \
			else					\
				SRC = (u64) atomic64_fetch_##KOP(	\
					(u64) SRC,		\
					(atomic64_t *)(unsigned long) (DST + insn->off)); \
			break;

	STX_ATOMIC:
		switch (IMM) {
		ATOMIC_ALU_OP(BPF_ADD, add)
		ATOMIC_ALU_OP(BPF_AND, and)
		ATOMIC_ALU_OP(BPF_OR, or)
		ATOMIC_ALU_OP(BPF_XOR, xor)
#undef ATOMIC_ALU_OP

		case BPF_XCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				SRC = (u32) atomic_xchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) SRC);
			else
				SRC = (u64) atomic64_xchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) SRC);
			break;
		case BPF_CMPXCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				BPF_R0 = (u32) atomic_cmpxchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) BPF_R0, (u32) SRC);
			else
				BPF_R0 = (u64) atomic64_cmpxchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) BPF_R0, (u64) SRC);
			break;

		default:
			goto default_label;
		}
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
			insn->code, insn->imm);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG] = {}; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static __maybe_unused
u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
			   const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

#ifdef CONFIG_BPF_SYSCALL
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}
#endif
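/* Illustrative example (not part of the original file): the interpreter
 * variants above only differ in their on-stack scratch size. For a subprog
 * with stack_depth = 40, the index computed above is
 *
 *	round_up(40, 32) / 32 - 1 = 64 / 32 - 1 = 1
 *
 * selecting __bpf_prog_run_args64, i.e. the variant with a 64-byte stack.
 */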
#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_map_compatible(struct bpf_map *map,
			     const struct bpf_prog *fp)
{
	enum bpf_prog_type prog_type = resolve_prog_type(fp);
	bool ret;

	if (fp->kprobe_override)
		return false;

	/* XDP programs inserted into maps are not guaranteed to run on
	 * a particular netdev (and can run outside driver context entirely
	 * in the case of devmap and cpumap). Until device checks
	 * are implemented, prohibit adding dev-bound programs to program maps.
	 */
	if (bpf_prog_is_dev_bound(fp->aux))
		return false;

	spin_lock(&map->owner.lock);
	if (!map->owner.type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		map->owner.type  = prog_type;
		map->owner.jited = fp->jited;
		map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
		ret = true;
	} else {
		ret = map->owner.type  == prog_type &&
		      map->owner.jited == fp->jited &&
		      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
	}
	spin_unlock(&map->owner.lock);

	return ret;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i, ret = 0;

	mutex_lock(&aux->used_maps_mutex);
	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];

		if (!map_type_contains_progs(map))
			continue;

		if (!bpf_prog_map_compatible(map, fp)) {
			ret = -EINVAL;
			goto out;
		}
	}

out:
	mutex_unlock(&aux->used_maps_mutex);
	return ret;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via bpf_prog_run() function.
 *
 * Return: the &fp argument along with &err set to 0 for success or
 * a negative errno code on failure
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	bool jit_needed = false;

	if (fp->bpf_func)
		goto finalize;

	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
	    bpf_prog_has_kfunc_call(fp))
		jit_needed = true;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_offloaded(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		bpf_prog_jit_attempt_done(fp);
		if (!fp->jited && jit_needed) {
			*err = -ENOTSUPP;
			return fp;
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
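/* Illustrative caller-side sketch (not part of the original file): the
 * program-load path uses this roughly as
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_used_maps;
 *
 * i.e. the (possibly JIT-replaced) prog pointer must always be taken from
 * the return value, and err checked separately.
 */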
2419 static unsigned int __bpf_prog_ret1(const void *ctx,
2420 const struct bpf_insn *insn)
2425 static struct bpf_prog_dummy {
2426 struct bpf_prog prog;
2427 } dummy_bpf_prog = {
2429 .bpf_func = __bpf_prog_ret1,
2433 struct bpf_empty_prog_array bpf_empty_prog_array = {
2436 EXPORT_SYMBOL(bpf_empty_prog_array);
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &bpf_empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &bpf_empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}
static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
{
	struct bpf_prog_array *progs;

	/* If RCU Tasks Trace grace period implies RCU grace period, there is
	 * no need to call kfree_rcu(), just call kfree() directly.
	 */
	progs = container_of(rcu, struct bpf_prog_array, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(progs);
	else
		kfree_rcu(progs, rcu);
}

void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
{
	if (!progs || progs == &bpf_empty_prog_array.hdr)
		return;
	call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
}
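/* A minimal usage sketch (hypothetical caller): arrays that sleepable
 * programs may still be iterating under rcu_read_lock_trace() must take the
 * sleepable variant, which waits for an RCU Tasks Trace grace period (and,
 * where needed, an additional regular RCU grace period) before the kfree():
 *
 *	if (prog->sleepable)
 *		bpf_prog_array_free_sleepable(old_array);
 *	else
 *		bpf_prog_array_free(old_array);
 */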
int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}
static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}
int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *     bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}
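/* Worked example of the pattern described in the comment above (illustrative
 * only; uquery stands in for some query structure handed in from user space):
 *
 *	u32 cnt = bpf_prog_array_length(array);
 *
 *	if (cnt > 0)
 *		err = bpf_prog_array_copy_to_user(array, uquery->ids, cnt);
 *
 * where -ENOSPC reports that the array held more programs than the cnt slots
 * provided, rather than silently truncating.
 */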
void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}
/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index into the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{
	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
}
/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              into the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{
	struct bpf_prog_array_item *item;

	if (unlikely(index < 0))
		return -EINVAL;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		if (!index) {
			WRITE_ONCE(item->prog, prog);
			return 0;
		}
		index--;
	}
	return -ENOENT;
}
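/* Worked example of the dummy-skipping index semantics of the two functions
 * above (illustrative only): with items { dummy, A, dummy, B, NULL }, index 0
 * names A and index 1 names B, so
 *
 *	bpf_prog_array_update_at(array, 1, C);
 *
 * replaces B with C, while any index >= 2 returns -ENOENT.
 */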
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing, *new;
	struct bpf_prog_array *array;
	bool found_exclude = false;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	new = array->items;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog ||
			    existing->prog == &dummy_bpf_prog.prog)
				continue;

			new->prog = existing->prog;
			new->bpf_cookie = existing->bpf_cookie;
			new++;
		}
	}
	if (include_prog) {
		new->prog = include_prog;
		new->bpf_cookie = bpf_cookie;
		new++;
	}
	new->prog = NULL;
	*new_array = array;
	return 0;
}
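/* A minimal sketch of the usual replace-by-copy pattern (hypothetical caller;
 * array_ptr and cookie are placeholders, and the surrounding locking is
 * omitted): readers keep seeing the old array until the new one is published,
 * after which the old one is freed via RCU:
 *
 *	ret = bpf_prog_array_copy(old_array, NULL, prog, cookie, &new_array);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(*array_ptr, new_array);
 *	bpf_prog_array_free(old_array);
 */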
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	bool sleepable;
	u32 i;

	sleepable = aux->prog->sleepable;
	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		if (sleepable)
			atomic64_dec(&map->sleepable_refcnt);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}
void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
	struct btf_mod_pair *btf_mod;
	u32 i;

	for (i = 0; i < len; i++) {
		btf_mod = &used_btfs[i];
		if (btf_mod->module)
			module_put(btf_mod->module);
		btf_put(btf_mod->btf);
	}
#endif
}

static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{
	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
	kfree(aux->used_btfs);
}
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
#ifdef CONFIG_BPF_SYSCALL
	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
#endif
#ifdef CONFIG_CGROUP_BPF
	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
		bpf_cgroup_atype_put(aux->cgroup_atype);
#endif
	bpf_free_used_maps(aux);
	bpf_free_used_btfs(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_dev_bound_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	if (aux->dst_trampoline)
		bpf_trampoline_put(aux->dst_trampoline);
	for (i = 0; i < aux->real_func_cnt; i++) {
		/* We can just unlink the subprog poke descriptor table as
		 * it was originally linked to the main program and is also
		 * released along with it.
		 */
		aux->func[i]->aux->poke_tab = NULL;
		bpf_jit_free(aux->func[i]);
	}
	if (aux->real_func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->dst_prog)
		bpf_prog_put(aux->dst_prog);
	bpf_token_put(aux->token);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}
BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}
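/* This function backs the bpf_get_prandom_u32() helper. A sketch of the
 * proto wiring, assuming the usual bpf_func_proto layout (illustrative, not
 * necessarily the exact definition used elsewhere in the tree):
 *
 *	const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 *		.func		= bpf_user_rnd_u32,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *	};
 */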
BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto bpf_set_retval_proto __weak;
const struct bpf_func_proto bpf_get_retval_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);
/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}
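/* For illustration: an arch JIT overrides the weak stub above simply by
 * providing a strong definition of the same symbol. A hypothetical backend
 * that bails out on every program would look like
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		return prog;
 *	}
 *
 * i.e. it hands the program back with prog->jited still clear, and the core
 * falls back to the interpreter (or fails with -ENOTSUPP under
 * CONFIG_BPF_JIT_ALWAYS_ON).
 */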
bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}
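/* What such an inserted zero extension looks like (illustrative): the
 * verifier appends the special mov32 built by BPF_ZEXT_REG(), i.e. an
 * instruction with
 *
 *	.code = BPF_ALU | BPF_MOV | BPF_X, .imm = 1,
 *
 * and dst_reg == src_reg, which insn_is_zext() recognizes so that JITs whose
 * 32-bit ops already zero the upper half of the register can skip emitting
 * it.
 */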
/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{
	return false;
}

bool __weak bpf_jit_supports_kfunc_call(void)
{
	return false;
}

bool __weak bpf_jit_supports_far_kfunc_call(void)
{
	return false;
}

bool __weak bpf_jit_supports_arena(void)
{
	return false;
}

u64 __weak bpf_arch_uaddress_limit(void)
{
#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
	return 0;
#else
	return TASK_SIZE;
#endif
}

/* Return TRUE if the JIT backend satisfies the following two conditions:
 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
 * 2) Under the specific arch, the implementation of xchg() is the same
 *    as atomic_xchg() on pointer-sized words.
 */
bool __weak bpf_jit_supports_ptr_xchg(void)
{
	return false;
}
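/* When this returns true, the verifier can inline bpf_kptr_xchg() as a plain
 * atomic exchange instead of a helper call; sketched roughly (illustrative,
 * mirroring how such a rewrite is typically expressed with insn macros):
 *
 *	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
 *	BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
 */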
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	return ERR_PTR(-ENOTSUPP);
}

int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{
	return -ENOTSUPP;
}

bool __weak bpf_jit_supports_exceptions(void)
{
	return false;
}

void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{
}
/* for configs without MMU or 32-bit */
__weak const struct bpf_map_ops arena_map_ops;
__weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
{
	return 0;
}
__weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
{
	return 0;
}

#ifdef CONFIG_BPF_SYSCALL
static int __init bpf_global_ma_init(void)
{
	int ret;

	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
	bpf_global_ma_set = !ret;
	return ret;
}
late_initcall(bpf_global_ma_init);
#endif
DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);