kernel/bpf/core.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *      Jay Schulist <jschlst@samba.org>
 *      Alexei Starovoitov <ast@plumgrid.com>
 *      Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0  regs[BPF_REG_0]
#define BPF_R1  regs[BPF_REG_1]
#define BPF_R2  regs[BPF_REG_2]
#define BPF_R3  regs[BPF_REG_3]
#define BPF_R4  regs[BPF_REG_4]
#define BPF_R5  regs[BPF_REG_5]
#define BPF_R6  regs[BPF_REG_6]
#define BPF_R7  regs[BPF_REG_7]
#define BPF_R8  regs[BPF_REG_8]
#define BPF_R9  regs[BPF_REG_9]
#define BPF_R10 regs[BPF_REG_10]

/* Named registers */
#define DST     regs[insn->dst_reg]
#define SRC     regs[insn->src_reg]
#define FP      regs[BPF_REG_FP]
#define AX      regs[BPF_REG_AX]
#define ARG1    regs[BPF_REG_ARG1]
#define CTX     regs[BPF_REG_CTX]
#define OFF     insn->off
#define IMM     insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF) {
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        } else if (k >= SKF_LL_OFF) {
                if (unlikely(!skb_mac_header_was_set(skb)))
                        return NULL;
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
        }
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;

        return NULL;
}

/* Tell bpf programs that include vmlinux.h the kernel's PAGE_SIZE */
enum page_size_enum {
        __PAGE_SIZE = PAGE_SIZE
};

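/* Allocate a zeroed program of at least @size bytes (rounded up to whole
 * pages) together with its auxiliary data and the per-CPU "active" recursion
 * counter, but without the per-CPU run-time stats. Returns NULL on failure.
 */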
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
        struct bpf_prog_aux *aux;
        struct bpf_prog *fp;

        size = round_up(size, __PAGE_SIZE);
        fp = __vmalloc(size, gfp_flags);
        if (fp == NULL)
                return NULL;

        aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
        if (aux == NULL) {
                vfree(fp);
                return NULL;
        }
        fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
        if (!fp->active) {
                vfree(fp);
                kfree(aux);
                return NULL;
        }

        fp->pages = size / PAGE_SIZE;
        fp->aux = aux;
        fp->aux->prog = fp;
        fp->jit_requested = ebpf_jit_enabled();
        fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
        aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

        INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
#ifdef CONFIG_FINEIBT
        INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
#endif
        mutex_init(&fp->aux->used_maps_mutex);
        mutex_init(&fp->aux->dst_mutex);

        return fp;
}

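/* Like bpf_prog_alloc_no_stats(), but also allocate and initialize the
 * per-CPU run-time statistics.
 */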
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
        struct bpf_prog *prog;
        int cpu;

        prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
        if (!prog)
                return NULL;

        prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
        if (!prog->stats) {
                free_percpu(prog->active);
                kfree(prog->aux);
                vfree(prog);
                return NULL;
        }

        for_each_possible_cpu(cpu) {
                struct bpf_prog_stats *pstats;

                pstats = per_cpu_ptr(prog->stats, cpu);
                u64_stats_init(&pstats->syncp);
        }
        return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
        if (!prog->aux->nr_linfo || !prog->jit_requested)
                return 0;

        prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
                                          sizeof(*prog->aux->jited_linfo),
                                          bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
        if (!prog->aux->jited_linfo)
                return -ENOMEM;

        return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
        if (prog->aux->jited_linfo &&
            (!prog->jited || !prog->aux->jited_linfo[0])) {
                kvfree(prog->aux->jited_linfo);
                prog->aux->jited_linfo = NULL;
        }

        kfree(prog->aux->kfunc_tab);
        prog->aux->kfunc_tab = NULL;
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
                               const u32 *insn_to_jit_off)
{
        u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
        const struct bpf_line_info *linfo;
        void **jited_linfo;

        if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
                /* Userspace did not provide linfo */
                return;

        linfo_idx = prog->aux->linfo_idx;
        linfo = &prog->aux->linfo[linfo_idx];
        insn_start = linfo[0].insn_off;
        insn_end = insn_start + prog->len;

        jited_linfo = &prog->aux->jited_linfo[linfo_idx];
        jited_linfo[0] = prog->bpf_func;

        nr_linfo = prog->aux->nr_linfo - linfo_idx;

        for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
                /* The verifier ensures that linfo[i].insn_off is
                 * strictly increasing
                 */
                jited_linfo[i] = prog->bpf_func +
                        insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

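/* Grow @fp_old to @size bytes (page granularity). On success the old image
 * is copied over, fp->aux and the per-CPU pointers are handed off to the
 * new program, and the old one is freed.
 */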
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
        struct bpf_prog *fp;
        u32 pages;

        size = round_up(size, PAGE_SIZE);
        pages = size / PAGE_SIZE;
        if (pages <= fp_old->pages)
                return fp_old;

        fp = __vmalloc(size, gfp_flags);
        if (fp) {
                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
                fp->pages = pages;
                fp->aux->prog = fp;

                /* We keep fp->aux from fp_old around in the new
                 * reallocated structure.
                 */
                fp_old->aux = NULL;
                fp_old->stats = NULL;
                fp_old->active = NULL;
                __bpf_prog_free(fp_old);
        }

        return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
        if (fp->aux) {
                mutex_destroy(&fp->aux->used_maps_mutex);
                mutex_destroy(&fp->aux->dst_mutex);
                kfree(fp->aux->poke_tab);
                kfree(fp->aux);
        }
        free_percpu(fp->stats);
        free_percpu(fp->active);
        vfree(fp);
}

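/* Compute fp->tag as a SHA1 digest over the instruction stream, with map
 * fds masked out, since their values are not stable across loads.
 */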
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
        const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
        u32 raw_size = bpf_prog_tag_scratch_size(fp);
        u32 digest[SHA1_DIGEST_WORDS];
        u32 ws[SHA1_WORKSPACE_WORDS];
        u32 i, bsize, psize, blocks;
        struct bpf_insn *dst;
        bool was_ld_map;
        u8 *raw, *todo;
        __be32 *result;
        __be64 *bits;

        raw = vmalloc(raw_size);
        if (!raw)
                return -ENOMEM;

        sha1_init(digest);
        memset(ws, 0, sizeof(ws));

        /* We need to take out the map fds for the digest calculation
         * since they are unstable from the user space side.
         */
        dst = (void *)raw;
        for (i = 0, was_ld_map = false; i < fp->len; i++) {
                dst[i] = fp->insnsi[i];
                if (!was_ld_map &&
                    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
                    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
                     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
                        was_ld_map = true;
                        dst[i].imm = 0;
                } else if (was_ld_map &&
                           dst[i].code == 0 &&
                           dst[i].dst_reg == 0 &&
                           dst[i].src_reg == 0 &&
                           dst[i].off == 0) {
                        was_ld_map = false;
                        dst[i].imm = 0;
                } else {
                        was_ld_map = false;
                }
        }

        psize = bpf_prog_insn_size(fp);
        memset(&raw[psize], 0, raw_size - psize);
        raw[psize++] = 0x80;

        bsize  = round_up(psize, SHA1_BLOCK_SIZE);
        blocks = bsize / SHA1_BLOCK_SIZE;
        todo   = raw;
        if (bsize - psize >= sizeof(__be64)) {
                bits = (__be64 *)(todo + bsize - sizeof(__be64));
        } else {
                bits = (__be64 *)(todo + bsize + bits_offset);
                blocks++;
        }
        *bits = cpu_to_be64((psize - 1) << 3);

        while (blocks--) {
                sha1_transform(digest, todo, ws);
                todo += SHA1_BLOCK_SIZE;
        }

        result = (__force __be32 *)digest;
        for (i = 0; i < SHA1_DIGEST_WORDS; i++)
                result[i] = cpu_to_be32(digest[i]);
        memcpy(fp->tag, result, sizeof(fp->tag));

        vfree(raw);
        return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
{
        const s64 imm_min = S32_MIN, imm_max = S32_MAX;
        s32 delta = end_new - end_old;
        s64 imm = insn->imm;

        if (curr < pos && curr + imm + 1 >= end_old)
                imm += delta;
        else if (curr >= end_new && curr + imm + 1 < end_new)
                imm -= delta;
        if (imm < imm_min || imm > imm_max)
                return -ERANGE;
        if (!probe_pass)
                insn->imm = imm;
        return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
                                s32 end_new, s32 curr, const bool probe_pass)
{
        s64 off_min, off_max, off;
        s32 delta = end_new - end_old;

        if (insn->code == (BPF_JMP32 | BPF_JA)) {
                off = insn->imm;
                off_min = S32_MIN;
                off_max = S32_MAX;
        } else {
                off = insn->off;
                off_min = S16_MIN;
                off_max = S16_MAX;
        }

        if (curr < pos && curr + off + 1 >= end_old)
                off += delta;
        else if (curr >= end_new && curr + off + 1 < end_new)
                off -= delta;
        if (off < off_min || off > off_max)
                return -ERANGE;
        if (!probe_pass) {
                if (insn->code == (BPF_JMP32 | BPF_JA))
                        insn->imm = off;
                else
                        insn->off = off;
        }
        return 0;
}

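/* Walk the whole program and fix up every jump or call whose target
 * crosses the patched region. With @probe_pass set, only check the
 * original image for offset overflow without writing anything back.
 */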
static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
                            s32 end_new, const bool probe_pass)
{
        u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
        struct bpf_insn *insn = prog->insnsi;
        int ret = 0;

        for (i = 0; i < insn_cnt; i++, insn++) {
                u8 code;

                /* In the probing pass we still operate on the original,
                 * unpatched image in order to check overflows before we
                 * do any other adjustments. Therefore skip the patchlet.
                 */
                if (probe_pass && i == pos) {
                        i = end_new;
                        insn = prog->insnsi + end_old;
                }
                if (bpf_pseudo_func(insn)) {
                        ret = bpf_adj_delta_to_imm(insn, pos, end_old,
                                                   end_new, i, probe_pass);
                        if (ret)
                                return ret;
                        continue;
                }
                code = insn->code;
                if ((BPF_CLASS(code) != BPF_JMP &&
                     BPF_CLASS(code) != BPF_JMP32) ||
                    BPF_OP(code) == BPF_EXIT)
                        continue;
                /* Adjust offset of jmps if we cross patch boundaries. */
                if (BPF_OP(code) == BPF_CALL) {
                        if (insn->src_reg != BPF_PSEUDO_CALL)
                                continue;
                        ret = bpf_adj_delta_to_imm(insn, pos, end_old,
                                                   end_new, i, probe_pass);
                } else {
                        ret = bpf_adj_delta_to_off(insn, pos, end_old,
                                                   end_new, i, probe_pass);
                }
                if (ret)
                        break;
        }

        return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
        struct bpf_line_info *linfo;
        u32 i, nr_linfo;

        nr_linfo = prog->aux->nr_linfo;
        if (!nr_linfo || !delta)
                return;

        linfo = prog->aux->linfo;

        for (i = 0; i < nr_linfo; i++)
                if (off < linfo[i].insn_off)
                        break;

        /* Push all off < linfo[i].insn_off by delta */
        for (; i < nr_linfo; i++)
                linfo[i].insn_off += delta;
}

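/* Replace the single instruction at @off with the @len instructions in
 * @patch, growing the image when needed and adjusting branch targets and
 * line info. Returns the (possibly reallocated) program or an ERR_PTR.
 */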
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len)
{
        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
        const u32 cnt_max = S16_MAX;
        struct bpf_prog *prog_adj;
        int err;

        /* Since our patchlet doesn't expand the image, we're done. */
        if (insn_delta == 0) {
                memcpy(prog->insnsi + off, patch, sizeof(*patch));
                return prog;
        }

        insn_adj_cnt = prog->len + insn_delta;

        /* Reject anything that would potentially let the insn->off
         * target overflow when we have excessive program expansions.
         * We need to probe here before we do any reallocation where
         * we afterwards may not fail anymore.
         */
        if (insn_adj_cnt > cnt_max &&
            (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
                return ERR_PTR(err);

        /* Several new instructions need to be inserted. Make room
         * for them. Likely, there's no need for a new allocation as
         * the last page could have large enough tailroom.
         */
        prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
                                    GFP_USER);
        if (!prog_adj)
                return ERR_PTR(-ENOMEM);

        prog_adj->len = insn_adj_cnt;

        /* Patching happens in 3 steps:
         *
         * 1) Move over tail of insnsi from next instruction onwards,
         *    so we can patch the single target insn with one or more
         *    new ones (patching is always from 1 to n insns, n > 0).
         * 2) Inject new instructions at the target location.
         * 3) Adjust branch offsets if necessary.
         */
        insn_rest = insn_adj_cnt - off - len;

        memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
                sizeof(*patch) * insn_rest);
        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

        /* We are guaranteed to not fail at this point; otherwise it is
         * too late to revert to the original state. An overflow cannot
         * happen at this point.
         */
        BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

        bpf_adj_linfo(prog_adj, off, insn_delta);

        return prog_adj;
}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
        /* Branch offsets can't overflow when program is shrinking, no need
         * to call bpf_adj_branches(..., true) here
         */
        memmove(prog->insnsi + off, prog->insnsi + off + cnt,
                sizeof(struct bpf_insn) * (prog->len - off - cnt));
        prog->len -= cnt;

        return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
        int i;

        for (i = 0; i < fp->aux->real_func_cnt; i++)
                bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
        bpf_prog_kallsyms_del_subprogs(fp);
        bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
        WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

        prog->aux->ksym.start = (unsigned long) prog->bpf_func;
        prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
        char *sym = prog->aux->ksym.name;
        const char *end = sym + KSYM_NAME_LEN;
        const struct btf_type *type;
        const char *func_name;

        BUILD_BUG_ON(sizeof("bpf_prog_") +
                     sizeof(prog->tag) * 2 +
                     /* name has been null terminated.
                      * We should need +1 for the '_' preceding
                      * the name.  However, the null character
                      * is double counted between the name and the
                      * sizeof("bpf_prog_") above, so we omit
                      * the +1 here.
                      */
                     sizeof(prog->aux->name) > KSYM_NAME_LEN);

        sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
        sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

        /* prog->aux->name will be ignored if full btf name is available */
        if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
                type = btf_type_by_id(prog->aux->btf,
                                      prog->aux->func_info[prog->aux->func_idx].type_id);
                func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
                snprintf(sym, (size_t)(end - sym), "_%s", func_name);
                return;
        }

        if (prog->aux->name[0])
                snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
        else
                *sym = 0;
}

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
        return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
                                          struct latch_tree_node *b)
{
        return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
        unsigned long val = (unsigned long)key;
        const struct bpf_ksym *ksym;

        ksym = container_of(n, struct bpf_ksym, tnode);

        if (val < ksym->start)
                return -1;
        /* Ensure that we detect return addresses as part of the program, when
         * the final instruction is a call for a program part of the stack
         * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
         */
        if (val > ksym->end)
                return  1;

        return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
        .less   = bpf_tree_less,
        .comp   = bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
        spin_lock_bh(&bpf_lock);
        WARN_ON_ONCE(!list_empty(&ksym->lnode));
        list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
        latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
        spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
        if (list_empty(&ksym->lnode))
                return;

        latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
        list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
        spin_lock_bh(&bpf_lock);
        __bpf_ksym_del(ksym);
        spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
        return fp->jited && !bpf_prog_was_classic(fp);
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
        if (!bpf_prog_kallsyms_candidate(fp) ||
            !bpf_token_capable(fp->aux->token, CAP_BPF))
                return;

        bpf_prog_ksym_set_addr(fp);
        bpf_prog_ksym_set_name(fp);
        fp->aux->ksym.prog = true;

        bpf_ksym_add(&fp->aux->ksym);

#ifdef CONFIG_FINEIBT
        /*
         * With FineIBT, code in the __cfi_foo() symbols can get executed
         * and hence the unwinder needs help.
         */
        if (cfi_mode != CFI_FINEIBT)
                return;

        snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
                 "__cfi_%s", fp->aux->ksym.name);

        fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
        fp->aux->ksym_prefix.end   = (unsigned long) fp->bpf_func;

        bpf_ksym_add(&fp->aux->ksym_prefix);
#endif
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
        if (!bpf_prog_kallsyms_candidate(fp))
                return;

        bpf_ksym_del(&fp->aux->ksym);
#ifdef CONFIG_FINEIBT
        if (cfi_mode != CFI_FINEIBT)
                return;
        bpf_ksym_del(&fp->aux->ksym_prefix);
#endif
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
        struct latch_tree_node *n;

        n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
        return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
                                 unsigned long *off, char *sym)
{
        struct bpf_ksym *ksym;
        char *ret = NULL;

        rcu_read_lock();
        ksym = bpf_ksym_find(addr);
        if (ksym) {
                unsigned long symbol_start = ksym->start;
                unsigned long symbol_end = ksym->end;

                strncpy(sym, ksym->name, KSYM_NAME_LEN);

                ret = sym;
                if (size)
                        *size = symbol_end - symbol_start;
                if (off)
                        *off  = addr - symbol_start;
        }
        rcu_read_unlock();

        return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
        bool ret;

        rcu_read_lock();
        ret = bpf_ksym_find(addr) != NULL;
        rcu_read_unlock();

        return ret;
}

struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
        struct bpf_ksym *ksym = bpf_ksym_find(addr);

        return ksym && ksym->prog ?
               container_of(ksym, struct bpf_prog_aux, ksym)->prog :
               NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
        const struct exception_table_entry *e = NULL;
        struct bpf_prog *prog;

        rcu_read_lock();
        prog = bpf_prog_ksym_find(addr);
        if (!prog)
                goto out;
        if (!prog->aux->num_exentries)
                goto out;

        e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
        rcu_read_unlock();
        return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym)
{
        struct bpf_ksym *ksym;
        unsigned int it = 0;
        int ret = -ERANGE;

        if (!bpf_jit_kallsyms_enabled())
                return ret;

        rcu_read_lock();
        list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
                if (it++ != symnum)
                        continue;

                strncpy(sym, ksym->name, KSYM_NAME_LEN);

                *value = ksym->start;
                *type  = BPF_SYM_ELF_TYPE;

                ret = 0;
                break;
        }
        rcu_read_unlock();

        return ret;
}

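/* Append @poke to the program's poke table (used e.g. for tail call target
 * patching) and return its slot index, or a negative errno on failure.
 */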
int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
                                struct bpf_jit_poke_descriptor *poke)
{
        struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
        static const u32 poke_tab_max = 1024;
        u32 slot = prog->aux->size_poke_tab;
        u32 size = slot + 1;

        if (size > poke_tab_max)
                return -ENOSPC;
        if (poke->tailcall_target || poke->tailcall_target_stable ||
            poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
                return -EINVAL;

        switch (poke->reason) {
        case BPF_POKE_REASON_TAIL_CALL:
                if (!poke->tail_call.map)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
        if (!tab)
                return -ENOMEM;

        memcpy(&tab[slot], poke, sizeof(*poke));
        prog->aux->size_poke_tab = size;
        prog->aux->poke_tab = tab;

        return slot;
}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT    6
#define BPF_PROG_CHUNK_SIZE     (1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK     (~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
        struct list_head list;
        void *ptr;
        unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
        memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size)    (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
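/* One bit per 64-byte chunk; e.g. a 300 byte image occupies
 * round_up(300, 64) / 64 = 5 chunks in a pack's bitmap.
 */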

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_prog_pack *pack;

        pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
                       GFP_KERNEL);
        if (!pack)
                return NULL;
        pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
        if (!pack->ptr) {
                kfree(pack);
                return NULL;
        }
        bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
        bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
        list_add_tail(&pack->list, &pack_list);

        set_vm_flush_reset_perms(pack->ptr);
        set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
        return pack;
}

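/* Hand out a chunk aligned region from an existing pack, allocate a new
 * pack when none has room, or fall back to a standalone bpf_jit_alloc_exec()
 * allocation for images larger than a whole pack.
 */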
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
        struct bpf_prog_pack *pack;
        unsigned long pos;
        void *ptr = NULL;

        mutex_lock(&pack_mutex);
        if (size > BPF_PROG_PACK_SIZE) {
                size = round_up(size, PAGE_SIZE);
                ptr = bpf_jit_alloc_exec(size);
                if (ptr) {
                        bpf_fill_ill_insns(ptr, size);
                        set_vm_flush_reset_perms(ptr);
                        set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
                }
                goto out;
        }
        list_for_each_entry(pack, &pack_list, list) {
                pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
                                                 nbits, 0);
                if (pos < BPF_PROG_CHUNK_COUNT)
                        goto found_free_area;
        }

        pack = alloc_new_pack(bpf_fill_ill_insns);
        if (!pack)
                goto out;

        pos = 0;

found_free_area:
        bitmap_set(pack->bitmap, pos, nbits);
        ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
        mutex_unlock(&pack_mutex);
        return ptr;
}

void bpf_prog_pack_free(void *ptr, u32 size)
{
        struct bpf_prog_pack *pack = NULL, *tmp;
        unsigned int nbits;
        unsigned long pos;

        mutex_lock(&pack_mutex);
        if (size > BPF_PROG_PACK_SIZE) {
                bpf_jit_free_exec(ptr);
                goto out;
        }

        list_for_each_entry(tmp, &pack_list, list) {
                if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
                        pack = tmp;
                        break;
                }
        }

        if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
                goto out;

        nbits = BPF_PROG_SIZE_TO_NBITS(size);
        pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

        WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
                  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

        bitmap_clear(pack->bitmap, pos, nbits);
        if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
                                       BPF_PROG_CHUNK_COUNT, 0) == 0) {
                list_del(&pack->list);
                bpf_jit_free_exec(pack->ptr);
                kfree(pack);
        }
out:
        mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
        return MODULES_END - MODULES_VADDR;
#else
        return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
        /* Only used as heuristic here to derive limit. */
        bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
        bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
                                            PAGE_SIZE), LONG_MAX);
        return 0;
}
pure_initcall(bpf_jit_charge_init);

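/* Charge @size bytes against the JIT memory limit. Once the limit is
 * exceeded, callers that are not bpf_capable() get -EPERM, while
 * privileged callers may overrun it.
 */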
int bpf_jit_charge_modmem(u32 size)
{
        if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
                if (!bpf_capable()) {
                        atomic_long_sub(size, &bpf_jit_current);
                        return -EPERM;
                }
        }

        return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
        atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
        return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
        module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_binary_header *hdr;
        u32 size, hole, start;

        WARN_ON_ONCE(!is_power_of_2(alignment) ||
                     alignment > BPF_IMAGE_ALIGNMENT);

        /* Most BPF filters are really small, but if some of them
         * fill a page, allow at least 128 extra bytes to insert a
         * random section of illegal instructions.
         */
        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);

        if (bpf_jit_charge_modmem(size))
                return NULL;
        hdr = bpf_jit_alloc_exec(size);
        if (!hdr) {
                bpf_jit_uncharge_modmem(size);
                return NULL;
        }

        /* Fill space with illegal/arch-dep instructions. */
        bpf_fill_ill_insns(hdr, size);

        hdr->size = size;
        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                     PAGE_SIZE - sizeof(*hdr));
        start = get_random_u32_below(hole) & ~(alignment - 1);

        /* Leave a random number of instructions before BPF code. */
        *image_ptr = &hdr->image[start];

        return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
        u32 size = hdr->size;

        bpf_jit_free_exec(hdr);
        bpf_jit_uncharge_modmem(size);
}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, a RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write the JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
                          unsigned int alignment,
                          struct bpf_binary_header **rw_header,
                          u8 **rw_image,
                          bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_binary_header *ro_header;
        u32 size, hole, start;

        WARN_ON_ONCE(!is_power_of_2(alignment) ||
                     alignment > BPF_IMAGE_ALIGNMENT);

        /* add 16 bytes for a random section of illegal instructions */
        size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);

        if (bpf_jit_charge_modmem(size))
                return NULL;
        ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
        if (!ro_header) {
                bpf_jit_uncharge_modmem(size);
                return NULL;
        }

        *rw_header = kvmalloc(size, GFP_KERNEL);
        if (!*rw_header) {
                bpf_prog_pack_free(ro_header, size);
                bpf_jit_uncharge_modmem(size);
                return NULL;
        }

        /* Fill space with illegal/arch-dep instructions. */
        bpf_fill_ill_insns(*rw_header, size);
        (*rw_header)->size = size;

        hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
                     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
        start = get_random_u32_below(hole) & ~(alignment - 1);

        *image_ptr = &ro_header->image[start];
        *rw_image = &(*rw_header)->image[start];

        return ro_header;
}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
                                 struct bpf_binary_header *ro_header,
                                 struct bpf_binary_header *rw_header)
{
        void *ptr;

        ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);

        kvfree(rw_header);

        if (IS_ERR(ptr)) {
                bpf_prog_pack_free(ro_header, ro_header->size);
                return PTR_ERR(ptr);
        }
        return 0;
}

/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed after the JIT completed normally;
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 * For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when jit fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
                              struct bpf_binary_header *rw_header)
{
        u32 size = ro_header->size;

        bpf_prog_pack_free(ro_header, size);
        kvfree(rw_header);
        bpf_jit_uncharge_modmem(size);
}

struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{
        unsigned long real_start = (unsigned long)fp->bpf_func;
        unsigned long addr;

        addr = real_start & BPF_PROG_CHUNK_MASK;
        return (void *)addr;
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
        unsigned long real_start = (unsigned long)fp->bpf_func;
        unsigned long addr;

        addr = real_start & PAGE_MASK;
        return (void *)addr;
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited) {
                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

                bpf_jit_binary_free(hdr);
                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
        }

        bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
                          const struct bpf_insn *insn, bool extra_pass,
                          u64 *func_addr, bool *func_addr_fixed)
{
        s16 off = insn->off;
        s32 imm = insn->imm;
        u8 *addr;
        int err;

        *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
        if (!*func_addr_fixed) {
                /* Place-holder address till the last pass has collected
                 * all addresses for JITed subprograms in which case we
                 * can pick them up from prog->aux.
                 */
                if (!extra_pass)
                        addr = NULL;
                else if (prog->aux->func &&
                         off >= 0 && off < prog->aux->real_func_cnt)
                        addr = (u8 *)prog->aux->func[off]->bpf_func;
                else
                        return -EINVAL;
        } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
                   bpf_jit_supports_far_kfunc_call()) {
                err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
                if (err)
                        return err;
        } else {
                /* Address of a BPF helper call. Since part of the core
                 * kernel, it's always at a fixed location. __bpf_call_base
                 * and the helper with imm relative to it are both in core
                 * kernel.
                 */
                addr = (u8 *)__bpf_call_base + imm;
        }

        *func_addr = (unsigned long)addr;
        return 0;
}

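/* Rewrite a single instruction that carries a user-controlled immediate
 * into an equivalent sequence that materializes the constant in AX as
 * (imm ^ rnd) ^ rnd, so the chosen value never appears verbatim in the
 * JITed image. Returns the number of instructions written to @to_buff;
 * 0 means the original instruction is kept as is.
 */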
static int bpf_jit_blind_insn(const struct bpf_insn *from,
                              const struct bpf_insn *aux,
                              struct bpf_insn *to_buff,
                              bool emit_zext)
{
        struct bpf_insn *to = to_buff;
        u32 imm_rnd = get_random_u32();
        s16 off;

        BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

        /* Constraints on AX register:
         *
         * AX register is inaccessible from user space. It is mapped in
         * all JITs, and used here for constant blinding rewrites. It is
         * typically "stateless" meaning its contents are only valid within
         * the executed instruction, but not across several instructions.
         * There are a few exceptions however which are further detailed
         * below.
         *
         * Constant blinding is only used by JITs, not in the interpreter.
         * The interpreter uses AX in some occasions as a local temporary
         * register e.g. in DIV or MOD instructions.
         *
         * In restricted circumstances, the verifier can also use the AX
         * register for rewrites as long as they do not interfere with
         * the above cases!
         */
        if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
                goto out;

        if (from->imm == 0 &&
            (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
             from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
                *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
                goto out;
        }

        switch (from->code) {
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU | BPF_OR  | BPF_K:
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU | BPF_MOD | BPF_K:
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
                break;

        case BPF_ALU64 | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_OR  | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
                break;

        case BPF_JMP | BPF_JEQ  | BPF_K:
        case BPF_JMP | BPF_JNE  | BPF_K:
        case BPF_JMP | BPF_JGT  | BPF_K:
        case BPF_JMP | BPF_JLT  | BPF_K:
        case BPF_JMP | BPF_JGE  | BPF_K:
        case BPF_JMP | BPF_JLE  | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_K:
                /* Accommodate for extra offset in case of a backjump. */
                off = from->off;
                if (off < 0)
                        off -= 2;
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
                break;

        case BPF_JMP32 | BPF_JEQ  | BPF_K:
        case BPF_JMP32 | BPF_JNE  | BPF_K:
        case BPF_JMP32 | BPF_JGT  | BPF_K:
        case BPF_JMP32 | BPF_JLT  | BPF_K:
        case BPF_JMP32 | BPF_JGE  | BPF_K:
        case BPF_JMP32 | BPF_JLE  | BPF_K:
        case BPF_JMP32 | BPF_JSGT | BPF_K:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_K:
                /* Accommodate for extra offset in case of a backjump. */
                off = from->off;
                if (off < 0)
                        off -= 2;
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
                                      off);
                break;

        case BPF_LD | BPF_IMM | BPF_DW:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
                *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
                break;
        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                if (emit_zext)
                        *to++ = BPF_ZEXT_REG(BPF_REG_AX);
                *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
                break;

        case BPF_ST | BPF_MEM | BPF_DW:
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_B:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
                break;
        }
out:
        return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
                                              gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
        struct bpf_prog *fp;

        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
        if (fp != NULL) {
                /* aux->prog still points to the fp_other one, so
                 * when promoting the clone to the real program,
                 * this still needs to be adapted.
                 */
                memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
        }

        return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
        /* aux was stolen by the other clone, so we cannot free
         * it from this path! It will be freed eventually by the
         * other program on release.
         *
         * At this point, we don't need a deferred release since
         * clone is guaranteed to not be locked.
         */
        fp->aux = NULL;
        fp->stats = NULL;
        fp->active = NULL;
        __bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
        /* We have to repoint aux->prog to self, as we don't
         * know whether fp here is the clone or the original.
         */
        fp->aux->prog = fp;
        bpf_prog_clone_free(fp_other);
}

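/* Produce a clone of @prog in which every instruction carrying a
 * user-supplied immediate is rewritten via bpf_jit_blind_insn(). On
 * success the clone is returned; the caller later releases the copy it
 * does not keep via bpf_jit_prog_release_other().
 */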
1453 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1454 {
1455         struct bpf_insn insn_buff[16], aux[2];
1456         struct bpf_prog *clone, *tmp;
1457         int insn_delta, insn_cnt;
1458         struct bpf_insn *insn;
1459         int i, rewritten;
1460
1461         if (!prog->blinding_requested || prog->blinded)
1462                 return prog;
1463
1464         clone = bpf_prog_clone_create(prog, GFP_USER);
1465         if (!clone)
1466                 return ERR_PTR(-ENOMEM);
1467
1468         insn_cnt = clone->len;
1469         insn = clone->insnsi;
1470
1471         for (i = 0; i < insn_cnt; i++, insn++) {
1472                 if (bpf_pseudo_func(insn)) {
1473                         /* ld_imm64 with an address of bpf subprog is not
1474                          * a user controlled constant. Don't randomize it,
1475                          * since it will conflict with jit_subprogs() logic.
1476                          */
1477                         insn++;
1478                         i++;
1479                         continue;
1480                 }
1481
1482                 /* We temporarily need to hold the original ld64 insn
1483                  * so that we can still access the first part in the
1484                  * second blinding run.
1485                  */
1486                 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1487                     insn[1].code == 0)
1488                         memcpy(aux, insn, sizeof(aux));
1489
1490                 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1491                                                 clone->aux->verifier_zext);
1492                 if (!rewritten)
1493                         continue;
1494
1495                 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1496                 if (IS_ERR(tmp)) {
1497                         /* Patching may have repointed aux->prog during
1498                          * realloc from the original one, so we need to
1499                          * fix it up here on error.
1500                          */
1501                         bpf_jit_prog_release_other(prog, clone);
1502                         return tmp;
1503                 }
1504
1505                 clone = tmp;
1506                 insn_delta = rewritten - 1;
1507
1508                 /* Walk new program and skip insns we just inserted. */
1509                 insn = clone->insnsi + i + insn_delta;
1510                 insn_cnt += insn_delta;
1511                 i        += insn_delta;
1512         }
1513
1514         clone->blinded = 1;
1515         return clone;
1516 }
1517 #endif /* CONFIG_BPF_JIT */
1518
1519 /* Base function for offset calculation. Needs to go into the .text
1520  * section and is therefore kept non-static as well; it will also be used
1521  * by JITs later on anyway, so do not let the compiler omit it. It also
1522  * needs to go into kallsyms for correlation from e.g. bpftool, so the
1523  * naming must not change.
1524  */
1525 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1526 {
1527         return 0;
1528 }
1529 EXPORT_SYMBOL_GPL(__bpf_call_base);
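/* A small standalone sketch (userspace, guarded out of any build) of the
 * offset scheme this anchor enables: a helper call is stored as the
 * distance from __bpf_call_base in insn->imm, and the interpreter's
 * JMP_CALL handler further below recovers the target as base + imm. Since
 * imm is only 32 bits wide, targets must stay within a signed 32-bit
 * offset of the base. Pointer/integer round-trips like this are
 * implementation-defined in portable C; this sketches the idea, not a
 * portable recipe.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*helper_fn)(uint64_t, uint64_t);

static uint64_t call_base(uint64_t a, uint64_t b) { return 0; }
static uint64_t helper_add(uint64_t a, uint64_t b) { return a + b; }

int main(void)
{
	/* Encode: distance of the helper from the anchor function. */
	intptr_t imm = (intptr_t)helper_add - (intptr_t)call_base;
	/* Decode: anchor + distance yields the callable target again. */
	helper_fn fn = (helper_fn)((intptr_t)call_base + imm);

	printf("%llu\n", (unsigned long long)fn(40, 2));	/* 42 */
	return 0;
}
#endif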
1530
1531 /* All UAPI available opcodes. */
1532 #define BPF_INSN_MAP(INSN_2, INSN_3)            \
1533         /* 32 bit ALU operations. */            \
1534         /*   Register based. */                 \
1535         INSN_3(ALU, ADD,  X),                   \
1536         INSN_3(ALU, SUB,  X),                   \
1537         INSN_3(ALU, AND,  X),                   \
1538         INSN_3(ALU, OR,   X),                   \
1539         INSN_3(ALU, LSH,  X),                   \
1540         INSN_3(ALU, RSH,  X),                   \
1541         INSN_3(ALU, XOR,  X),                   \
1542         INSN_3(ALU, MUL,  X),                   \
1543         INSN_3(ALU, MOV,  X),                   \
1544         INSN_3(ALU, ARSH, X),                   \
1545         INSN_3(ALU, DIV,  X),                   \
1546         INSN_3(ALU, MOD,  X),                   \
1547         INSN_2(ALU, NEG),                       \
1548         INSN_3(ALU, END, TO_BE),                \
1549         INSN_3(ALU, END, TO_LE),                \
1550         /*   Immediate based. */                \
1551         INSN_3(ALU, ADD,  K),                   \
1552         INSN_3(ALU, SUB,  K),                   \
1553         INSN_3(ALU, AND,  K),                   \
1554         INSN_3(ALU, OR,   K),                   \
1555         INSN_3(ALU, LSH,  K),                   \
1556         INSN_3(ALU, RSH,  K),                   \
1557         INSN_3(ALU, XOR,  K),                   \
1558         INSN_3(ALU, MUL,  K),                   \
1559         INSN_3(ALU, MOV,  K),                   \
1560         INSN_3(ALU, ARSH, K),                   \
1561         INSN_3(ALU, DIV,  K),                   \
1562         INSN_3(ALU, MOD,  K),                   \
1563         /* 64 bit ALU operations. */            \
1564         /*   Register based. */                 \
1565         INSN_3(ALU64, ADD,  X),                 \
1566         INSN_3(ALU64, SUB,  X),                 \
1567         INSN_3(ALU64, AND,  X),                 \
1568         INSN_3(ALU64, OR,   X),                 \
1569         INSN_3(ALU64, LSH,  X),                 \
1570         INSN_3(ALU64, RSH,  X),                 \
1571         INSN_3(ALU64, XOR,  X),                 \
1572         INSN_3(ALU64, MUL,  X),                 \
1573         INSN_3(ALU64, MOV,  X),                 \
1574         INSN_3(ALU64, ARSH, X),                 \
1575         INSN_3(ALU64, DIV,  X),                 \
1576         INSN_3(ALU64, MOD,  X),                 \
1577         INSN_2(ALU64, NEG),                     \
1578         INSN_3(ALU64, END, TO_LE),              \
1579         /*   Immediate based. */                \
1580         INSN_3(ALU64, ADD,  K),                 \
1581         INSN_3(ALU64, SUB,  K),                 \
1582         INSN_3(ALU64, AND,  K),                 \
1583         INSN_3(ALU64, OR,   K),                 \
1584         INSN_3(ALU64, LSH,  K),                 \
1585         INSN_3(ALU64, RSH,  K),                 \
1586         INSN_3(ALU64, XOR,  K),                 \
1587         INSN_3(ALU64, MUL,  K),                 \
1588         INSN_3(ALU64, MOV,  K),                 \
1589         INSN_3(ALU64, ARSH, K),                 \
1590         INSN_3(ALU64, DIV,  K),                 \
1591         INSN_3(ALU64, MOD,  K),                 \
1592         /* Call instruction. */                 \
1593         INSN_2(JMP, CALL),                      \
1594         /* Exit instruction. */                 \
1595         INSN_2(JMP, EXIT),                      \
1596         /* 32-bit Jump instructions. */         \
1597         /*   Register based. */                 \
1598         INSN_3(JMP32, JEQ,  X),                 \
1599         INSN_3(JMP32, JNE,  X),                 \
1600         INSN_3(JMP32, JGT,  X),                 \
1601         INSN_3(JMP32, JLT,  X),                 \
1602         INSN_3(JMP32, JGE,  X),                 \
1603         INSN_3(JMP32, JLE,  X),                 \
1604         INSN_3(JMP32, JSGT, X),                 \
1605         INSN_3(JMP32, JSLT, X),                 \
1606         INSN_3(JMP32, JSGE, X),                 \
1607         INSN_3(JMP32, JSLE, X),                 \
1608         INSN_3(JMP32, JSET, X),                 \
1609         /*   Immediate based. */                \
1610         INSN_3(JMP32, JEQ,  K),                 \
1611         INSN_3(JMP32, JNE,  K),                 \
1612         INSN_3(JMP32, JGT,  K),                 \
1613         INSN_3(JMP32, JLT,  K),                 \
1614         INSN_3(JMP32, JGE,  K),                 \
1615         INSN_3(JMP32, JLE,  K),                 \
1616         INSN_3(JMP32, JSGT, K),                 \
1617         INSN_3(JMP32, JSLT, K),                 \
1618         INSN_3(JMP32, JSGE, K),                 \
1619         INSN_3(JMP32, JSLE, K),                 \
1620         INSN_3(JMP32, JSET, K),                 \
1621         /* Jump instructions. */                \
1622         /*   Register based. */                 \
1623         INSN_3(JMP, JEQ,  X),                   \
1624         INSN_3(JMP, JNE,  X),                   \
1625         INSN_3(JMP, JGT,  X),                   \
1626         INSN_3(JMP, JLT,  X),                   \
1627         INSN_3(JMP, JGE,  X),                   \
1628         INSN_3(JMP, JLE,  X),                   \
1629         INSN_3(JMP, JSGT, X),                   \
1630         INSN_3(JMP, JSLT, X),                   \
1631         INSN_3(JMP, JSGE, X),                   \
1632         INSN_3(JMP, JSLE, X),                   \
1633         INSN_3(JMP, JSET, X),                   \
1634         /*   Immediate based. */                \
1635         INSN_3(JMP, JEQ,  K),                   \
1636         INSN_3(JMP, JNE,  K),                   \
1637         INSN_3(JMP, JGT,  K),                   \
1638         INSN_3(JMP, JLT,  K),                   \
1639         INSN_3(JMP, JGE,  K),                   \
1640         INSN_3(JMP, JLE,  K),                   \
1641         INSN_3(JMP, JSGT, K),                   \
1642         INSN_3(JMP, JSLT, K),                   \
1643         INSN_3(JMP, JSGE, K),                   \
1644         INSN_3(JMP, JSLE, K),                   \
1645         INSN_3(JMP, JSET, K),                   \
1646         INSN_2(JMP, JA),                        \
1647         INSN_2(JMP32, JA),                      \
1648         /* Store instructions. */               \
1649         /*   Register based. */                 \
1650         INSN_3(STX, MEM,  B),                   \
1651         INSN_3(STX, MEM,  H),                   \
1652         INSN_3(STX, MEM,  W),                   \
1653         INSN_3(STX, MEM,  DW),                  \
1654         INSN_3(STX, ATOMIC, W),                 \
1655         INSN_3(STX, ATOMIC, DW),                \
1656         /*   Immediate based. */                \
1657         INSN_3(ST, MEM, B),                     \
1658         INSN_3(ST, MEM, H),                     \
1659         INSN_3(ST, MEM, W),                     \
1660         INSN_3(ST, MEM, DW),                    \
1661         /* Load instructions. */                \
1662         /*   Register based. */                 \
1663         INSN_3(LDX, MEM, B),                    \
1664         INSN_3(LDX, MEM, H),                    \
1665         INSN_3(LDX, MEM, W),                    \
1666         INSN_3(LDX, MEM, DW),                   \
1667         INSN_3(LDX, MEMSX, B),                  \
1668         INSN_3(LDX, MEMSX, H),                  \
1669         INSN_3(LDX, MEMSX, W),                  \
1670         /*   Immediate based. */                \
1671         INSN_3(LD, IMM, DW)
1672
1673 bool bpf_opcode_in_insntable(u8 code)
1674 {
1675 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1676 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1677         static const bool public_insntable[256] = {
1678                 [0 ... 255] = false,
1679                 /* Now overwrite non-defaults ... */
1680                 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1681                 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1682                 [BPF_LD | BPF_ABS | BPF_B] = true,
1683                 [BPF_LD | BPF_ABS | BPF_H] = true,
1684                 [BPF_LD | BPF_ABS | BPF_W] = true,
1685                 [BPF_LD | BPF_IND | BPF_B] = true,
1686                 [BPF_LD | BPF_IND | BPF_H] = true,
1687                 [BPF_LD | BPF_IND | BPF_W] = true,
1688                 [BPF_JMP | BPF_JCOND] = true,
1689         };
1690 #undef BPF_INSN_3_TBL
1691 #undef BPF_INSN_2_TBL
1692         return public_insntable[code];
1693 }
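/* A standalone sketch (userspace, guarded out of any build) of why a flat
 * 256-entry table indexed by insn->code suffices: an eBPF opcode is one
 * byte built by OR-ing a class, an operation and a source/size modifier,
 * which is exactly the BPF_##x | BPF_##y | BPF_##z pattern used by the
 * table initializers above. The masks follow the uapi encoding.
 */
#if 0
#include <stdio.h>

#define EX_CLASS(code)	((code) & 0x07)
#define EX_OP(code)	((code) & 0xf0)
#define EX_SRC(code)	((code) & 0x08)

int main(void)
{
	/* BPF_ALU64 | BPF_ADD | BPF_X, i.e. dst += src */
	unsigned char code = 0x07 | 0x00 | 0x08;

	printf("code=%#04x class=%#x op=%#x src=%#x\n",
	       code, EX_CLASS(code), EX_OP(code), EX_SRC(code));
	return 0;
}
#endif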
1694
1695 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1696 /**
1697  *      ___bpf_prog_run - run eBPF program on a given context
1698  *      @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1699  *      @insn: is the array of eBPF instructions
1700  *
1701  * Decode and execute eBPF instructions.
1702  *
1703  * Return: whatever value is in %BPF_R0 at program exit
1704  */
1705 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1706 {
1707 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1708 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1709         static const void * const jumptable[256] __annotate_jump_table = {
1710                 [0 ... 255] = &&default_label,
1711                 /* Now overwrite non-defaults ... */
1712                 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1713                 /* Non-UAPI available opcodes. */
1714                 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1715                 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1716                 [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1717                 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1718                 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1719                 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1720                 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1721                 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1722                 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1723                 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1724         };
1725 #undef BPF_INSN_3_LBL
1726 #undef BPF_INSN_2_LBL
1727         u32 tail_call_cnt = 0;
1728
1729 #define CONT     ({ insn++; goto select_insn; })
1730 #define CONT_JMP ({ insn++; goto select_insn; })
1731
1732 select_insn:
1733         goto *jumptable[insn->code];
1734
1735         /* Explicitly mask the register-based shift amounts with 63 or 31
1736          * to avoid undefined behavior. Normally this won't affect the
1737          * generated code; for example, on native 64-bit archs such as
1738          * x86-64 or arm64, the compiler optimizes the AND away for the
1739          * interpreter. In case of JITs, each of the JIT backends compiles
1740          * the BPF shift operations to machine instructions which produce
1741          * implementation-defined results in such a case; the resulting
1742          * contents of the register may be arbitrary, but program behaviour
1743          * as a whole remains defined. In other words, in case of JIT backends,
1744          * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1745          */
1746         /* ALU (shifts) */
1747 #define SHT(OPCODE, OP)                                 \
1748         ALU64_##OPCODE##_X:                             \
1749                 DST = DST OP (SRC & 63);                \
1750                 CONT;                                   \
1751         ALU_##OPCODE##_X:                               \
1752                 DST = (u32) DST OP ((u32) SRC & 31);    \
1753                 CONT;                                   \
1754         ALU64_##OPCODE##_K:                             \
1755                 DST = DST OP IMM;                       \
1756                 CONT;                                   \
1757         ALU_##OPCODE##_K:                               \
1758                 DST = (u32) DST OP (u32) IMM;           \
1759                 CONT;
1760         /* ALU (rest) */
1761 #define ALU(OPCODE, OP)                                 \
1762         ALU64_##OPCODE##_X:                             \
1763                 DST = DST OP SRC;                       \
1764                 CONT;                                   \
1765         ALU_##OPCODE##_X:                               \
1766                 DST = (u32) DST OP (u32) SRC;           \
1767                 CONT;                                   \
1768         ALU64_##OPCODE##_K:                             \
1769                 DST = DST OP IMM;                       \
1770                 CONT;                                   \
1771         ALU_##OPCODE##_K:                               \
1772                 DST = (u32) DST OP (u32) IMM;           \
1773                 CONT;
1774         ALU(ADD,  +)
1775         ALU(SUB,  -)
1776         ALU(AND,  &)
1777         ALU(OR,   |)
1778         ALU(XOR,  ^)
1779         ALU(MUL,  *)
1780         SHT(LSH, <<)
1781         SHT(RSH, >>)
1782 #undef SHT
1783 #undef ALU
1784         ALU_NEG:
1785                 DST = (u32) -DST;
1786                 CONT;
1787         ALU64_NEG:
1788                 DST = -DST;
1789                 CONT;
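                /* For the MOV handlers below, a non-zero insn->off encodes
                 * the BPF v4 sign-extending move (movsx): OFF selects how
                 * many low bits of SRC get sign-extended (8 or 16 for the
                 * 32-bit variant, additionally 32 for the 64-bit one),
                 * while OFF == 0 is the plain move.
                 */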
1790         ALU_MOV_X:
1791                 switch (OFF) {
1792                 case 0:
1793                         DST = (u32) SRC;
1794                         break;
1795                 case 8:
1796                         DST = (u32)(s8) SRC;
1797                         break;
1798                 case 16:
1799                         DST = (u32)(s16) SRC;
1800                         break;
1801                 }
1802                 CONT;
1803         ALU_MOV_K:
1804                 DST = (u32) IMM;
1805                 CONT;
1806         ALU64_MOV_X:
1807                 switch (OFF) {
1808                 case 0:
1809                         DST = SRC;
1810                         break;
1811                 case 8:
1812                         DST = (s8) SRC;
1813                         break;
1814                 case 16:
1815                         DST = (s16) SRC;
1816                         break;
1817                 case 32:
1818                         DST = (s32) SRC;
1819                         break;
1820                 }
1821                 CONT;
1822         ALU64_MOV_K:
1823                 DST = IMM;
1824                 CONT;
1825         LD_IMM_DW:
1826                 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1827                 insn++;
1828                 CONT;
1829         ALU_ARSH_X:
1830                 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1831                 CONT;
1832         ALU_ARSH_K:
1833                 DST = (u64) (u32) (((s32) DST) >> IMM);
1834                 CONT;
1835         ALU64_ARSH_X:
1836                 (*(s64 *) &DST) >>= (SRC & 63);
1837                 CONT;
1838         ALU64_ARSH_K:
1839                 (*(s64 *) &DST) >>= IMM;
1840                 CONT;
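                /* For the DIV/MOD handlers below, insn->off selects the
                 * flavour per the BPF v4 ISA: OFF == 0 is the classic
                 * unsigned division/modulo, OFF == 1 the signed variant
                 * (sdiv/smod), where the remainder takes the sign of the
                 * dividend.
                 */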
1841         ALU64_MOD_X:
1842                 switch (OFF) {
1843                 case 0:
1844                         div64_u64_rem(DST, SRC, &AX);
1845                         DST = AX;
1846                         break;
1847                 case 1:
1848                         AX = div64_s64(DST, SRC);
1849                         DST = DST - AX * SRC;
1850                         break;
1851                 }
1852                 CONT;
1853         ALU_MOD_X:
1854                 switch (OFF) {
1855                 case 0:
1856                         AX = (u32) DST;
1857                         DST = do_div(AX, (u32) SRC);
1858                         break;
1859                 case 1:
1860                         AX = abs((s32)DST);
1861                         AX = do_div(AX, abs((s32)SRC));
1862                         if ((s32)DST < 0)
1863                                 DST = (u32)-AX;
1864                         else
1865                                 DST = (u32)AX;
1866                         break;
1867                 }
1868                 CONT;
1869         ALU64_MOD_K:
1870                 switch (OFF) {
1871                 case 0:
1872                         div64_u64_rem(DST, IMM, &AX);
1873                         DST = AX;
1874                         break;
1875                 case 1:
1876                         AX = div64_s64(DST, IMM);
1877                         DST = DST - AX * IMM;
1878                         break;
1879                 }
1880                 CONT;
1881         ALU_MOD_K:
1882                 switch (OFF) {
1883                 case 0:
1884                         AX = (u32) DST;
1885                         DST = do_div(AX, (u32) IMM);
1886                         break;
1887                 case 1:
1888                         AX = abs((s32)DST);
1889                         AX = do_div(AX, abs((s32)IMM));
1890                         if ((s32)DST < 0)
1891                                 DST = (u32)-AX;
1892                         else
1893                                 DST = (u32)AX;
1894                         break;
1895                 }
1896                 CONT;
1897         ALU64_DIV_X:
1898                 switch (OFF) {
1899                 case 0:
1900                         DST = div64_u64(DST, SRC);
1901                         break;
1902                 case 1:
1903                         DST = div64_s64(DST, SRC);
1904                         break;
1905                 }
1906                 CONT;
1907         ALU_DIV_X:
1908                 switch (OFF) {
1909                 case 0:
1910                         AX = (u32) DST;
1911                         do_div(AX, (u32) SRC);
1912                         DST = (u32) AX;
1913                         break;
1914                 case 1:
1915                         AX = abs((s32)DST);
1916                         do_div(AX, abs((s32)SRC));
1917                         if (((s32)DST < 0) == ((s32)SRC < 0))
1918                                 DST = (u32)AX;
1919                         else
1920                                 DST = (u32)-AX;
1921                         break;
1922                 }
1923                 CONT;
1924         ALU64_DIV_K:
1925                 switch (OFF) {
1926                 case 0:
1927                         DST = div64_u64(DST, IMM);
1928                         break;
1929                 case 1:
1930                         DST = div64_s64(DST, IMM);
1931                         break;
1932                 }
1933                 CONT;
1934         ALU_DIV_K:
1935                 switch (OFF) {
1936                 case 0:
1937                         AX = (u32) DST;
1938                         do_div(AX, (u32) IMM);
1939                         DST = (u32) AX;
1940                         break;
1941                 case 1:
1942                         AX = abs((s32)DST);
1943                         do_div(AX, abs((s32)IMM));
1944                         if (((s32)DST < 0) == ((s32)IMM < 0))
1945                                 DST = (u32)AX;
1946                         else
1947                                 DST = (u32)-AX;
1948                         break;
1949                 }
1950                 CONT;
1951         ALU_END_TO_BE:
1952                 switch (IMM) {
1953                 case 16:
1954                         DST = (__force u16) cpu_to_be16(DST);
1955                         break;
1956                 case 32:
1957                         DST = (__force u32) cpu_to_be32(DST);
1958                         break;
1959                 case 64:
1960                         DST = (__force u64) cpu_to_be64(DST);
1961                         break;
1962                 }
1963                 CONT;
1964         ALU_END_TO_LE:
1965                 switch (IMM) {
1966                 case 16:
1967                         DST = (__force u16) cpu_to_le16(DST);
1968                         break;
1969                 case 32:
1970                         DST = (__force u32) cpu_to_le32(DST);
1971                         break;
1972                 case 64:
1973                         DST = (__force u64) cpu_to_le64(DST);
1974                         break;
1975                 }
1976                 CONT;
1977         ALU64_END_TO_LE:
1978                 switch (IMM) {
1979                 case 16:
1980                         DST = (__force u16) __swab16(DST);
1981                         break;
1982                 case 32:
1983                         DST = (__force u32) __swab32(DST);
1984                         break;
1985                 case 64:
1986                         DST = (__force u64) __swab64(DST);
1987                         break;
1988                 }
1989                 CONT;
1990
1991         /* CALL */
1992         JMP_CALL:
1993                 /* Function call scratches BPF_R1-BPF_R5 registers,
1994                  * preserves BPF_R6-BPF_R9, and stores return value
1995                  * into BPF_R0.
1996                  */
1997                 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1998                                                        BPF_R4, BPF_R5);
1999                 CONT;
2000
2001         JMP_CALL_ARGS:
2002                 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
2003                                                             BPF_R3, BPF_R4,
2004                                                             BPF_R5,
2005                                                             insn + insn->off + 1);
2006                 CONT;
2007
2008         JMP_TAIL_CALL: {
2009                 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
2010                 struct bpf_array *array = container_of(map, struct bpf_array, map);
2011                 struct bpf_prog *prog;
2012                 u32 index = BPF_R3;
2013
2014                 if (unlikely(index >= array->map.max_entries))
2015                         goto out;
2016
2017                 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
2018                         goto out;
2019
2020                 tail_call_cnt++;
2021
2022                 prog = READ_ONCE(array->ptrs[index]);
2023                 if (!prog)
2024                         goto out;
2025
2026                 /* ARG1 at this point is guaranteed to point to CTX from
2027                  * the verifier side, because the tail call is handled like
2028                  * a helper, that is, via bpf_tail_call_proto, where
2029                  * arg1_type is ARG_PTR_TO_CTX.
2030                  */
2031                 insn = prog->insnsi;
2032                 goto select_insn;
2033 out:
2034                 CONT;
2035         }
2036         JMP_JA:
2037                 insn += insn->off;
2038                 CONT;
2039         JMP32_JA:
2040                 insn += insn->imm;
2041                 CONT;
2042         JMP_EXIT:
2043                 return BPF_R0;
2044         /* JMP */
2045 #define COND_JMP(SIGN, OPCODE, CMP_OP)                          \
2046         JMP_##OPCODE##_X:                                       \
2047                 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {     \
2048                         insn += insn->off;                      \
2049                         CONT_JMP;                               \
2050                 }                                               \
2051                 CONT;                                           \
2052         JMP32_##OPCODE##_X:                                     \
2053                 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {     \
2054                         insn += insn->off;                      \
2055                         CONT_JMP;                               \
2056                 }                                               \
2057                 CONT;                                           \
2058         JMP_##OPCODE##_K:                                       \
2059                 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {     \
2060                         insn += insn->off;                      \
2061                         CONT_JMP;                               \
2062                 }                                               \
2063                 CONT;                                           \
2064         JMP32_##OPCODE##_K:                                     \
2065                 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {     \
2066                         insn += insn->off;                      \
2067                         CONT_JMP;                               \
2068                 }                                               \
2069                 CONT;
2070         COND_JMP(u, JEQ, ==)
2071         COND_JMP(u, JNE, !=)
2072         COND_JMP(u, JGT, >)
2073         COND_JMP(u, JLT, <)
2074         COND_JMP(u, JGE, >=)
2075         COND_JMP(u, JLE, <=)
2076         COND_JMP(u, JSET, &)
2077         COND_JMP(s, JSGT, >)
2078         COND_JMP(s, JSLT, <)
2079         COND_JMP(s, JSGE, >=)
2080         COND_JMP(s, JSLE, <=)
2081 #undef COND_JMP
2082         /* ST, STX and LDX */
2083         ST_NOSPEC:
2084                 /* Speculation barrier for mitigating Speculative Store Bypass.
2085                  * In case of arm64, we rely on the firmware mitigation as
2086                  * controlled via the ssbd kernel parameter. Whenever the
2087                  * mitigation is enabled, it works for all of the kernel code
2088                  * with no need to provide any additional instructions here.
2089                  * In case of x86, we use 'lfence' insn for mitigation. We
2090                  * reuse preexisting logic from Spectre v1 mitigation that
2091                  * happens to produce the required code on x86 for v4 as well.
2092                  */
2093                 barrier_nospec();
2094                 CONT;
2095 #define LDST(SIZEOP, SIZE)                                              \
2096         STX_MEM_##SIZEOP:                                               \
2097                 *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
2098                 CONT;                                                   \
2099         ST_MEM_##SIZEOP:                                                \
2100                 *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
2101                 CONT;                                                   \
2102         LDX_MEM_##SIZEOP:                                               \
2103                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
2104                 CONT;                                                   \
2105         LDX_PROBE_MEM_##SIZEOP:                                         \
2106                 bpf_probe_read_kernel_common(&DST, sizeof(SIZE),        \
2107                               (const void *)(long) (SRC + insn->off));  \
2108                 DST = *((SIZE *)&DST);                                  \
2109                 CONT;
2110
2111         LDST(B,   u8)
2112         LDST(H,  u16)
2113         LDST(W,  u32)
2114         LDST(DW, u64)
2115 #undef LDST
2116
2117 #define LDSX(SIZEOP, SIZE)                                              \
2118         LDX_MEMSX_##SIZEOP:                                             \
2119                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
2120                 CONT;                                                   \
2121         LDX_PROBE_MEMSX_##SIZEOP:                                       \
2122                 bpf_probe_read_kernel_common(&DST, sizeof(SIZE),                \
2123                                       (const void *)(long) (SRC + insn->off));  \
2124                 DST = *((SIZE *)&DST);                                  \
2125                 CONT;
2126
2127         LDSX(B,   s8)
2128         LDSX(H,  s16)
2129         LDSX(W,  s32)
2130 #undef LDSX
2131
2132 #define ATOMIC_ALU_OP(BOP, KOP)                                         \
2133                 case BOP:                                               \
2134                         if (BPF_SIZE(insn->code) == BPF_W)              \
2135                                 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2136                                              (DST + insn->off));        \
2137                         else                                            \
2138                                 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2139                                                (DST + insn->off));      \
2140                         break;                                          \
2141                 case BOP | BPF_FETCH:                                   \
2142                         if (BPF_SIZE(insn->code) == BPF_W)              \
2143                                 SRC = (u32) atomic_fetch_##KOP(         \
2144                                         (u32) SRC,                      \
2145                                         (atomic_t *)(unsigned long) (DST + insn->off)); \
2146                         else                                            \
2147                                 SRC = (u64) atomic64_fetch_##KOP(       \
2148                                         (u64) SRC,                      \
2149                                         (atomic64_t *)(unsigned long) (DST + insn->off)); \
2150                         break;
2151
2152         STX_ATOMIC_DW:
2153         STX_ATOMIC_W:
2154                 switch (IMM) {
2155                 ATOMIC_ALU_OP(BPF_ADD, add)
2156                 ATOMIC_ALU_OP(BPF_AND, and)
2157                 ATOMIC_ALU_OP(BPF_OR, or)
2158                 ATOMIC_ALU_OP(BPF_XOR, xor)
2159 #undef ATOMIC_ALU_OP
2160
2161                 case BPF_XCHG:
2162                         if (BPF_SIZE(insn->code) == BPF_W)
2163                                 SRC = (u32) atomic_xchg(
2164                                         (atomic_t *)(unsigned long) (DST + insn->off),
2165                                         (u32) SRC);
2166                         else
2167                                 SRC = (u64) atomic64_xchg(
2168                                         (atomic64_t *)(unsigned long) (DST + insn->off),
2169                                         (u64) SRC);
2170                         break;
2171                 case BPF_CMPXCHG:
2172                         if (BPF_SIZE(insn->code) == BPF_W)
2173                                 BPF_R0 = (u32) atomic_cmpxchg(
2174                                         (atomic_t *)(unsigned long) (DST + insn->off),
2175                                         (u32) BPF_R0, (u32) SRC);
2176                         else
2177                                 BPF_R0 = (u64) atomic64_cmpxchg(
2178                                         (atomic64_t *)(unsigned long) (DST + insn->off),
2179                                         (u64) BPF_R0, (u64) SRC);
2180                         break;
2181
2182                 default:
2183                         goto default_label;
2184                 }
2185                 CONT;
2186
2187         default_label:
2188                 /* If we ever reach this, we have a bug somewhere. Die hard here
2189                  * instead of just returning 0; we could be somewhere in a subprog,
2190                  * so execution could otherwise continue, which we do /not/ want.
2191                  *
2192                  * Note, the verifier whitelists all opcodes in bpf_opcode_in_insntable().
2193                  */
2194                 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2195                         insn->code, insn->imm);
2196                 BUG_ON(1);
2197                 return 0;
2198 }
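/* A minimal standalone sketch (userspace, guarded out of any build) of the
 * threaded dispatch used by ___bpf_prog_run() above, shrunk to a
 * two-opcode VM. It relies on the same GCC/Clang "labels as values"
 * extension (&&label, goto *), which avoids a central switch and lets each
 * handler jump straight to the next one. The opcodes here are invented.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

enum { EX_ADD, EX_EXIT };

struct ex_insn { uint8_t code; int32_t imm; };

static int64_t ex_run(const struct ex_insn *insn)
{
	static const void * const jumptable[256] = {
		[0 ... 255] = &&unknown,
		[EX_ADD]  = &&do_add,
		[EX_EXIT] = &&do_exit,
	};
	int64_t r0 = 0;

#define EX_CONT ({ insn++; goto select_insn; })
select_insn:
	goto *jumptable[insn->code];
do_add:
	r0 += insn->imm;
	EX_CONT;
do_exit:
	return r0;
unknown:
	return -1;
#undef EX_CONT
}

int main(void)
{
	const struct ex_insn prog[] = {
		{ EX_ADD, 40 }, { EX_ADD, 2 }, { EX_EXIT, 0 },
	};

	printf("%lld\n", (long long)ex_run(prog));	/* 42 */
	return 0;
}
#endif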
2199
2200 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2201 #define DEFINE_BPF_PROG_RUN(stack_size) \
2202 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2203 { \
2204         u64 stack[stack_size / sizeof(u64)]; \
2205         u64 regs[MAX_BPF_EXT_REG] = {}; \
2206 \
2207         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2208         ARG1 = (u64) (unsigned long) ctx; \
2209         return ___bpf_prog_run(regs, insn); \
2210 }
2211
2212 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2213 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2214 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2215                                       const struct bpf_insn *insn) \
2216 { \
2217         u64 stack[stack_size / sizeof(u64)]; \
2218         u64 regs[MAX_BPF_EXT_REG]; \
2219 \
2220         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2221         BPF_R1 = r1; \
2222         BPF_R2 = r2; \
2223         BPF_R3 = r3; \
2224         BPF_R4 = r4; \
2225         BPF_R5 = r5; \
2226         return ___bpf_prog_run(regs, insn); \
2227 }
2228
2229 #define EVAL1(FN, X) FN(X)
2230 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2231 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2232 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2233 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2234 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2235
2236 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2237 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2238 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2239
2240 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2241 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2242 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2243
2244 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2245
2246 static unsigned int (*interpreters[])(const void *ctx,
2247                                       const struct bpf_insn *insn) = {
2248 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2249 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2250 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2251 };
2252 #undef PROG_NAME_LIST
2253 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2254 static __maybe_unused
2255 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2256                            const struct bpf_insn *insn) = {
2257 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2258 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2259 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2260 };
2261 #undef PROG_NAME_LIST
2262
2263 #ifdef CONFIG_BPF_SYSCALL
2264 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2265 {
2266         stack_depth = max_t(u32, stack_depth, 1);
2267         insn->off = (s16) insn->imm;
2268         insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2269                 __bpf_call_base_args;
2270         insn->code = BPF_JMP | BPF_CALL_ARGS;
2271 }
2272 #endif
2273 #else
2274 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2275                                          const struct bpf_insn *insn)
2276 {
2277         /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2278          * is not working properly, so warn about it!
2279          */
2280         WARN_ON_ONCE(1);
2281         return 0;
2282 }
2283 #endif
2284
2285 bool bpf_prog_map_compatible(struct bpf_map *map,
2286                              const struct bpf_prog *fp)
2287 {
2288         enum bpf_prog_type prog_type = resolve_prog_type(fp);
2289         bool ret;
2290
2291         if (fp->kprobe_override)
2292                 return false;
2293
2294         /* XDP programs inserted into maps are not guaranteed to run on
2295          * a particular netdev (and can run outside driver context entirely
2296          * in the case of devmap and cpumap). Until device checks
2297          * are implemented, prohibit adding dev-bound programs to program maps.
2298          */
2299         if (bpf_prog_is_dev_bound(fp->aux))
2300                 return false;
2301
2302         spin_lock(&map->owner.lock);
2303         if (!map->owner.type) {
2304                 /* There's no owner yet where we could check for
2305                  * compatibility.
2306                  */
2307                 map->owner.type  = prog_type;
2308                 map->owner.jited = fp->jited;
2309                 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2310                 ret = true;
2311         } else {
2312                 ret = map->owner.type  == prog_type &&
2313                       map->owner.jited == fp->jited &&
2314                       map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2315         }
2316         spin_unlock(&map->owner.lock);
2317
2318         return ret;
2319 }
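/* A standalone sketch (userspace, guarded out of any build) of the
 * first-owner-wins check above, with a pthread mutex standing in for the
 * owner spinlock: the first program to enter the map records its shape,
 * and every later one must match it exactly.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ex_owner {
	pthread_mutex_t lock;
	int type;		/* 0 means "no owner yet" */
	bool jited;
};

static bool ex_compatible(struct ex_owner *o, int type, bool jited)
{
	bool ret;

	pthread_mutex_lock(&o->lock);
	if (!o->type) {
		o->type = type;		/* first program decides */
		o->jited = jited;
		ret = true;
	} else {
		ret = o->type == type && o->jited == jited;
	}
	pthread_mutex_unlock(&o->lock);
	return ret;
}

int main(void)
{
	struct ex_owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("%d %d %d\n",
	       ex_compatible(&o, 1, true),	/* 1: becomes owner */
	       ex_compatible(&o, 1, true),	/* 1: matches */
	       ex_compatible(&o, 2, true));	/* 0: type mismatch */
	return 0;
}
#endif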
2320
2321 static int bpf_check_tail_call(const struct bpf_prog *fp)
2322 {
2323         struct bpf_prog_aux *aux = fp->aux;
2324         int i, ret = 0;
2325
2326         mutex_lock(&aux->used_maps_mutex);
2327         for (i = 0; i < aux->used_map_cnt; i++) {
2328                 struct bpf_map *map = aux->used_maps[i];
2329
2330                 if (!map_type_contains_progs(map))
2331                         continue;
2332
2333                 if (!bpf_prog_map_compatible(map, fp)) {
2334                         ret = -EINVAL;
2335                         goto out;
2336                 }
2337         }
2338
2339 out:
2340         mutex_unlock(&aux->used_maps_mutex);
2341         return ret;
2342 }
2343
2344 static void bpf_prog_select_func(struct bpf_prog *fp)
2345 {
2346 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2347         u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2348
2349         fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2350 #else
2351         fp->bpf_func = __bpf_prog_ret0_warn;
2352 #endif
2353 }
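/* A standalone sketch (userspace, guarded out of any build) of the bucket
 * arithmetic shared by bpf_prog_select_func() above and
 * bpf_patch_call_args(): stack depths 1..512 map onto the 16 interpreter
 * variants generated in 32-byte steps (__bpf_prog_run32 .. 512).
 */
#if 0
#include <stdio.h>

static unsigned int round_up_32(unsigned int x)
{
	return (x + 31u) & ~31u;
}

int main(void)
{
	const unsigned int depths[] = { 1, 32, 40, 512 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int idx = round_up_32(depths[i]) / 32 - 1;

		/* 1 -> idx 0 (run32), 40 -> idx 1 (run64), 512 -> idx 15 */
		printf("depth %3u -> interpreters[%2u] (stack size %u)\n",
		       depths[i], idx, (idx + 1) * 32);
	}
	return 0;
}
#endif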
2354
2355 /**
2356  *      bpf_prog_select_runtime - select exec runtime for BPF program
2357  *      @fp: bpf_prog populated with BPF program
2358  *      @err: pointer to error variable
2359  *
2360  * Try to JIT eBPF program, if JIT is not available, use interpreter.
2361  * The BPF program will be executed via bpf_prog_run() function.
2362  *
2363  * Return: the &fp argument along with &err set to 0 for success or
2364  * a negative errno code on failure
2365  */
2366 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2367 {
2368         /* In case of BPF to BPF calls, the verifier did all the prep
2369          * work with regard to JITing, etc.
2370          */
2371         bool jit_needed = false;
2372
2373         if (fp->bpf_func)
2374                 goto finalize;
2375
2376         if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2377             bpf_prog_has_kfunc_call(fp))
2378                 jit_needed = true;
2379
2380         bpf_prog_select_func(fp);
2381
2382         /* eBPF JITs can rewrite the program in case constant
2383          * blinding is active. However, in case of error during
2384          * blinding, bpf_int_jit_compile() must always return a
2385          * valid program, which in this case would simply not
2386          * be JITed, but fall back to the interpreter instead.
2387          */
2388         if (!bpf_prog_is_offloaded(fp->aux)) {
2389                 *err = bpf_prog_alloc_jited_linfo(fp);
2390                 if (*err)
2391                         return fp;
2392
2393                 fp = bpf_int_jit_compile(fp);
2394                 bpf_prog_jit_attempt_done(fp);
2395                 if (!fp->jited && jit_needed) {
2396                         *err = -ENOTSUPP;
2397                         return fp;
2398                 }
2399         } else {
2400                 *err = bpf_prog_offload_compile(fp);
2401                 if (*err)
2402                         return fp;
2403         }
2404
2405 finalize:
2406         bpf_prog_lock_ro(fp);
2407
2408         /* The tail call compatibility check can only be done at
2409          * this late stage, as we need to determine whether we deal
2410          * with JITed or non-JITed program concatenations, and not
2411          * all eBPF JITs might immediately support all features.
2412          */
2413         *err = bpf_check_tail_call(fp);
2414
2415         return fp;
2416 }
2417 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
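/* Illustrative caller pattern (guarded out of any build), modelled on how
 * the program-load path uses the function above: the returned prog must be
 * used even on error, since JITing may have reallocated it, and *err
 * decides whether loading proceeds. The wrapper name is invented.
 */
#if 0
static int example_finalize_prog(struct bpf_prog **progp)
{
	int err;

	*progp = bpf_prog_select_runtime(*progp, &err);
	if (err < 0)
		return err;	/* prog stays valid; caller still owns and frees it */
	return 0;
}
#endif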
2418
2419 static unsigned int __bpf_prog_ret1(const void *ctx,
2420                                     const struct bpf_insn *insn)
2421 {
2422         return 1;
2423 }
2424
2425 static struct bpf_prog_dummy {
2426         struct bpf_prog prog;
2427 } dummy_bpf_prog = {
2428         .prog = {
2429                 .bpf_func = __bpf_prog_ret1,
2430         },
2431 };
2432
2433 struct bpf_empty_prog_array bpf_empty_prog_array = {
2434         .null_prog = NULL,
2435 };
2436 EXPORT_SYMBOL(bpf_empty_prog_array);
2437
2438 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2439 {
2440         if (prog_cnt)
2441                 return kzalloc(sizeof(struct bpf_prog_array) +
2442                                sizeof(struct bpf_prog_array_item) *
2443                                (prog_cnt + 1),
2444                                flags);
2445
2446         return &bpf_empty_prog_array.hdr;
2447 }
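/* A standalone sketch (userspace, guarded out of any build) of the layout
 * chosen above: allocating cnt + 1 zeroed items leaves a NULL sentinel at
 * the end, so every walker below can iterate with
 * "for (...; item->prog; item++)" and no stored length.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct ex_item { const char *prog; };

static struct ex_item *ex_array_alloc(size_t cnt)
{
	/* + 1 for the NULL sentinel; calloc() zeroes it like kzalloc(). */
	return calloc(cnt + 1, sizeof(struct ex_item));
}

int main(void)
{
	struct ex_item *a = ex_array_alloc(2);
	struct ex_item *it;

	a[0].prog = "prog_a";
	a[1].prog = "prog_b";

	for (it = a; it->prog; it++)	/* stops at the sentinel */
		printf("%s\n", it->prog);

	free(a);
	return 0;
}
#endif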
2448
2449 void bpf_prog_array_free(struct bpf_prog_array *progs)
2450 {
2451         if (!progs || progs == &bpf_empty_prog_array.hdr)
2452                 return;
2453         kfree_rcu(progs, rcu);
2454 }
2455
2456 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2457 {
2458         struct bpf_prog_array *progs;
2459
2460         /* If RCU Tasks Trace grace period implies RCU grace period, there is
2461          * no need to call kfree_rcu(), just call kfree() directly.
2462          */
2463         progs = container_of(rcu, struct bpf_prog_array, rcu);
2464         if (rcu_trace_implies_rcu_gp())
2465                 kfree(progs);
2466         else
2467                 kfree_rcu(progs, rcu);
2468 }
2469
2470 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2471 {
2472         if (!progs || progs == &bpf_empty_prog_array.hdr)
2473                 return;
2474         call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2475 }
2476
2477 int bpf_prog_array_length(struct bpf_prog_array *array)
2478 {
2479         struct bpf_prog_array_item *item;
2480         u32 cnt = 0;
2481
2482         for (item = array->items; item->prog; item++)
2483                 if (item->prog != &dummy_bpf_prog.prog)
2484                         cnt++;
2485         return cnt;
2486 }
2487
2488 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2489 {
2490         struct bpf_prog_array_item *item;
2491
2492         for (item = array->items; item->prog; item++)
2493                 if (item->prog != &dummy_bpf_prog.prog)
2494                         return false;
2495         return true;
2496 }
2497
2498 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2499                                      u32 *prog_ids,
2500                                      u32 request_cnt)
2501 {
2502         struct bpf_prog_array_item *item;
2503         int i = 0;
2504
2505         for (item = array->items; item->prog; item++) {
2506                 if (item->prog == &dummy_bpf_prog.prog)
2507                         continue;
2508                 prog_ids[i] = item->prog->aux->id;
2509                 if (++i == request_cnt) {
2510                         item++;
2511                         break;
2512                 }
2513         }
2514
2515         return !!(item->prog);
2516 }
2517
2518 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2519                                 __u32 __user *prog_ids, u32 cnt)
2520 {
2521         unsigned long err = 0;
2522         bool nospc;
2523         u32 *ids;
2524
2525         /* users of this function are doing:
2526          * cnt = bpf_prog_array_length();
2527          * if (cnt > 0)
2528          *     bpf_prog_array_copy_to_user(..., cnt);
2529          * so the kcalloc below doesn't need an extra cnt > 0 check.
2530          */
2531         ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2532         if (!ids)
2533                 return -ENOMEM;
2534         nospc = bpf_prog_array_copy_core(array, ids, cnt);
2535         err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2536         kfree(ids);
2537         if (err)
2538                 return -EFAULT;
2539         if (nospc)
2540                 return -ENOSPC;
2541         return 0;
2542 }
2543
2544 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2545                                 struct bpf_prog *old_prog)
2546 {
2547         struct bpf_prog_array_item *item;
2548
2549         for (item = array->items; item->prog; item++)
2550                 if (item->prog == old_prog) {
2551                         WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2552                         break;
2553                 }
2554 }
2555
2556 /**
2557  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2558  *                                   index into the program array with
2559  *                                   a dummy no-op program.
2560  * @array: a bpf_prog_array
2561  * @index: the index of the program to replace
2562  *
2563  * Skips over dummy programs, by not counting them, when calculating
2564  * the position of the program to replace.
2565  *
2566  * Return:
2567  * * 0          - Success
2568  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2569  * * -ENOENT    - Index out of range
2570  */
2571 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2572 {
2573         return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2574 }
2575
2576 /**
2577  * bpf_prog_array_update_at() - Updates the program at the given index
2578  *                              into the program array.
2579  * @array: a bpf_prog_array
2580  * @index: the index of the program to update
2581  * @prog: the program to insert into the array
2582  *
2583  * Skips over dummy programs, by not counting them, when calculating
2584  * the position of the program to update.
2585  *
2586  * Return:
2587  * * 0          - Success
2588  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2589  * * -ENOENT    - Index out of range
2590  */
2591 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2592                              struct bpf_prog *prog)
2593 {
2594         struct bpf_prog_array_item *item;
2595
2596         if (unlikely(index < 0))
2597                 return -EINVAL;
2598
2599         for (item = array->items; item->prog; item++) {
2600                 if (item->prog == &dummy_bpf_prog.prog)
2601                         continue;
2602                 if (!index) {
2603                         WRITE_ONCE(item->prog, prog);
2604                         return 0;
2605                 }
2606                 index--;
2607         }
2608         return -ENOENT;
2609 }
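/* A standalone sketch (userspace, guarded out of any build) of the index
 * semantics implemented above: the caller-visible index counts only live
 * entries, while dummy placeholders left behind by the delete_safe
 * helpers are skipped and stay invisible to users of the array.
 */
#if 0
#include <stdio.h>

static const char ex_dummy[] = "dummy";

static int ex_update_at(const char **items, int index, const char *prog)
{
	for (; *items; items++) {
		if (*items == ex_dummy)
			continue;
		if (!index) {
			*items = prog;
			return 0;
		}
		index--;
	}
	return -2;	/* -ENOENT: index out of range */
}

int main(void)
{
	const char *arr[] = { "a", ex_dummy, "b", "c", NULL };
	const char **p;

	/* Visible index 1 is "b"; the dummy slot is not counted. */
	ex_update_at(arr, 1, "new");
	for (p = arr; *p; p++)
		printf("%s ", *p);	/* a dummy new c */
	printf("\n");
	return 0;
}
#endif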
2610
2611 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2612                         struct bpf_prog *exclude_prog,
2613                         struct bpf_prog *include_prog,
2614                         u64 bpf_cookie,
2615                         struct bpf_prog_array **new_array)
2616 {
2617         int new_prog_cnt, carry_prog_cnt = 0;
2618         struct bpf_prog_array_item *existing, *new;
2619         struct bpf_prog_array *array;
2620         bool found_exclude = false;
2621
2622         /* Figure out how many existing progs we need to carry over to
2623          * the new array.
2624          */
2625         if (old_array) {
2626                 existing = old_array->items;
2627                 for (; existing->prog; existing++) {
2628                         if (existing->prog == exclude_prog) {
2629                                 found_exclude = true;
2630                                 continue;
2631                         }
2632                         if (existing->prog != &dummy_bpf_prog.prog)
2633                                 carry_prog_cnt++;
2634                         if (existing->prog == include_prog)
2635                                 return -EEXIST;
2636                 }
2637         }
2638
2639         if (exclude_prog && !found_exclude)
2640                 return -ENOENT;
2641
2642         /* How many progs (not NULL) will be in the new array? */
2643         new_prog_cnt = carry_prog_cnt;
2644         if (include_prog)
2645                 new_prog_cnt += 1;
2646
2647         /* Do we have any prog (not NULL) in the new array? */
2648         if (!new_prog_cnt) {
2649                 *new_array = NULL;
2650                 return 0;
2651         }
2652
2653         /* +1 as the end of prog_array is marked with NULL */
2654         array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2655         if (!array)
2656                 return -ENOMEM;
2657         new = array->items;
2658
2659         /* Fill in the new prog array */
2660         if (carry_prog_cnt) {
2661                 existing = old_array->items;
2662                 for (; existing->prog; existing++) {
2663                         if (existing->prog == exclude_prog ||
2664                             existing->prog == &dummy_bpf_prog.prog)
2665                                 continue;
2666
2667                         new->prog = existing->prog;
2668                         new->bpf_cookie = existing->bpf_cookie;
2669                         new++;
2670                 }
2671         }
2672         if (include_prog) {
2673                 new->prog = include_prog;
2674                 new->bpf_cookie = bpf_cookie;
2675                 new++;
2676         }
2677         new->prog = NULL;
2678         *new_array = array;
2679         return 0;
2680 }
2681
2682 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2683                              u32 *prog_ids, u32 request_cnt,
2684                              u32 *prog_cnt)
2685 {
2686         u32 cnt = 0;
2687
2688         if (array)
2689                 cnt = bpf_prog_array_length(array);
2690
2691         *prog_cnt = cnt;
2692
2693         /* return early if the user requested only the program count or nothing to copy */
2694         if (!request_cnt || !cnt)
2695                 return 0;
2696
2697         /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2698         return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2699                                                                      : 0;
2700 }
2701
2702 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2703                           struct bpf_map **used_maps, u32 len)
2704 {
2705         struct bpf_map *map;
2706         bool sleepable;
2707         u32 i;
2708
2709         sleepable = aux->prog->sleepable;
2710         for (i = 0; i < len; i++) {
2711                 map = used_maps[i];
2712                 if (map->ops->map_poke_untrack)
2713                         map->ops->map_poke_untrack(map, aux);
2714                 if (sleepable)
2715                         atomic64_dec(&map->sleepable_refcnt);
2716                 bpf_map_put(map);
2717         }
2718 }
2719
2720 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2721 {
2722         __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2723         kfree(aux->used_maps);
2724 }
2725
2726 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2727                           struct btf_mod_pair *used_btfs, u32 len)
2728 {
2729 #ifdef CONFIG_BPF_SYSCALL
2730         struct btf_mod_pair *btf_mod;
2731         u32 i;
2732
2733         for (i = 0; i < len; i++) {
2734                 btf_mod = &used_btfs[i];
2735                 if (btf_mod->module)
2736                         module_put(btf_mod->module);
2737                 btf_put(btf_mod->btf);
2738         }
2739 #endif
2740 }
2741
2742 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2743 {
2744         __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2745         kfree(aux->used_btfs);
2746 }
2747
2748 static void bpf_prog_free_deferred(struct work_struct *work)
2749 {
2750         struct bpf_prog_aux *aux;
2751         int i;
2752
2753         aux = container_of(work, struct bpf_prog_aux, work);
2754 #ifdef CONFIG_BPF_SYSCALL
2755         bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2756 #endif
2757 #ifdef CONFIG_CGROUP_BPF
2758         if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2759                 bpf_cgroup_atype_put(aux->cgroup_atype);
2760 #endif
2761         bpf_free_used_maps(aux);
2762         bpf_free_used_btfs(aux);
2763         if (bpf_prog_is_dev_bound(aux))
2764                 bpf_prog_dev_bound_destroy(aux->prog);
2765 #ifdef CONFIG_PERF_EVENTS
2766         if (aux->prog->has_callchain_buf)
2767                 put_callchain_buffers();
2768 #endif
2769         if (aux->dst_trampoline)
2770                 bpf_trampoline_put(aux->dst_trampoline);
2771         for (i = 0; i < aux->real_func_cnt; i++) {
2772                 /* We can just unlink the subprog poke descriptor table as
2773                  * it was originally linked to the main program and is also
2774                  * released along with it.
2775                  */
2776                 aux->func[i]->aux->poke_tab = NULL;
2777                 bpf_jit_free(aux->func[i]);
2778         }
2779         if (aux->real_func_cnt) {
2780                 kfree(aux->func);
2781                 bpf_prog_unlock_free(aux->prog);
2782         } else {
2783                 bpf_jit_free(aux->prog);
2784         }
2785 }
2786
2787 void bpf_prog_free(struct bpf_prog *fp)
2788 {
2789         struct bpf_prog_aux *aux = fp->aux;
2790
2791         if (aux->dst_prog)
2792                 bpf_prog_put(aux->dst_prog);
2793         bpf_token_put(aux->token);
2794         INIT_WORK(&aux->work, bpf_prog_free_deferred);
2795         schedule_work(&aux->work);
2796 }
2797 EXPORT_SYMBOL_GPL(bpf_prog_free);
2798
2799 /* RNG for unprivileged user space, with state separate from prandom_u32(). */
2800 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2801
2802 void bpf_user_rnd_init_once(void)
2803 {
2804         prandom_init_once(&bpf_user_rnd_state);
2805 }
2806
2807 BPF_CALL_0(bpf_user_rnd_u32)
2808 {
2809         /* Should someone ever have the rather unwise idea of using some
2810          * of the registers passed into this function, then note that
2811          * this function is called from native eBPF and classic-to-eBPF
2812          * transformations. Register assignments from both sides are
2813          * different, e.g. classic always sets fn(ctx, A, X) here.
2814          */
2815         struct rnd_state *state;
2816         u32 res;
2817
2818         state = &get_cpu_var(bpf_user_rnd_state);
2819         res = prandom_u32_state(state);
2820         put_cpu_var(bpf_user_rnd_state);
2821
2822         return res;
2823 }
2824
2825 BPF_CALL_0(bpf_get_raw_cpu_id)
2826 {
2827         return raw_smp_processor_id();
2828 }
2829
2830 /* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto bpf_set_retval_proto __weak;
const struct bpf_func_proto bpf_get_retval_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
	return NULL;
}

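/* Weak stub used when the tracing side of BPF is not built in; in mainline
 * the strong definition lives in kernel/trace/bpf_trace.c.
 */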
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
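
/* Note on the NULL .func above: bpf_tail_call() is never dispatched as an
 * ordinary helper call. The verifier rewrites calls to it into the
 * BPF_TAIL_CALL instruction, which the interpreter and JITs handle inline,
 * so only the argument types of this proto are ever consulted.
 */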

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}
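
/* A hedged sketch of the shape an arch backend's override takes; the body
 * below is pseudocode, not a real JIT, and emit_native_code() is
 * hypothetical. On success the backend points prog->bpf_func at the
 * generated image and sets prog->jited; on failure it returns the original
 * prog so the interpreter is used instead.
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		void *image = emit_native_code(prog);	// hypothetical
 *
 *		if (!image)
 *			return prog;	// fall back to the interpreter
 *		prog->bpf_func = (void *)image;
 *		prog->jited = 1;
 *		return prog;
 *	}
 */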

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}
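
/* A hedged example of what a backend opting in can do with the extra
 * instructions: in its emit loop it may recognize a verifier-inserted zero
 * extension via insn_is_zext() and skip it when the preceding instruction
 * already zero-extended the destination (the condition below is
 * hypothetical, not a real kernel API):
 *
 *	if (insn_is_zext(&insn[1]) && dst_already_zero_extended)
 *		insn++;
 */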

/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{
	return false;
}

bool __weak bpf_jit_supports_kfunc_call(void)
{
	return false;
}

bool __weak bpf_jit_supports_far_kfunc_call(void)
{
	return false;
}

bool __weak bpf_jit_supports_arena(void)
{
	return false;
}

u64 __weak bpf_arch_uaddress_limit(void)
{
#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
	return TASK_SIZE;
#else
	return 0;
#endif
}

/* Return TRUE if the JIT backend satisfies both of the following:
 * 1) it supports atomic_xchg() on pointer-sized words;
 * 2) on that architecture, xchg() on pointer-sized words is implemented
 *    the same way as atomic_xchg().
 */
bool __weak bpf_jit_supports_ptr_xchg(void)
{
	return false;
}
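
/* When this returns true, the verifier can, f.e., inline the
 * bpf_kptr_xchg() helper into a single BPF_XCHG instruction instead of
 * emitting a call (hedged: that rewrite lives in the verifier, not here).
 */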

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	return ERR_PTR(-ENOTSUPP);
}

int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{
	return -ENOTSUPP;
}

bool __weak bpf_jit_supports_exceptions(void)
{
	return false;
}

void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{
}

/* Stubs for configs without an MMU and for 32-bit kernels, where the bpf
 * arena is not supported.
 */
__weak const struct bpf_map_ops arena_map_ops;
__weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
{
	return 0;
}
__weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
{
	return 0;
}

#ifdef CONFIG_BPF_SYSCALL
static int __init bpf_global_ma_init(void)
{
	int ret;

	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
	bpf_global_ma_set = !ret;
	return ret;
}
late_initcall(bpf_global_ma_init);
#endif

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);