1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2021, Microsoft Corporation.
6 * Beau Belgrave <beaub@linux.microsoft.com>
9 #include <linux/bitmap.h>
10 #include <linux/cdev.h>
11 #include <linux/hashtable.h>
12 #include <linux/list.h>
14 #include <linux/uio.h>
15 #include <linux/ioctl.h>
16 #include <linux/jhash.h>
17 #include <linux/refcount.h>
18 #include <linux/trace_events.h>
19 #include <linux/tracefs.h>
20 #include <linux/types.h>
21 #include <linux/uaccess.h>
22 #include <linux/highmem.h>
23 #include <linux/init.h>
24 #include <linux/user_events.h>
25 #include "trace_dynevent.h"
26 #include "trace_output.h"
29 #define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
31 #define FIELD_DEPTH_TYPE 0
32 #define FIELD_DEPTH_NAME 1
33 #define FIELD_DEPTH_SIZE 2
/* Limit how long an event name plus args can be within the subsystem. */
36 #define MAX_EVENT_DESC 512
37 #define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
38 #define MAX_FIELD_ARRAY_SIZE 1024
41 * Internal bits (kernel side only) to keep track of connected probes:
42 * These are used when status is requested in text form about an event. These
43 * bits are compared against an internal byte on the event to determine which
44 * probes to print out to the user.
46 * These do not reflect the mapped bytes between the user and kernel space.
48 #define EVENT_STATUS_FTRACE BIT(0)
49 #define EVENT_STATUS_PERF BIT(1)
50 #define EVENT_STATUS_OTHER BIT(7)
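/*
 * Illustration only (not part of the original file): a hypothetical helper
 * showing how these internal status bits would be tested, for example when
 * deciding whether the perf probe line should be printed for an event.
 */
static inline bool user_event_status_has_perf(unsigned char status)
{
	/* True when a perf probe is currently attached to the event. */
	return (status & EVENT_STATUS_PERF) != 0;
}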
53 * Stores the system name, tables, and locks for a group of events. This
54 * allows isolation for events by various means.
56 struct user_event_group {
58 struct hlist_node node;
59 struct mutex reg_mutex;
60 DECLARE_HASHTABLE(register_table, 8);
63 /* Group for init_user_ns mapping, top-most group */
64 static struct user_event_group *init_group;
66 /* Max allowed events for the whole system */
67 static unsigned int max_user_events = 32768;
69 /* Current number of events on the whole system */
70 static unsigned int current_user_events;
 * Stores per-event properties. As users register events
 * within a file, a user_event might be created if it does not
 * already exist. These are globally used and their lifetime
 * is tied to the refcnt member. These cannot go away until the
 * last reference is put.
80 struct user_event_group *group;
81 struct tracepoint tracepoint;
82 struct trace_event_call call;
83 struct trace_event_class class;
84 struct dyn_event devent;
85 struct hlist_node node;
86 struct list_head fields;
87 struct list_head validators;
88 struct work_struct put_work;
96 * Stores per-mm/event properties that enable an address to be
97 * updated properly for each task. As tasks are forked, we use
98 * these to track enablement sites that are tied to an event.
100 struct user_event_enabler {
101 struct list_head mm_enablers_link;
102 struct user_event *event;
105 /* Track enable bit, flags, etc. Aligned for bitops. */
106 unsigned long values;
109 /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
110 #define ENABLE_VAL_BIT_MASK 0x3F
112 /* Bit 6 is for faulting status of enablement */
113 #define ENABLE_VAL_FAULTING_BIT 6
115 /* Bit 7 is for freeing status of enablement */
116 #define ENABLE_VAL_FREEING_BIT 7
118 /* Bit 8 is for marking 32-bit on 64-bit */
119 #define ENABLE_VAL_32_ON_64_BIT 8
121 #define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)
123 /* Only duplicate the bit and compat values */
124 #define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)
126 #define ENABLE_BITOPS(e) (&(e)->values)
128 #define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
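/*
 * Sketch for illustration (not part of the original file), assuming the bit
 * layout documented above: enabler_flag_bits() is a hypothetical helper that
 * splits off everything except the user-space bit selector.
 */
static inline unsigned long enabler_flag_bits(struct user_event_enabler *enabler)
{
	/* Bits 0-5 select the bit to update; the rest is flag/compat state. */
	return enabler->values & ~((unsigned long)ENABLE_VAL_BIT_MASK);
}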
130 /* Used for asynchronous faulting in of pages */
131 struct user_event_enabler_fault {
132 struct work_struct work;
133 struct user_event_mm *mm;
134 struct user_event_enabler *enabler;
138 static struct kmem_cache *fault_cache;
140 /* Global list of memory descriptors using user_events */
141 static LIST_HEAD(user_event_mms);
142 static DEFINE_SPINLOCK(user_event_mms_lock);
 * Stores per-file event references. As users register events
 * within a file, this structure is modified and freed via RCU.
147 * The lifetime of this struct is tied to the lifetime of the file.
148 * These are not shared and only accessible by the file that created it.
150 struct user_event_refs {
153 struct user_event *events[];
156 struct user_event_file_info {
157 struct user_event_group *group;
158 struct user_event_refs *refs;
161 #define VALIDATOR_ENSURE_NULL (1 << 0)
162 #define VALIDATOR_REL (1 << 1)
164 struct user_event_validator {
165 struct list_head user_event_link;
static inline void align_addr_bit(unsigned long *addr, int *bit,
				  unsigned long *flags)
{
	if (IS_ALIGNED(*addr, sizeof(long))) {
#ifdef __BIG_ENDIAN
		/* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
		if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
			*bit += 32;
#endif
		return;
	}
	*addr = ALIGN_DOWN(*addr, sizeof(long));
	/*
	 * We only support 32 and 64 bit values. The only time we need
	 * to align is a 32 bit value on a 64 bit kernel, which on LE
	 * is always 32 bits, and on BE requires no change when unaligned.
	 */
#ifdef __LITTLE_ENDIAN
	*bit += 32;
#endif
}
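/*
 * Worked example (hypothetical values, little-endian 64-bit kernel): a 32-bit
 * enable word at user address 0x1004 with bit 3 requested. Since 0x1004 is not
 * long-aligned, align_addr_bit() rounds the address down to 0x1000 and shifts
 * the bit to 3 + 32 = 35, so the update still lands on the same byte of the
 * original 32-bit word.
 */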
194 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
195 void *tpdata, bool *faulted);
197 static int user_event_parse(struct user_event_group *group, char *name,
198 char *args, char *flags,
199 struct user_event **newuser, int reg_flags);
201 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
202 static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
203 static void user_event_mm_put(struct user_event_mm *mm);
204 static int destroy_user_event(struct user_event *user);
206 static u32 user_event_key(char *name)
208 return jhash(name, strlen(name), 0);
211 static bool user_event_capable(u16 reg_flags)
213 /* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
214 if (reg_flags & USER_EVENT_REG_PERSIST) {
215 if (!perfmon_capable())
222 static struct user_event *user_event_get(struct user_event *user)
224 refcount_inc(&user->refcnt);
229 static void delayed_destroy_user_event(struct work_struct *work)
231 struct user_event *user = container_of(
232 work, struct user_event, put_work);
234 mutex_lock(&event_mutex);
236 if (!refcount_dec_and_test(&user->refcnt))
239 if (destroy_user_event(user)) {
241 * The only reason this would fail here is if we cannot
242 * update the visibility of the event. In this case the
243 * event stays in the hashtable, waiting for someone to
244 * attempt to delete it later.
246 pr_warn("user_events: Unable to delete event\n");
247 refcount_set(&user->refcnt, 1);
250 mutex_unlock(&event_mutex);
253 static void user_event_put(struct user_event *user, bool locked)
	 * When the event is not enabled for auto-delete there will always
	 * be at least 1 reference to the event. During event creation
	 * we initially set the refcnt to 2 to achieve this. In those cases
	 * the caller must acquire event_mutex and, after the decrement, check
	 * if the refcnt is 1, meaning this is the last reference. When auto
	 * delete is enabled, there will only be 1 ref, i.e. the refcnt is
	 * only set to 1 during creation to allow the checks below to go
	 * through upon the last put. The last put must always be done with
	 * the event_mutex held.
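	 *
	 * Worked example (illustrative): a persistent event starts with a
	 * refcnt of 2 (the event's own ref plus the registering caller), so,
	 * ignoring any enabler references, closing the file drops it to 1 and
	 * the event stays. A non-persistent event starts at 1, so the last
	 * close reaches 0 here and triggers the delayed destroy below.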
272 lockdep_assert_not_held(&event_mutex);
273 delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
275 lockdep_assert_held(&event_mutex);
276 delete = refcount_dec_and_test(&user->refcnt);
283 * We now have the event_mutex in all cases, which ensures that
284 * no new references will be taken until event_mutex is released.
285 * New references come through find_user_event(), which requires
286 * the event_mutex to be held.
289 if (user->reg_flags & USER_EVENT_REG_PERSIST) {
290 /* We should not get here when persist flag is set */
291 pr_alert("BUG: Auto-delete engaged on persistent event\n");
296 * Unfortunately we have to attempt the actual destroy in a work
297 * queue. This is because not all cases handle a trace_event_call
298 * being removed within the class->reg() operation for unregister.
300 INIT_WORK(&user->put_work, delayed_destroy_user_event);
303 * Since the event is still in the hashtable, we have to re-inc
304 * the ref count to 1. This count will be decremented and checked
305 * in the work queue to ensure it's still the last ref. This is
	 * needed because a user process could register the same event in
307 * between the time of event_mutex release and the work queue
308 * running the delayed destroy. If we removed the item now from
309 * the hashtable, this would result in a timing window where a
310 * user process would fail a register because the trace_event_call
311 * register would fail in the tracing layers.
313 refcount_set(&user->refcnt, 1);
315 if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
317 * If we fail we must wait for an admin to attempt delete or
318 * another register/close of the event, whichever is first.
320 pr_warn("user_events: Unable to queue delayed destroy\n");
	/* If we did not enter with event_mutex held, unlock it now */
325 mutex_unlock(&event_mutex);
328 static void user_event_group_destroy(struct user_event_group *group)
330 kfree(group->system_name);
334 static char *user_event_group_system_name(void)
337 int len = sizeof(USER_EVENTS_SYSTEM) + 1;
339 system_name = kmalloc(len, GFP_KERNEL);
344 snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);
349 static struct user_event_group *current_user_event_group(void)
354 static struct user_event_group *user_event_group_create(void)
356 struct user_event_group *group;
358 group = kzalloc(sizeof(*group), GFP_KERNEL);
363 group->system_name = user_event_group_system_name();
365 if (!group->system_name)
368 mutex_init(&group->reg_mutex);
369 hash_init(group->register_table);
374 user_event_group_destroy(group);
379 static void user_event_enabler_destroy(struct user_event_enabler *enabler,
382 list_del_rcu(&enabler->mm_enablers_link);
384 /* No longer tracking the event via the enabler */
385 user_event_put(enabler->event, locked);
390 static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
	 * Normally the attempt count is low; ensure it cannot be abused by
	 * bad user processes to cause excessive looping.
403 mmap_read_lock(mm->mm);
405 /* Ensure MM has tasks, cannot use after exit_mm() */
406 if (refcount_read(&mm->tasks) == 0) {
411 ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
414 mmap_read_unlock(mm->mm);
419 static int user_event_enabler_write(struct user_event_mm *mm,
420 struct user_event_enabler *enabler,
421 bool fixup_fault, int *attempt);
423 static void user_event_enabler_fault_fixup(struct work_struct *work)
425 struct user_event_enabler_fault *fault = container_of(
426 work, struct user_event_enabler_fault, work);
427 struct user_event_enabler *enabler = fault->enabler;
428 struct user_event_mm *mm = fault->mm;
429 unsigned long uaddr = enabler->addr;
430 int attempt = fault->attempt;
433 ret = user_event_mm_fault_in(mm, uaddr, attempt);
435 if (ret && ret != -ENOENT) {
436 struct user_event *user = enabler->event;
438 pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
439 mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
442 /* Prevent state changes from racing */
443 mutex_lock(&event_mutex);
445 /* User asked for enabler to be removed during fault */
446 if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
447 user_event_enabler_destroy(enabler, true);
	 * If we managed to get the page, re-issue the write. We do not
	 * want to get into a possible infinite loop, which is why we only
	 * attempt again directly if the page came in. If we couldn't get
	 * the page here, then we will try again the next time the event is
	 * enabled/disabled.
458 clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
461 mmap_read_lock(mm->mm);
462 user_event_enabler_write(mm, enabler, true, &attempt);
463 mmap_read_unlock(mm->mm);
466 mutex_unlock(&event_mutex);
468 /* In all cases we no longer need the mm or fault */
469 user_event_mm_put(mm);
470 kmem_cache_free(fault_cache, fault);
473 static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
474 struct user_event_enabler *enabler,
477 struct user_event_enabler_fault *fault;
479 fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
484 INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
485 fault->mm = user_event_mm_get(mm);
486 fault->enabler = enabler;
487 fault->attempt = attempt;
489 /* Don't try to queue in again while we have a pending fault */
490 set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
492 if (!schedule_work(&fault->work)) {
493 /* Allow another attempt later */
494 clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
496 user_event_mm_put(mm);
497 kmem_cache_free(fault_cache, fault);
505 static int user_event_enabler_write(struct user_event_mm *mm,
506 struct user_event_enabler *enabler,
507 bool fixup_fault, int *attempt)
509 unsigned long uaddr = enabler->addr;
513 int bit = ENABLE_BIT(enabler);
516 lockdep_assert_held(&event_mutex);
517 mmap_assert_locked(mm->mm);
521 /* Ensure MM has tasks, cannot use after exit_mm() */
522 if (refcount_read(&mm->tasks) == 0)
525 if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
526 test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
529 align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));
531 ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
534 if (unlikely(ret <= 0)) {
538 if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
539 pr_warn("user_events: Unable to queue fault handler\n");
544 kaddr = kmap_local_page(page);
545 ptr = kaddr + (uaddr & ~PAGE_MASK);
547 /* Update bit atomically, user tracers must be atomic as well */
548 if (enabler->event && enabler->event->status)
554 unpin_user_pages_dirty_lock(&page, 1, true);
559 static bool user_event_enabler_exists(struct user_event_mm *mm,
560 unsigned long uaddr, unsigned char bit)
562 struct user_event_enabler *enabler;
564 list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
565 if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
572 static void user_event_enabler_update(struct user_event *user)
574 struct user_event_enabler *enabler;
575 struct user_event_mm *next;
576 struct user_event_mm *mm;
579 lockdep_assert_held(&event_mutex);
	 * We need to build a one-shot list of all the mms that have an
	 * enabler for the user_event passed in. This list is only valid
	 * while holding the event_mutex. The only reason for this is that
	 * the global mm list is RCU protected and we use methods which
	 * can wait (mmap_read_lock and pin_user_pages_remote).
	 *
	 * NOTE: user_event_mm_get_all() increments the ref count of each
	 * mm that is added to the list to prevent removal timing windows.
	 * We must always put each mm after it is used, which may wait.
592 mm = user_event_mm_get_all(user);
596 mmap_read_lock(mm->mm);
598 list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
599 if (enabler->event == user) {
601 user_event_enabler_write(mm, enabler, true, &attempt);
605 mmap_read_unlock(mm->mm);
606 user_event_mm_put(mm);
611 static bool user_event_enabler_dup(struct user_event_enabler *orig,
612 struct user_event_mm *mm)
614 struct user_event_enabler *enabler;
616 /* Skip pending frees */
617 if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
620 enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
625 enabler->event = user_event_get(orig->event);
626 enabler->addr = orig->addr;
628 /* Only dup part of value (ignore future flags, etc) */
629 enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
631 /* Enablers not exposed yet, RCU not required */
632 list_add(&enabler->mm_enablers_link, &mm->enablers);
637 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
639 refcount_inc(&mm->refcnt);
644 static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
646 struct user_event_mm *found = NULL;
647 struct user_event_enabler *enabler;
648 struct user_event_mm *mm;
651 * We use the mm->next field to build a one-shot list from the global
652 * RCU protected list. To build this list the event_mutex must be held.
653 * This lets us build a list without requiring allocs that could fail
654 * when user based events are most wanted for diagnostics.
656 lockdep_assert_held(&event_mutex);
659 * We do not want to block fork/exec while enablements are being
660 * updated, so we use RCU to walk the current tasks that have used
661 * user_events ABI for 1 or more events. Each enabler found in each
662 * task that matches the event being updated has a write to reflect
663 * the kernel state back into the process. Waits/faults must not occur
664 * during this. So we scan the list under RCU for all the mm that have
	 * the event within it. This is needed because mmap_read_lock() can wait.
666 * Each user mm returned has a ref inc to handle remove RCU races.
670 list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
671 list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
672 if (enabler->event == user) {
674 found = user_event_mm_get(mm);
685 static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
687 struct user_event_mm *user_mm;
689 user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
695 INIT_LIST_HEAD(&user_mm->enablers);
696 refcount_set(&user_mm->refcnt, 1);
697 refcount_set(&user_mm->tasks, 1);
700 * The lifetime of the memory descriptor can slightly outlast
701 * the task lifetime if a ref to the user_event_mm is taken
702 * between list_del_rcu() and call_rcu(). Therefore we need
703 * to take a reference to it to ensure it can live this long
704 * under this corner case. This can also occur in clones that
705 * outlast the parent.
712 static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
716 spin_lock_irqsave(&user_event_mms_lock, flags);
717 list_add_rcu(&user_mm->mms_link, &user_event_mms);
718 spin_unlock_irqrestore(&user_event_mms_lock, flags);
720 t->user_event_mm = user_mm;
723 static struct user_event_mm *current_user_event_mm(void)
725 struct user_event_mm *user_mm = current->user_event_mm;
730 user_mm = user_event_mm_alloc(current);
735 user_event_mm_attach(user_mm, current);
737 refcount_inc(&user_mm->refcnt);
742 static void user_event_mm_destroy(struct user_event_mm *mm)
744 struct user_event_enabler *enabler, *next;
746 list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
747 user_event_enabler_destroy(enabler, false);
753 static void user_event_mm_put(struct user_event_mm *mm)
755 if (mm && refcount_dec_and_test(&mm->refcnt))
756 user_event_mm_destroy(mm);
759 static void delayed_user_event_mm_put(struct work_struct *work)
761 struct user_event_mm *mm;
763 mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
764 user_event_mm_put(mm);
767 void user_event_mm_remove(struct task_struct *t)
769 struct user_event_mm *mm;
774 mm = t->user_event_mm;
775 t->user_event_mm = NULL;
777 /* Clone will increment the tasks, only remove if last clone */
778 if (!refcount_dec_and_test(&mm->tasks))
781 /* Remove the mm from the list, so it can no longer be enabled */
782 spin_lock_irqsave(&user_event_mms_lock, flags);
783 list_del_rcu(&mm->mms_link);
784 spin_unlock_irqrestore(&user_event_mms_lock, flags);
787 * We need to wait for currently occurring writes to stop within
788 * the mm. This is required since exit_mm() snaps the current rss
789 * stats and clears them. On the final mmdrop(), check_mm() will
790 * report a bug if these increment.
792 * All writes/pins are done under mmap_read lock, take the write
793 * lock to ensure in-progress faults have completed. Faults that
794 * are pending but yet to run will check the task count and skip
795 * the fault since the mm is going away.
797 mmap_write_lock(mm->mm);
798 mmap_write_unlock(mm->mm);
801 * Put for mm must be done after RCU delay to handle new refs in
802 * between the list_del_rcu() and now. This ensures any get refs
803 * during rcu_read_lock() are accounted for during list removal.
806 * ---------------------------------------------------------------
807 * user_event_mm_remove() | rcu_read_lock();
808 * list_del_rcu() | list_for_each_entry_rcu();
809 * call_rcu() | refcount_inc();
810 * . | rcu_read_unlock();
811 * schedule_work() | .
812 * user_event_mm_put() | .
814 * mmdrop() cannot be called in the softirq context of call_rcu()
815 * so we use a work queue after call_rcu() to run within.
817 INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
818 queue_rcu_work(system_wq, &mm->put_rwork);
821 void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
823 struct user_event_mm *mm = user_event_mm_alloc(t);
824 struct user_event_enabler *enabler;
831 list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
832 if (!user_event_enabler_dup(enabler, mm))
838 user_event_mm_attach(mm, t);
842 user_event_mm_destroy(mm);
845 static bool current_user_event_enabler_exists(unsigned long uaddr,
848 struct user_event_mm *user_mm = current_user_event_mm();
854 exists = user_event_enabler_exists(user_mm, uaddr, bit);
856 user_event_mm_put(user_mm);
861 static struct user_event_enabler
862 *user_event_enabler_create(struct user_reg *reg, struct user_event *user,
865 struct user_event_enabler *enabler;
866 struct user_event_mm *user_mm;
867 unsigned long uaddr = (unsigned long)reg->enable_addr;
870 user_mm = current_user_event_mm();
875 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
880 enabler->event = user;
881 enabler->addr = uaddr;
882 enabler->values = reg->enable_bit;
884 #if BITS_PER_LONG >= 64
885 if (reg->enable_size == 4)
886 set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
890 /* Prevents state changes from racing with new enablers */
891 mutex_lock(&event_mutex);
893 /* Attempt to reflect the current state within the process */
894 mmap_read_lock(user_mm->mm);
895 *write_result = user_event_enabler_write(user_mm, enabler, false,
897 mmap_read_unlock(user_mm->mm);
900 * If the write works, then we will track the enabler. A ref to the
901 * underlying user_event is held by the enabler to prevent it going
902 * away while the enabler is still in use by a process. The ref is
	 * removed when the enabler is destroyed. This means an event cannot
904 * be forcefully deleted from the system until all tasks using it
905 * exit or run exec(), which includes forks and clones.
907 if (!*write_result) {
908 user_event_get(user);
909 list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
912 mutex_unlock(&event_mutex);
915 /* Attempt to fault-in and retry if it worked */
916 if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
923 user_event_mm_put(user_mm);
928 static __always_inline __must_check
929 bool user_event_last_ref(struct user_event *user)
933 if (user->reg_flags & USER_EVENT_REG_PERSIST)
936 return refcount_read(&user->refcnt) == last;
939 static __always_inline __must_check
940 size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
946 ret = copy_from_iter_nocache(addr, bytes, i);
953 static struct list_head *user_event_get_fields(struct trace_event_call *call)
955 struct user_event *user = (struct user_event *)call->data;
957 return &user->fields;
961 * Parses a register command for user_events
962 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 * Example event named 'test' with a 20 char 'msg' field and an unsigned int
 * 'id' field:
 *	test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective; they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
972 * Upon success user_event has its ref count increased by 1.
974 static int user_event_parse_cmd(struct user_event_group *group,
975 char *raw_command, struct user_event **newuser,
978 char *name = raw_command;
979 char *args = strpbrk(name, " ");
985 flags = strpbrk(name, ":");
990 return user_event_parse(group, name, args, flags, newuser, reg_flags);
993 static int user_field_array_size(const char *type)
995 const char *start = strchr(type, '[');
1003 if (strscpy(val, start + 1, sizeof(val)) <= 0)
1006 bracket = strchr(val, ']');
1013 if (kstrtouint(val, 0, &size))
1016 if (size > MAX_FIELD_ARRAY_SIZE)
1022 static int user_field_size(const char *type)
	/* long is not allowed from a user, since it's ambiguous in size */
1025 if (strcmp(type, "s64") == 0)
1027 if (strcmp(type, "u64") == 0)
1029 if (strcmp(type, "s32") == 0)
1031 if (strcmp(type, "u32") == 0)
1033 if (strcmp(type, "int") == 0)
1035 if (strcmp(type, "unsigned int") == 0)
1036 return sizeof(unsigned int);
1037 if (strcmp(type, "s16") == 0)
1039 if (strcmp(type, "u16") == 0)
1041 if (strcmp(type, "short") == 0)
1042 return sizeof(short);
1043 if (strcmp(type, "unsigned short") == 0)
1044 return sizeof(unsigned short);
1045 if (strcmp(type, "s8") == 0)
1047 if (strcmp(type, "u8") == 0)
1049 if (strcmp(type, "char") == 0)
1050 return sizeof(char);
1051 if (strcmp(type, "unsigned char") == 0)
1052 return sizeof(unsigned char);
1053 if (str_has_prefix(type, "char["))
1054 return user_field_array_size(type);
1055 if (str_has_prefix(type, "unsigned char["))
1056 return user_field_array_size(type);
1057 if (str_has_prefix(type, "__data_loc "))
1059 if (str_has_prefix(type, "__rel_loc "))
	/* Unknown basic type, error */
1066 static void user_event_destroy_validators(struct user_event *user)
1068 struct user_event_validator *validator, *next;
1069 struct list_head *head = &user->validators;
1071 list_for_each_entry_safe(validator, next, head, user_event_link) {
1072 list_del(&validator->user_event_link);
1077 static void user_event_destroy_fields(struct user_event *user)
1079 struct ftrace_event_field *field, *next;
1080 struct list_head *head = &user->fields;
1082 list_for_each_entry_safe(field, next, head, link) {
1083 list_del(&field->link);
1088 static int user_event_add_field(struct user_event *user, const char *type,
1089 const char *name, int offset, int size,
1090 int is_signed, int filter_type)
1092 struct user_event_validator *validator;
1093 struct ftrace_event_field *field;
1094 int validator_flags = 0;
1096 field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
1101 if (str_has_prefix(type, "__data_loc "))
1104 if (str_has_prefix(type, "__rel_loc ")) {
1105 validator_flags |= VALIDATOR_REL;
1112 if (strstr(type, "char") != NULL)
1113 validator_flags |= VALIDATOR_ENSURE_NULL;
1115 validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
1122 validator->flags = validator_flags;
1123 validator->offset = offset;
1125 /* Want sequential access when validating */
1126 list_add_tail(&validator->user_event_link, &user->validators);
1131 field->offset = offset;
1133 field->is_signed = is_signed;
1134 field->filter_type = filter_type;
1136 if (filter_type == FILTER_OTHER)
1137 field->filter_type = filter_assign_type(type);
1139 list_add(&field->link, &user->fields);
	 * Minimum size required from user writes; this does not include
	 * the size of trace_entry (common fields).
1145 user->min_size = (offset + size) - sizeof(struct trace_entry);
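	/*
	 * Worked example (illustrative, assuming an 8-byte struct trace_entry):
	 * for "char[20] msg; unsigned int id", msg lands at offset 8 with size
	 * 20 and id at offset 28 with size 4, so min_size ends up as
	 * (28 + 4) - 8 = 24 bytes of required user payload.
	 */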
1151 * Parses the values of a field within the description
1152 * Format: type name [size]
1154 static int user_event_parse_field(char *field, struct user_event *user,
1157 char *part, *type, *name;
1158 u32 depth = 0, saved_offset = *offset;
1159 int len, size = -EINVAL;
1160 bool is_struct = false;
1162 field = skip_spaces(field);
1167 /* Handle types that have a space within */
1168 len = str_has_prefix(field, "unsigned ");
1172 len = str_has_prefix(field, "struct ");
1178 len = str_has_prefix(field, "__data_loc unsigned ");
1182 len = str_has_prefix(field, "__data_loc ");
1186 len = str_has_prefix(field, "__rel_loc unsigned ");
1190 len = str_has_prefix(field, "__rel_loc ");
1197 field = strpbrk(field + len, " ");
1207 while ((part = strsep(&field, " ")) != NULL) {
1209 case FIELD_DEPTH_TYPE:
1212 case FIELD_DEPTH_NAME:
1215 case FIELD_DEPTH_SIZE:
1219 if (kstrtou32(part, 10, &size))
1227 if (depth < FIELD_DEPTH_SIZE || !name)
1230 if (depth == FIELD_DEPTH_SIZE)
1231 size = user_field_size(type);
1239 *offset = saved_offset + size;
1241 return user_event_add_field(user, type, name, saved_offset, size,
1242 type[0] != 'u', FILTER_OTHER);
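/*
 * For illustration (hypothetical field description): "struct mytype mydata 32"
 * parses as type "struct mytype", name "mydata" and an explicit size of 32
 * bytes, since a struct size cannot be derived from the type name alone.
 */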
1245 static int user_event_parse_fields(struct user_event *user, char *args)
1248 u32 offset = sizeof(struct trace_entry);
1254 while ((field = strsep(&args, ";")) != NULL) {
1255 ret = user_event_parse_field(field, user, &offset);
1264 static struct trace_event_fields user_event_fields_array[1];
1266 static const char *user_field_format(const char *type)
1268 if (strcmp(type, "s64") == 0)
1270 if (strcmp(type, "u64") == 0)
1272 if (strcmp(type, "s32") == 0)
1274 if (strcmp(type, "u32") == 0)
1276 if (strcmp(type, "int") == 0)
1278 if (strcmp(type, "unsigned int") == 0)
1280 if (strcmp(type, "s16") == 0)
1282 if (strcmp(type, "u16") == 0)
1284 if (strcmp(type, "short") == 0)
1286 if (strcmp(type, "unsigned short") == 0)
1288 if (strcmp(type, "s8") == 0)
1290 if (strcmp(type, "u8") == 0)
1292 if (strcmp(type, "char") == 0)
1294 if (strcmp(type, "unsigned char") == 0)
1296 if (strstr(type, "char[") != NULL)
	/* Unknown type, likely a struct; allowed, treat as 64-bit */
1303 static bool user_field_is_dyn_string(const char *type, const char **str_func)
1305 if (str_has_prefix(type, "__data_loc ")) {
1306 *str_func = "__get_str";
1310 if (str_has_prefix(type, "__rel_loc ")) {
1311 *str_func = "__get_rel_str";
1317 return strstr(type, "char") != NULL;
1320 #define LEN_OR_ZERO (len ? len - pos : 0)
1321 static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
1322 char *buf, int len, bool *colon)
1324 int pos = 0, i = *iout;
1328 for (; i < argc; ++i) {
1330 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1332 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);
1334 if (strchr(argv[i], ';')) {
1341 /* Actual set, advance i */
1348 static int user_field_set_string(struct ftrace_event_field *field,
1349 char *buf, int len, bool colon)
1353 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
1354 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1355 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
1357 if (str_has_prefix(field->type, "struct "))
1358 pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
1361 pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
1366 static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
1368 struct ftrace_event_field *field;
1369 struct list_head *head = &user->fields;
1370 int pos = 0, depth = 0;
1371 const char *str_func;
1373 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1375 list_for_each_entry_reverse(field, head, link) {
1377 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1379 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
1380 field->name, user_field_format(field->type));
1385 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1387 list_for_each_entry_reverse(field, head, link) {
1388 if (user_field_is_dyn_string(field->type, &str_func))
1389 pos += snprintf(buf + pos, LEN_OR_ZERO,
1390 ", %s(%s)", str_func, field->name);
1392 pos += snprintf(buf + pos, LEN_OR_ZERO,
1393 ", REC->%s", field->name);
1400 static int user_event_create_print_fmt(struct user_event *user)
1405 len = user_event_set_print_fmt(user, NULL, 0);
1407 print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
1412 user_event_set_print_fmt(user, print_fmt, len);
1414 user->call.print_fmt = print_fmt;
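	/*
	 * For example (illustrative, derived from the format table above): the
	 * 'test' event described earlier would get a print_fmt of roughly
	 * "msg=%s id=%u", REC->msg, REC->id.
	 */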
1419 static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
1421 struct trace_event *event)
1423 return print_event_fields(iter, event);
1426 static struct trace_event_functions user_event_funcs = {
1427 .trace = user_event_print_trace,
1430 static int user_event_set_call_visible(struct user_event *user, bool visible)
1433 const struct cred *old_cred;
1436 cred = prepare_creds();
1442 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * is "other" having read/write access to user_events_data/status.
1446 * When not locked down, processes may not have permissions to
1447 * add/remove calls themselves to tracefs. We need to temporarily
1448 * switch to root file permission to allow for this scenario.
1450 cred->fsuid = GLOBAL_ROOT_UID;
1452 old_cred = override_creds(cred);
1455 ret = trace_add_event_call(&user->call);
1457 ret = trace_remove_event_call(&user->call);
1459 revert_creds(old_cred);
1465 static int destroy_user_event(struct user_event *user)
1469 lockdep_assert_held(&event_mutex);
1471 /* Must destroy fields before call removal */
1472 user_event_destroy_fields(user);
1474 ret = user_event_set_call_visible(user, false);
1479 dyn_event_remove(&user->devent);
1480 hash_del(&user->node);
1482 user_event_destroy_validators(user);
1483 kfree(user->call.print_fmt);
1484 kfree(EVENT_NAME(user));
1487 if (current_user_events > 0)
1488 current_user_events--;
1490 pr_alert("BUG: Bad current_user_events\n");
1495 static struct user_event *find_user_event(struct user_event_group *group,
1496 char *name, u32 *outkey)
1498 struct user_event *user;
1499 u32 key = user_event_key(name);
1503 hash_for_each_possible(group->register_table, user, node, key)
1504 if (!strcmp(EVENT_NAME(user), name))
1505 return user_event_get(user);
1510 static int user_event_validate(struct user_event *user, void *data, int len)
1512 struct list_head *head = &user->validators;
1513 struct user_event_validator *validator;
1514 void *pos, *end = data + len;
1515 u32 loc, offset, size;
1517 list_for_each_entry(validator, head, user_event_link) {
1518 pos = data + validator->offset;
1520 /* Already done min_size check, no bounds check here */
1522 offset = loc & 0xffff;
1525 if (likely(validator->flags & VALIDATOR_REL))
1526 pos += offset + sizeof(loc);
1528 pos = data + offset;
1532 if (unlikely(pos > end))
1535 if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
1536 if (unlikely(*(char *)(pos - 1) != '\0'))
1544 * Writes the user supplied payload out to a trace file.
1546 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1547 void *tpdata, bool *faulted)
1549 struct trace_event_file *file;
1550 struct trace_entry *entry;
1551 struct trace_event_buffer event_buffer;
1552 size_t size = sizeof(*entry) + i->count;
1554 file = (struct trace_event_file *)tpdata;
1557 !(file->flags & EVENT_FILE_FL_ENABLED) ||
1558 trace_trigger_soft_disabled(file))
	/* Allocates and fills trace_entry; entry + 1 is the start of the data payload */
1562 entry = trace_event_buffer_reserve(&event_buffer, file, size);
1564 if (unlikely(!entry))
1567 if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
1570 if (!list_empty(&user->validators) &&
1571 unlikely(user_event_validate(user, entry, size)))
1574 trace_event_buffer_commit(&event_buffer);
1579 __trace_event_discard_commit(event_buffer.buffer,
1580 event_buffer.event);
1583 #ifdef CONFIG_PERF_EVENTS
1585 * Writes the user supplied payload out to perf ring buffer.
1587 static void user_event_perf(struct user_event *user, struct iov_iter *i,
1588 void *tpdata, bool *faulted)
1590 struct hlist_head *perf_head;
1592 perf_head = this_cpu_ptr(user->call.perf_events);
1594 if (perf_head && !hlist_empty(perf_head)) {
1595 struct trace_entry *perf_entry;
1596 struct pt_regs *regs;
1597 size_t size = sizeof(*perf_entry) + i->count;
1600 perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
1603 if (unlikely(!perf_entry))
1606 perf_fetch_caller_regs(regs);
1608 if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
1611 if (!list_empty(&user->validators) &&
1612 unlikely(user_event_validate(user, perf_entry, size)))
1615 perf_trace_buf_submit(perf_entry, size, context,
1616 user->call.event.type, 1, regs,
1622 perf_swevent_put_recursion_context(context);
1628 * Update the enabled bit among all user processes.
1630 static void update_enable_bit_for(struct user_event *user)
1632 struct tracepoint *tp = &user->tracepoint;
1635 if (atomic_read(&tp->key.enabled) > 0) {
1636 struct tracepoint_func *probe_func_ptr;
1637 user_event_func_t probe_func;
1639 rcu_read_lock_sched();
1641 probe_func_ptr = rcu_dereference_sched(tp->funcs);
1643 if (probe_func_ptr) {
1645 probe_func = probe_func_ptr->func;
1647 if (probe_func == user_event_ftrace)
1648 status |= EVENT_STATUS_FTRACE;
1649 #ifdef CONFIG_PERF_EVENTS
1650 else if (probe_func == user_event_perf)
1651 status |= EVENT_STATUS_PERF;
1654 status |= EVENT_STATUS_OTHER;
1655 } while ((++probe_func_ptr)->func);
1658 rcu_read_unlock_sched();
1661 user->status = status;
1663 user_event_enabler_update(user);
1667 * Register callback for our events from tracing sub-systems.
1669 static int user_event_reg(struct trace_event_call *call,
1670 enum trace_reg type,
1673 struct user_event *user = (struct user_event *)call->data;
1680 case TRACE_REG_REGISTER:
1681 ret = tracepoint_probe_register(call->tp,
1688 case TRACE_REG_UNREGISTER:
1689 tracepoint_probe_unregister(call->tp,
1694 #ifdef CONFIG_PERF_EVENTS
1695 case TRACE_REG_PERF_REGISTER:
1696 ret = tracepoint_probe_register(call->tp,
1697 call->class->perf_probe,
1703 case TRACE_REG_PERF_UNREGISTER:
1704 tracepoint_probe_unregister(call->tp,
1705 call->class->perf_probe,
1709 case TRACE_REG_PERF_OPEN:
1710 case TRACE_REG_PERF_CLOSE:
1711 case TRACE_REG_PERF_ADD:
1712 case TRACE_REG_PERF_DEL:
1719 user_event_get(user);
1720 update_enable_bit_for(user);
1723 update_enable_bit_for(user);
1724 user_event_put(user, true);
1728 static int user_event_create(const char *raw_command)
1730 struct user_event_group *group;
1731 struct user_event *user;
1735 if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1738 raw_command += USER_EVENTS_PREFIX_LEN;
1739 raw_command = skip_spaces(raw_command);
1741 name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1746 group = current_user_event_group();
1753 mutex_lock(&group->reg_mutex);
	/* Dyn events persist, otherwise they would be cleaned up immediately */
1756 ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
1759 user_event_put(user, false);
1761 mutex_unlock(&group->reg_mutex);
1769 static int user_event_show(struct seq_file *m, struct dyn_event *ev)
1771 struct user_event *user = container_of(ev, struct user_event, devent);
1772 struct ftrace_event_field *field;
1773 struct list_head *head;
1776 seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1778 head = trace_get_fields(&user->call);
1780 list_for_each_entry_reverse(field, head, link) {
1786 seq_printf(m, "%s %s", field->type, field->name);
1788 if (str_has_prefix(field->type, "struct "))
1789 seq_printf(m, " %d", field->size);
1799 static bool user_event_is_busy(struct dyn_event *ev)
1801 struct user_event *user = container_of(ev, struct user_event, devent);
1803 return !user_event_last_ref(user);
1806 static int user_event_free(struct dyn_event *ev)
1808 struct user_event *user = container_of(ev, struct user_event, devent);
1810 if (!user_event_last_ref(user))
1813 if (!user_event_capable(user->reg_flags))
1816 return destroy_user_event(user);
1819 static bool user_field_match(struct ftrace_event_field *field, int argc,
1820 const char **argv, int *iout)
1822 char *field_name = NULL, *dyn_field_name = NULL;
1823 bool colon = false, match = false;
1829 dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1832 len = user_field_set_string(field, field_name, 0, colon);
1837 dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1838 field_name = kmalloc(len, GFP_KERNEL);
1840 if (!dyn_field_name || !field_name)
1843 user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1846 user_field_set_string(field, field_name, len, colon);
1848 match = strcmp(dyn_field_name, field_name) == 0;
1850 kfree(dyn_field_name);
1856 static bool user_fields_match(struct user_event *user, int argc,
1859 struct ftrace_event_field *field;
1860 struct list_head *head = &user->fields;
1863 list_for_each_entry_reverse(field, head, link) {
1864 if (!user_field_match(field, argc, argv, &i))
1874 static bool user_event_match(const char *system, const char *event,
1875 int argc, const char **argv, struct dyn_event *ev)
1877 struct user_event *user = container_of(ev, struct user_event, devent);
1880 match = strcmp(EVENT_NAME(user), event) == 0 &&
1881 (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
1883 if (match && argc > 0)
1884 match = user_fields_match(user, argc, argv);
1885 else if (match && argc == 0)
1886 match = list_empty(&user->fields);
1891 static struct dyn_event_operations user_event_dops = {
1892 .create = user_event_create,
1893 .show = user_event_show,
1894 .is_busy = user_event_is_busy,
1895 .free = user_event_free,
1896 .match = user_event_match,
1899 static int user_event_trace_register(struct user_event *user)
1903 ret = register_trace_event(&user->call.event);
1908 ret = user_event_set_call_visible(user, true);
1911 unregister_trace_event(&user->call.event);
1917 * Parses the event name, arguments and flags then registers if successful.
1918 * The name buffer lifetime is owned by this method for success cases only.
1919 * Upon success the returned user_event has its ref count increased by 1.
1921 static int user_event_parse(struct user_event_group *group, char *name,
1922 char *args, char *flags,
1923 struct user_event **newuser, int reg_flags)
1927 struct user_event *user;
1931 /* Currently don't support any text based flags */
1935 if (!user_event_capable(reg_flags))
1938 /* Prevent dyn_event from racing */
1939 mutex_lock(&event_mutex);
1940 user = find_user_event(group, name, &key);
1941 mutex_unlock(&event_mutex);
1945 argv = argv_split(GFP_KERNEL, args, &argc);
1951 ret = user_fields_match(user, argc, (const char **)argv);
1955 ret = list_empty(&user->fields);
		 * Name is allocated by the caller; free it here since the event
		 * already exists. The caller only handles freeing on failure paths.
1971 user_event_put(user, false);
1975 user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
1980 INIT_LIST_HEAD(&user->class.fields);
1981 INIT_LIST_HEAD(&user->fields);
1982 INIT_LIST_HEAD(&user->validators);
1984 user->group = group;
1985 user->tracepoint.name = name;
1987 ret = user_event_parse_fields(user, args);
1992 ret = user_event_create_print_fmt(user);
1997 user->call.data = user;
1998 user->call.class = &user->class;
1999 user->call.name = name;
2000 user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
2001 user->call.tp = &user->tracepoint;
2002 user->call.event.funcs = &user_event_funcs;
2003 user->class.system = group->system_name;
2005 user->class.fields_array = user_event_fields_array;
2006 user->class.get_fields = user_event_get_fields;
2007 user->class.reg = user_event_reg;
2008 user->class.probe = user_event_ftrace;
2009 #ifdef CONFIG_PERF_EVENTS
2010 user->class.perf_probe = user_event_perf;
2013 mutex_lock(&event_mutex);
2015 if (current_user_events >= max_user_events) {
2020 ret = user_event_trace_register(user);
2025 user->reg_flags = reg_flags;
2027 if (user->reg_flags & USER_EVENT_REG_PERSIST) {
2028 /* Ensure we track self ref and caller ref (2) */
2029 refcount_set(&user->refcnt, 2);
2031 /* Ensure we track only caller ref (1) */
2032 refcount_set(&user->refcnt, 1);
2035 dyn_event_init(&user->devent, &user_event_dops);
2036 dyn_event_add(&user->devent, &user->call);
2037 hash_add(group->register_table, &user->node, key);
2038 current_user_events++;
2040 mutex_unlock(&event_mutex);
2045 mutex_unlock(&event_mutex);
2047 user_event_destroy_fields(user);
2048 user_event_destroy_validators(user);
2049 kfree(user->call.print_fmt);
2055 * Deletes a previously created event if it is no longer being used.
2057 static int delete_user_event(struct user_event_group *group, char *name)
2060 struct user_event *user = find_user_event(group, name, &key);
2065 user_event_put(user, true);
2067 if (!user_event_last_ref(user))
2070 if (!user_event_capable(user->reg_flags))
2073 return destroy_user_event(user);
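/*
 * Illustrative user-space deletion (not part of this file): an event that is
 * no longer busy can be removed by name, e.g.:
 *
 *	ioctl(data_fd, DIAG_IOCSDEL, "test");
 *
 * where data_fd is an open descriptor for user_events_data.
 */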
2077 * Validates the user payload and writes via iterator.
2079 static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
2081 struct user_event_file_info *info = file->private_data;
2082 struct user_event_refs *refs;
2083 struct user_event *user = NULL;
2084 struct tracepoint *tp;
2085 ssize_t ret = i->count;
2088 if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
2094 rcu_read_lock_sched();
2096 refs = rcu_dereference_sched(info->refs);
2099 * The refs->events array is protected by RCU, and new items may be
2100 * added. But the user retrieved from indexing into the events array
2101 * shall be immutable while the file is opened.
2103 if (likely(refs && idx < refs->count))
2104 user = refs->events[idx];
2106 rcu_read_unlock_sched();
2108 if (unlikely(user == NULL))
2111 if (unlikely(i->count < user->min_size))
2114 tp = &user->tracepoint;
	 * It's possible key.enabled becomes disabled after this check; however,
	 * we don't mind if a few events slip through in that condition.
2120 if (likely(atomic_read(&tp->key.enabled) > 0)) {
2121 struct tracepoint_func *probe_func_ptr;
2122 user_event_func_t probe_func;
2123 struct iov_iter copy;
2127 if (unlikely(fault_in_iov_iter_readable(i, i->count)))
2132 rcu_read_lock_sched();
2134 probe_func_ptr = rcu_dereference_sched(tp->funcs);
2136 if (probe_func_ptr) {
2139 probe_func = probe_func_ptr->func;
2140 tpdata = probe_func_ptr->data;
2141 probe_func(user, ©, tpdata, &faulted);
2142 } while ((++probe_func_ptr)->func);
2145 rcu_read_unlock_sched();
2147 if (unlikely(faulted))
2155 static int user_events_open(struct inode *node, struct file *file)
2157 struct user_event_group *group;
2158 struct user_event_file_info *info;
2160 group = current_user_event_group();
2165 info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
2170 info->group = group;
2172 file->private_data = info;
2177 static ssize_t user_events_write(struct file *file, const char __user *ubuf,
2178 size_t count, loff_t *ppos)
2182 if (unlikely(*ppos != 0))
2185 if (unlikely(import_ubuf(ITER_SOURCE, (char __user *)ubuf, count, &i)))
2188 return user_events_write_core(file, &i);
2191 static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
2193 return user_events_write_core(kp->ki_filp, i);
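/*
 * Illustrative user-space write (not part of this file), assuming a prior
 * registration returned write_index: the first 4 bytes of each write are the
 * index, followed by the payload described at registration, e.g.:
 *
 *	struct iovec io[2];
 *	__u32 idx = write_index;
 *
 *	io[0].iov_base = &idx;
 *	io[0].iov_len = sizeof(idx);
 *	io[1].iov_base = payload;
 *	io[1].iov_len = payload_len;
 *
 *	writev(data_fd, io, 2);
 */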
2196 static int user_events_ref_add(struct user_event_file_info *info,
2197 struct user_event *user)
2199 struct user_event_group *group = info->group;
2200 struct user_event_refs *refs, *new_refs;
2201 int i, size, count = 0;
2203 refs = rcu_dereference_protected(info->refs,
2204 lockdep_is_held(&group->reg_mutex));
2207 count = refs->count;
2209 for (i = 0; i < count; ++i)
2210 if (refs->events[i] == user)
2214 size = struct_size(refs, events, count + 1);
2216 new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
2221 new_refs->count = count + 1;
2223 for (i = 0; i < count; ++i)
2224 new_refs->events[i] = refs->events[i];
2226 new_refs->events[i] = user_event_get(user);
2228 rcu_assign_pointer(info->refs, new_refs);
2231 kfree_rcu(refs, rcu);
2236 static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
2241 ret = get_user(size, &ureg->size);
2246 if (size > PAGE_SIZE)
2249 if (size < offsetofend(struct user_reg, write_index))
2252 ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2257 /* Ensure only valid flags */
2258 if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
2261 /* Ensure supported size */
2262 switch (kreg->enable_size) {
2266 #if BITS_PER_LONG >= 64
2275 /* Ensure natural alignment */
2276 if (kreg->enable_addr % kreg->enable_size)
2279 /* Ensure bit range for size */
2280 if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
2283 /* Ensure accessible */
2284 if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
2294 * Registers a user_event on behalf of a user process.
2296 static long user_events_ioctl_reg(struct user_event_file_info *info,
2299 struct user_reg __user *ureg = (struct user_reg __user *)uarg;
2300 struct user_reg reg;
2301 struct user_event *user;
2302 struct user_event_enabler *enabler;
2307 ret = user_reg_get(ureg, ®);
	 * Prevent users from using the same address and bit multiple times
	 * within the same mm address space. This can cause unexpected behavior
	 * for user processes and is far easier to debug if it is explicitly
	 * an error upon registering.
2318 if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
2322 name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
2326 ret = PTR_ERR(name);
2330 ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
2337 ret = user_events_ref_add(info, user);
2339 /* No longer need parse ref, ref_add either worked or not */
2340 user_event_put(user, false);
2342 /* Positive number is index and valid */
	 * user_events_ref_add succeeded:
	 * At this point we have a user_event; its lifetime is bound by the
	 * reference count, not this file. If anything fails, the user_event
	 * still has a reference until the file is released. During release
	 * any remaining references (from user_events_ref_add) are decremented.
	 *
	 * Attempt to create an enabler, which also has a lifetime tied in the
	 * same way to the event. Once the task that caused the enabler to be
	 * created exits or issues exec(), the enablers it has created
	 * will be destroyed and the ref to the event will be decremented.
2358 enabler = user_event_enabler_create(®, user, &write_result);
2363 /* Write failed/faulted, give error back to caller */
2365 return write_result;
2367 put_user((u32)ret, &ureg->write_index);
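/*
 * Illustrative user-space registration (not part of this file), assuming
 * 'enabled' is a process-global int and data_fd is an open descriptor for
 * user_events_data:
 *
 *	struct user_reg reg = {0};
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 31;
 *	reg.enable_size = sizeof(enabled);
 *	reg.enable_addr = (__u64)&enabled;
 *	reg.name_args = (__u64)"test char[20] msg;unsigned int id";
 *
 *	ioctl(data_fd, DIAG_IOCSREG, &reg);
 *
 * On success reg.write_index holds the index to prefix each write with.
 */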
2373 * Deletes a user_event on behalf of a user process.
2375 static long user_events_ioctl_del(struct user_event_file_info *info,
2378 void __user *ubuf = (void __user *)uarg;
2382 name = strndup_user(ubuf, MAX_EVENT_DESC);
2385 return PTR_ERR(name);
2387 /* event_mutex prevents dyn_event from racing */
2388 mutex_lock(&event_mutex);
2389 ret = delete_user_event(info->group, name);
2390 mutex_unlock(&event_mutex);
2397 static long user_unreg_get(struct user_unreg __user *ureg,
2398 struct user_unreg *kreg)
2403 ret = get_user(size, &ureg->size);
2408 if (size > PAGE_SIZE)
2411 if (size < offsetofend(struct user_unreg, disable_addr))
2414 ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2416 /* Ensure no reserved values, since we don't support any yet */
2417 if (kreg->__reserved || kreg->__reserved2)
2423 static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
2424 unsigned long uaddr, unsigned char bit,
2425 unsigned long flags)
2427 struct user_event_enabler enabler;
2431 memset(&enabler, 0, sizeof(enabler));
2432 enabler.addr = uaddr;
2433 enabler.values = bit | flags;
2435 /* Prevents state changes from racing with new enablers */
2436 mutex_lock(&event_mutex);
2438 /* Force the bit to be cleared, since no event is attached */
2439 mmap_read_lock(user_mm->mm);
2440 result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
2441 mmap_read_unlock(user_mm->mm);
2443 mutex_unlock(&event_mutex);
2446 /* Attempt to fault-in and retry if it worked */
2447 if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
2455 * Unregisters an enablement address/bit within a task/user mm.
2457 static long user_events_ioctl_unreg(unsigned long uarg)
2459 struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
2460 struct user_event_mm *mm = current->user_event_mm;
2461 struct user_event_enabler *enabler, *next;
2462 struct user_unreg reg;
2463 unsigned long flags;
2466 ret = user_unreg_get(ureg, ®);
	 * The freeing and faulting flags are used to indicate if the enabler
	 * is in use at all. When faulting is set, a page-fault is occurring
	 * asynchronously. During an async fault, if freeing is set, the enabler
	 * will be destroyed. If no async fault is happening, we can destroy it
	 * now since we hold the event_mutex during these checks.
2484 mutex_lock(&event_mutex);
2486 list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
2487 if (enabler->addr == reg.disable_addr &&
2488 ENABLE_BIT(enabler) == reg.disable_bit) {
2489 set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
2491 /* We must keep compat flags for the clear */
2492 flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
2494 if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2495 user_event_enabler_destroy(enabler, true);
2497 /* Removed at least one */
2502 mutex_unlock(&event_mutex);
2504 /* Ensure bit is now cleared for user, regardless of event status */
2506 ret = user_event_mm_clear_bit(mm, reg.disable_addr,
2507 reg.disable_bit, flags);
2513 * Handles the ioctl from user mode to register or alter operations.
2515 static long user_events_ioctl(struct file *file, unsigned int cmd,
2518 struct user_event_file_info *info = file->private_data;
2519 struct user_event_group *group = info->group;
2524 mutex_lock(&group->reg_mutex);
2525 ret = user_events_ioctl_reg(info, uarg);
2526 mutex_unlock(&group->reg_mutex);
2530 mutex_lock(&group->reg_mutex);
2531 ret = user_events_ioctl_del(info, uarg);
2532 mutex_unlock(&group->reg_mutex);
2535 case DIAG_IOCSUNREG:
2536 mutex_lock(&group->reg_mutex);
2537 ret = user_events_ioctl_unreg(uarg);
2538 mutex_unlock(&group->reg_mutex);
2546 * Handles the final close of the file from user mode.
2548 static int user_events_release(struct inode *node, struct file *file)
2550 struct user_event_file_info *info = file->private_data;
2551 struct user_event_group *group;
2552 struct user_event_refs *refs;
2558 group = info->group;
2561 * Ensure refs cannot change under any situation by taking the
2562 * register mutex during the final freeing of the references.
2564 mutex_lock(&group->reg_mutex);
	 * The lifetime of refs has reached an end; it's tied to this file.
2573 * The underlying user_events are ref counted, and cannot be freed.
2574 * After this decrement, the user_events may be freed elsewhere.
2576 for (i = 0; i < refs->count; ++i)
2577 user_event_put(refs->events[i], false);
2580 file->private_data = NULL;
2582 mutex_unlock(&group->reg_mutex);
2590 static const struct file_operations user_data_fops = {
2591 .open = user_events_open,
2592 .write = user_events_write,
2593 .write_iter = user_events_write_iter,
2594 .unlocked_ioctl = user_events_ioctl,
2595 .release = user_events_release,
2598 static void *user_seq_start(struct seq_file *m, loff_t *pos)
2606 static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
2612 static void user_seq_stop(struct seq_file *m, void *p)
2616 static int user_seq_show(struct seq_file *m, void *p)
2618 struct user_event_group *group = m->private;
2619 struct user_event *user;
2621 int i, active = 0, busy = 0;
2626 mutex_lock(&group->reg_mutex);
2628 hash_for_each(group->register_table, i, user, node) {
2629 status = user->status;
2631 seq_printf(m, "%s", EVENT_NAME(user));
2637 seq_puts(m, " Used by");
2638 if (status & EVENT_STATUS_FTRACE)
2639 seq_puts(m, " ftrace");
2640 if (status & EVENT_STATUS_PERF)
2641 seq_puts(m, " perf");
2642 if (status & EVENT_STATUS_OTHER)
2643 seq_puts(m, " other");
2651 mutex_unlock(&group->reg_mutex);
2654 seq_printf(m, "Active: %d\n", active);
2655 seq_printf(m, "Busy: %d\n", busy);
2660 static const struct seq_operations user_seq_ops = {
2661 .start = user_seq_start,
2662 .next = user_seq_next,
2663 .stop = user_seq_stop,
2664 .show = user_seq_show,
2667 static int user_status_open(struct inode *node, struct file *file)
2669 struct user_event_group *group;
2672 group = current_user_event_group();
2677 ret = seq_open(file, &user_seq_ops);
2680 /* Chain group to seq_file */
2681 struct seq_file *m = file->private_data;
2689 static const struct file_operations user_status_fops = {
2690 .open = user_status_open,
2692 .llseek = seq_lseek,
2693 .release = seq_release,
2697 * Creates a set of tracefs files to allow user mode interactions.
2699 static int create_user_tracefs(void)
2701 struct dentry *edata, *emmap;
2703 edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2704 NULL, NULL, &user_data_fops);
2707 pr_warn("Could not create tracefs 'user_events_data' entry\n");
2711 emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2712 NULL, NULL, &user_status_fops);
2715 tracefs_remove(edata);
2716 pr_warn("Could not create tracefs 'user_events_mmap' entry\n");
2725 static int set_max_user_events_sysctl(struct ctl_table *table, int write,
2726 void *buffer, size_t *lenp, loff_t *ppos)
2730 mutex_lock(&event_mutex);
2732 ret = proc_douintvec(table, write, buffer, lenp, ppos);
2734 mutex_unlock(&event_mutex);
2739 static struct ctl_table user_event_sysctls[] = {
2741 .procname = "user_events_max",
2742 .data = &max_user_events,
2743 .maxlen = sizeof(unsigned int),
2745 .proc_handler = set_max_user_events_sysctl,
2750 static int __init trace_events_user_init(void)
2754 fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
2759 init_group = user_event_group_create();
2762 kmem_cache_destroy(fault_cache);
2766 ret = create_user_tracefs();
2769 pr_warn("user_events could not register with tracefs\n");
2770 user_event_group_destroy(init_group);
2771 kmem_cache_destroy(fault_cache);
2776 if (dyn_event_register(&user_event_dops))
2777 pr_warn("user_events could not register with dyn_events\n");
2779 register_sysctl_init("kernel", user_event_sysctls);
2784 fs_initcall(trace_events_user_init);