/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND	(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 9)
#define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 10)
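
/*
 * Illustrative sketch, not part of the original header: these per-driver bits
 * live in the flags word of the VMA's embedded &struct drm_gpuva, so a VMA
 * property is tested directly on it, e.g.:
 *
 *	if (vma->gpuva.flags & XE_VMA_READ_ONLY)
 *		return -EACCES;
 */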

/** struct xe_userptr - User pointer */
struct xe_userptr {
	/** @invalidate_link: Link for the vm::userptr.invalidated list */
	struct list_head invalidate_link;
	/** @repin_link: link into VM repin list if userptr. */
	struct list_head repin_link;
	/**
	 * @notifier: MMU notifier for user pointer (invalidation call back)
	 */
	struct mmu_interval_notifier notifier;
	/** @sgt: storage for a scatter gather table */
	struct sg_table sgt;
	/** @sg: allocated scatter gather table */
	struct sg_table *sg;
	/** @notifier_seq: notifier sequence number */
	unsigned long notifier_seq;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	u32 divisor;
#endif
};

struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv lock.
	 */
	union {
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/** @tile_invalidated: VMA has been invalidated */
	u8 tile_invalidated;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of bindings present for this VMA.
	 * Protected by vm->lock, vm->resv and, for userptrs,
	 * vm->userptr.notifier_lock for writing. Needs either for reading,
	 * but if reading is done under the vm->lock only, it needs to be held
	 * in write mode.
	 */
	u8 tile_present;

	/**
	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
	 */
	u16 pat_index;

	/**
	 * @ufence: The user fence that was provided with MAP.
	 * Needs to be signalled before UNMAP can be processed.
	 */
	struct xe_user_fence *ufence;
};

/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};
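
/*
 * Illustrative sketch, not part of the original header: the subclass embeds
 * the base vma, so it is recovered with container_of(), roughly what the
 * driver's conversion helper looks like (helper name hypothetical here):
 *
 *	static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
 *	{
 *		return container_of(vma, struct xe_userptr_vma, vma);
 *	}
 */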

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @xe: xe device */
	struct xe_device *xe;

	/* exec queue used for (un)binding VMAs */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/* per-tile page-table roots and scratch page tables */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
	unsigned long flags;
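
	/*
	 * Illustrative sketch, not part of the original header: bits 7:6 of
	 * @flags carry the tile id for per-tile (e.g. migration) VMs, so the
	 * helpers above are used roughly like:
	 *
	 *	flags = XE_VM_FLAG_MIGRATION | XE_VM_FLAG_SET_TILE_ID(tile);
	 *	...
	 *	u8 id = XE_VM_FLAG_TILE_ID(vm->flags);
	 */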

	/** @composite_fence_ctx: fence context used for composite fences */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects objects of anything attached to this
	 * VM
	 */
	struct rw_semaphore lock;
	/**
	 * @snap_mutex: Mutex used to guard insertions and removals from gpuva,
	 * so we can take a snapshot safely from devcoredump.
	 */
	struct mutex snap_mutex;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv must be held).
	 */
	struct list_head rebind_list;

	/** @rebind_fence: rebind fence from execbuf */
	struct dma_fence *rebind_fence;

	/**
	 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
	 * from an irq context can be the last put and the destroy needs to be
	 * able to sleep.
	 */
	struct work_struct destroy_work;
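
	/*
	 * Illustrative sketch, not part of the original header (function name
	 * hypothetical): the final reference can be dropped from dma-fence
	 * signalling (irq) context, so the sleeping part of the teardown is
	 * deferred to this worker:
	 *
	 *	static void vm_destroy_work_func(struct work_struct *w)
	 *	{
	 *		struct xe_vm *vm =
	 *			container_of(w, struct xe_vm, destroy_work);
	 *
	 *		...	(teardown that may sleep)
	 *	}
	 */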

	/**
	 * @rftree: range fence tree to track updates to page table structure.
	 * Used to implement conflict tracking between independent bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @pt_ops: page table operations */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct {
		/**
		 * @userptr.repin_list: list of VMAs which are user pointers,
		 * and need repinning. Protected by @lock.
		 */
		struct list_head repin_list;
		/**
		 * @userptr.notifier_lock: protects notifier in write mode and
		 * submission in read mode.
		 */
		struct rw_semaphore notifier_lock;
		/**
		 * @userptr.invalidated_lock: Protects the
		 * @userptr.invalidated list.
		 */
		spinlock_t invalidated_lock;
		/**
		 * @userptr.invalidated: List of invalidated userptrs, not yet
		 * picked up for revalidation. Protected from access with the
		 * @userptr.invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires the @userptr.notifier_lock in
		 * write mode.
		 */
		struct list_head invalidated;
	} userptr;
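
	/*
	 * Illustrative locking sketch, not part of the original header,
	 * following the rules documented above: the MMU notifier moves an
	 * entry onto the invalidated list, and the repin path later drains it.
	 *
	 *	(notifier callback, userptr.notifier_lock held in write mode)
	 *	spin_lock(&vm->userptr.invalidated_lock);
	 *	list_move_tail(&uvma->userptr.invalidate_link,
	 *		       &vm->userptr.invalidated);
	 *	spin_unlock(&vm->userptr.invalidated_lock);
	 *
	 *	(revalidation, vm->lock held in write mode)
	 *	spin_lock(&vm->userptr.invalidated_lock);
	 *	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
	 *				 userptr.invalidate_link)
	 *		list_del_init(&uvma->userptr.invalidate_link);
	 *	spin_unlock(&vm->userptr.invalidated_lock);
	 */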

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
		 * due to no work available. Protected by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
	} preempt;

	/** @usm: unified shared memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: allows tracking errors */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};

/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @is_null: is NULL binding */
	bool is_null;
	/** @dumpable: whether BO is dumped on GPU hang */
	bool dumpable;
	/** @pat_index: The pat index to use for this operation. */
	u16 pat_index;
};
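
/*
 * Illustrative sketch, not part of the original header: the bind ioctl
 * translates the per-operation uapi flags into these fields when building a
 * map operation, roughly:
 *
 *	op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 *	op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
 *	op->map.pat_index = pat_index;
 */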

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
	XE_VMA_OP_FIRST			= BIT(0),
	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
	XE_VMA_OP_LAST			= BIT(1),
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED		= BIT(2),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED	= BIT(3),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED	= BIT(4),
};
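
/*
 * Illustrative sketch, not part of the original header: in-/out-syncs are only
 * attached to the operations marked XE_VMA_OP_FIRST / XE_VMA_OP_LAST, and the
 * *_COMMITTED flags let an error path skip unwinding operations that never
 * took effect, roughly:
 *
 *	list_for_each_entry_reverse(op, &ops_list, link) {
 *		if (!(op->flags & XE_VMA_OP_COMMITTED))
 *			continue;
 *		...	(undo the committed operation)
 *	}
 */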

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/**
	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
	 * operation is processed
	 */
	struct drm_gpuva_ops *ops;
	/** @q: exec queue for this operation */
	struct xe_exec_queue *q;
	/**
	 * @syncs: syncs for this operation, only used on first and last
	 * operation
	 */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
	};
};

#endif