1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
35 #include "amdgpu_xgmi.h"
38 #include "kfd_migrate.h"
39 #include "kfd_smi_events.h"
44 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
46 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
48 /* Long enough to ensure no retry fault comes after svm range is restored and
49 * page table is updated.
51 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
52 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
53 #define dynamic_svm_range_dump(svms) \
54 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
56 #define dynamic_svm_range_dump(svms) \
57 do { if (0) svm_range_debug_dump(svms); } while (0)
60 /* A giant svm range is split into smaller ranges based on this limit. It is
61 * the minimum of all dGPU/APU 1/32 VRAM sizes, between 2MB and 1GB, rounded down to a power of 2. */
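/*
 * Worked example (illustrative, see svm_range_set_max_pages() below): a 16 GB
 * VRAM partition gives 16 GiB >> 17 = 131072 pages (512 MB with 4K pages),
 * which is then clamped to [512, 262144] pages (2MB..1GB) and rounded down to
 * a power of 2.
 */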
64 static uint64_t max_svm_range_pages;
66 struct criu_svm_metadata {
67 struct list_head list;
68 struct kfd_criu_svm_range_priv_data data;
71 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
73 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
74 const struct mmu_notifier_range *range,
75 unsigned long cur_seq);
77 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
78 uint64_t *bo_s, uint64_t *bo_l);
79 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
80 .invalidate = svm_range_cpu_invalidate_pagetables,
84 * svm_range_unlink - unlink svm_range from lists and interval tree
85 * @prange: svm range structure to be removed
87 * Remove the svm_range from the svms and svm_bo lists and from the svms interval tree.
90 * Context: The caller must hold svms->lock
92 static void svm_range_unlink(struct svm_range *prange)
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 prange, prange->start, prange->last);
98 spin_lock(&prange->svm_bo->list_lock);
99 list_del(&prange->svm_bo_list);
100 spin_unlock(&prange->svm_bo->list_lock);
103 list_del(&prange->list);
104 if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 interval_tree_remove(&prange->it_node, &prange->svms->objects);
109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 prange, prange->start, prange->last);
114 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 prange->start << PAGE_SHIFT,
116 prange->npages << PAGE_SHIFT,
121 * svm_range_add_to_svms - add svm range to svms
122 * @prange: svm range structure to be added
124 * Add the svm range to the svms interval tree and linked list
126 * Context: The caller must hold svms->lock
128 static void svm_range_add_to_svms(struct svm_range *prange)
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 prange, prange->start, prange->last);
133 list_move_tail(&prange->list, &prange->svms->list);
134 prange->it_node.start = prange->start;
135 prange->it_node.last = prange->last;
136 interval_tree_insert(&prange->it_node, &prange->svms->objects);
139 static void svm_range_remove_notifier(struct svm_range *prange)
141 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 prange->svms, prange,
143 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 prange->notifier.interval_tree.last >> PAGE_SHIFT);
146 if (prange->notifier.interval_tree.start != 0 &&
147 prange->notifier.interval_tree.last != 0)
148 mmu_interval_notifier_remove(&prange->notifier);
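/*
 * Note: dma_addr[] entries that refer to VRAM pages are tagged with the
 * SVM_RANGE_VRAM_DOMAIN bit (see svm_range_dma_map_dev() below). Such entries
 * are device addresses rather than real DMA mappings, so this helper reports
 * them as invalid and the unmap paths skip them.
 */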
152 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
154 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
155 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
159 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
160 unsigned long offset, unsigned long npages,
161 unsigned long *hmm_pfns, uint32_t gpuidx)
163 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
164 dma_addr_t *addr = prange->dma_addr[gpuidx];
165 struct device *dev = adev->dev;
170 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
173 prange->dma_addr[gpuidx] = addr;
177 for (i = 0; i < npages; i++) {
178 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
179 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
181 page = hmm_pfn_to_page(hmm_pfns[i]);
182 if (is_zone_device_page(page)) {
183 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
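/* Device-private (VRAM) page: rebase the hmm pfn from the pgmap range
 * onto the owning GPU's VRAM base and tag it so it is not treated as a
 * real DMA mapping.
 */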
185 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
186 bo_adev->vm_manager.vram_base_offset -
187 bo_adev->kfd.pgmap.range.start;
188 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
189 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
192 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
193 r = dma_mapping_error(dev, addr[i]);
195 dev_err(dev, "failed %d dma_map_page\n", r);
198 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
199 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
206 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
207 unsigned long offset, unsigned long npages,
208 unsigned long *hmm_pfns)
210 struct kfd_process *p;
214 p = container_of(prange->svms, struct kfd_process, svms);
216 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
217 struct kfd_process_device *pdd;
219 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
220 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
222 pr_debug("failed to find device idx %d\n", gpuidx);
226 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
235 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
236 unsigned long offset, unsigned long npages)
238 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
244 for (i = offset; i < offset + npages; i++) {
245 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
247 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
248 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
253 void svm_range_dma_unmap(struct svm_range *prange)
255 struct kfd_process_device *pdd;
256 dma_addr_t *dma_addr;
258 struct kfd_process *p;
261 p = container_of(prange->svms, struct kfd_process, svms);
263 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
264 dma_addr = prange->dma_addr[gpuidx];
268 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
270 pr_debug("failed to find device idx %d\n", gpuidx);
273 dev = &pdd->dev->adev->pdev->dev;
275 svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
279 static void svm_range_free(struct svm_range *prange, bool do_unmap)
281 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
282 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 prange->start, prange->last);
288 svm_range_vram_node_free(prange);
290 svm_range_dma_unmap(prange);
292 if (do_unmap && !p->xnack_enabled) {
293 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
294 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
295 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
298 /* free dma_addr array for each gpu */
299 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
300 if (prange->dma_addr[gpuidx]) {
301 kvfree(prange->dma_addr[gpuidx]);
302 prange->dma_addr[gpuidx] = NULL;
306 mutex_destroy(&prange->lock);
307 mutex_destroy(&prange->migrate_mutex);
312 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
313 uint8_t *granularity, uint32_t *flags)
315 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
316 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
319 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
323 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
324 uint64_t last, bool update_mem_usage)
326 uint64_t size = last - start + 1;
327 struct svm_range *prange;
328 struct kfd_process *p;
330 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
334 p = container_of(svms, struct kfd_process, svms);
335 if (!p->xnack_enabled && update_mem_usage &&
336 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
337 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
338 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
342 prange->npages = size;
344 prange->start = start;
346 INIT_LIST_HEAD(&prange->list);
347 INIT_LIST_HEAD(&prange->update_list);
348 INIT_LIST_HEAD(&prange->svm_bo_list);
349 INIT_LIST_HEAD(&prange->deferred_list);
350 INIT_LIST_HEAD(&prange->child_list);
351 atomic_set(&prange->invalid, 0);
352 prange->validate_timestamp = 0;
353 prange->vram_pages = 0;
354 mutex_init(&prange->migrate_mutex);
355 mutex_init(&prange->lock);
357 if (p->xnack_enabled)
358 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
361 svm_range_set_default_attributes(&prange->preferred_loc,
362 &prange->prefetch_loc,
363 &prange->granularity, &prange->flags);
365 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
370 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
372 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
378 static void svm_range_bo_release(struct kref *kref)
380 struct svm_range_bo *svm_bo;
382 svm_bo = container_of(kref, struct svm_range_bo, kref);
383 pr_debug("svm_bo 0x%p\n", svm_bo);
385 spin_lock(&svm_bo->list_lock);
386 while (!list_empty(&svm_bo->range_list)) {
387 struct svm_range *prange =
388 list_first_entry(&svm_bo->range_list,
389 struct svm_range, svm_bo_list);
390 /* list_del_init tells a concurrent svm_range_vram_node_new when
391 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
393 list_del_init(&prange->svm_bo_list);
394 spin_unlock(&svm_bo->list_lock);
396 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
397 prange->start, prange->last);
398 mutex_lock(&prange->lock);
399 prange->svm_bo = NULL;
400 /* prange should not hold vram page now */
401 WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
402 mutex_unlock(&prange->lock);
404 spin_lock(&svm_bo->list_lock);
406 spin_unlock(&svm_bo->list_lock);
407 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
408 /* We're not in the eviction worker.
409 * Signal the fence and synchronize with any
410 * pending eviction work.
412 dma_fence_signal(&svm_bo->eviction_fence->base);
413 cancel_work_sync(&svm_bo->eviction_work);
415 dma_fence_put(&svm_bo->eviction_fence->base);
416 amdgpu_bo_unref(&svm_bo->bo);
420 static void svm_range_bo_wq_release(struct work_struct *work)
422 struct svm_range_bo *svm_bo;
424 svm_bo = container_of(work, struct svm_range_bo, release_work);
425 svm_range_bo_release(&svm_bo->kref);
428 static void svm_range_bo_release_async(struct kref *kref)
430 struct svm_range_bo *svm_bo;
432 svm_bo = container_of(kref, struct svm_range_bo, kref);
433 pr_debug("svm_bo 0x%p\n", svm_bo);
434 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
435 schedule_work(&svm_bo->release_work);
438 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
440 kref_put(&svm_bo->kref, svm_range_bo_release_async);
443 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
446 kref_put(&svm_bo->kref, svm_range_bo_release);
450 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
452 mutex_lock(&prange->lock);
453 if (!prange->svm_bo) {
454 mutex_unlock(&prange->lock);
457 if (prange->ttm_res) {
458 /* We still have a reference, all is well */
459 mutex_unlock(&prange->lock);
462 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
464 * Migrate from GPU to GPU, remove range from source svm_bo->node
465 * range list, and return false to allocate svm_bo from the destination node.
468 if (prange->svm_bo->node != node) {
469 mutex_unlock(&prange->lock);
471 spin_lock(&prange->svm_bo->list_lock);
472 list_del_init(&prange->svm_bo_list);
473 spin_unlock(&prange->svm_bo->list_lock);
475 svm_range_bo_unref(prange->svm_bo);
478 if (READ_ONCE(prange->svm_bo->evicting)) {
480 struct svm_range_bo *svm_bo;
481 /* The BO is getting evicted,
482 * we need to get a new one
484 mutex_unlock(&prange->lock);
485 svm_bo = prange->svm_bo;
486 f = dma_fence_get(&svm_bo->eviction_fence->base);
487 svm_range_bo_unref(prange->svm_bo);
488 /* wait for the fence to avoid long spin-loop
489 * at list_empty_careful
491 dma_fence_wait(f, false);
494 /* The BO was still around and we got
495 * a new reference to it
497 mutex_unlock(&prange->lock);
498 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
499 prange->svms, prange->start, prange->last);
501 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
506 mutex_unlock(&prange->lock);
509 /* We need a new svm_bo. Spin-loop to wait for concurrent
510 * svm_range_bo_release to finish removing this range from
511 * its range list and setting prange->svm_bo to NULL. After this,
512 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
514 while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
520 static struct svm_range_bo *svm_range_bo_new(void)
522 struct svm_range_bo *svm_bo;
524 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
528 kref_init(&svm_bo->kref);
529 INIT_LIST_HEAD(&svm_bo->range_list);
530 spin_lock_init(&svm_bo->list_lock);
536 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
539 struct amdgpu_bo_param bp;
540 struct svm_range_bo *svm_bo;
541 struct amdgpu_bo_user *ubo;
542 struct amdgpu_bo *bo;
543 struct kfd_process *p;
544 struct mm_struct *mm;
547 p = container_of(prange->svms, struct kfd_process, svms);
548 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
549 prange->start, prange->last);
551 if (svm_range_validate_svm_bo(node, prange))
554 svm_bo = svm_range_bo_new();
556 pr_debug("failed to alloc svm bo\n");
559 mm = get_task_mm(p->lead_thread);
561 pr_debug("failed to get mm\n");
566 svm_bo->eviction_fence =
567 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
571 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
572 svm_bo->evicting = 0;
573 memset(&bp, 0, sizeof(bp));
574 bp.size = prange->npages * PAGE_SIZE;
575 bp.byte_align = PAGE_SIZE;
576 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
577 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
578 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
579 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
580 bp.type = ttm_bo_type_device;
583 bp.xcp_id_plus1 = node->xcp->id + 1;
585 r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
587 pr_debug("failed %d to create bo\n", r);
588 goto create_bo_failed;
592 pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
593 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
594 bp.xcp_id_plus1 - 1);
596 r = amdgpu_bo_reserve(bo, true);
598 pr_debug("failed %d to reserve bo\n", r);
599 goto reserve_bo_failed;
603 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
605 pr_debug("failed %d to sync bo\n", r);
606 amdgpu_bo_unreserve(bo);
607 goto reserve_bo_failed;
611 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
613 pr_debug("failed %d to reserve bo\n", r);
614 amdgpu_bo_unreserve(bo);
615 goto reserve_bo_failed;
617 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
619 amdgpu_bo_unreserve(bo);
622 prange->svm_bo = svm_bo;
623 prange->ttm_res = bo->tbo.resource;
626 spin_lock(&svm_bo->list_lock);
627 list_add(&prange->svm_bo_list, &svm_bo->range_list);
628 spin_unlock(&svm_bo->list_lock);
633 amdgpu_bo_unref(&bo);
635 dma_fence_put(&svm_bo->eviction_fence->base);
637 prange->ttm_res = NULL;
642 void svm_range_vram_node_free(struct svm_range *prange)
644 /* serialize prange->svm_bo unref */
645 mutex_lock(&prange->lock);
646 /* prange->svm_bo has not been unreferenced yet */
647 if (prange->ttm_res) {
648 prange->ttm_res = NULL;
649 mutex_unlock(&prange->lock);
650 svm_range_bo_unref(prange->svm_bo);
652 mutex_unlock(&prange->lock);
656 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
658 struct kfd_process *p;
659 struct kfd_process_device *pdd;
661 p = container_of(prange->svms, struct kfd_process, svms);
662 pdd = kfd_process_device_data_by_id(p, gpu_id);
664 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
671 struct kfd_process_device *
672 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
674 struct kfd_process *p;
676 p = container_of(prange->svms, struct kfd_process, svms);
678 return kfd_get_process_device_data(node, p);
681 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
683 struct ttm_operation_ctx ctx = { false, false };
685 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
687 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
691 svm_range_check_attr(struct kfd_process *p,
692 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
696 for (i = 0; i < nattr; i++) {
697 uint32_t val = attrs[i].value;
698 int gpuidx = MAX_GPU_INSTANCE;
700 switch (attrs[i].type) {
701 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
702 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
703 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
704 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
706 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
707 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
708 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
710 case KFD_IOCTL_SVM_ATTR_ACCESS:
711 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
712 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
713 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
715 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
717 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
719 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
722 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
727 pr_debug("no GPU 0x%x found\n", val);
729 } else if (gpuidx < MAX_GPU_INSTANCE &&
730 !test_bit(gpuidx, p->svms.bitmap_supported)) {
731 pr_debug("GPU 0x%x not supported\n", val);
740 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
741 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
742 bool *update_mapping)
747 for (i = 0; i < nattr; i++) {
748 switch (attrs[i].type) {
749 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
750 prange->preferred_loc = attrs[i].value;
752 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
753 prange->prefetch_loc = attrs[i].value;
755 case KFD_IOCTL_SVM_ATTR_ACCESS:
756 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
757 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
758 if (!p->xnack_enabled)
759 *update_mapping = true;
761 gpuidx = kfd_process_gpuidx_from_gpuid(p,
763 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
764 bitmap_clear(prange->bitmap_access, gpuidx, 1);
765 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
766 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
767 bitmap_set(prange->bitmap_access, gpuidx, 1);
768 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
770 bitmap_clear(prange->bitmap_access, gpuidx, 1);
771 bitmap_set(prange->bitmap_aip, gpuidx, 1);
774 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
775 *update_mapping = true;
776 prange->flags |= attrs[i].value;
778 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
779 *update_mapping = true;
780 prange->flags &= ~attrs[i].value;
782 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
783 prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
786 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
792 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
793 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
798 for (i = 0; i < nattr; i++) {
799 switch (attrs[i].type) {
800 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
801 if (prange->preferred_loc != attrs[i].value)
804 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
805 /* Prefetch should always trigger a migration even
806 * if the value of the attribute didn't change.
809 case KFD_IOCTL_SVM_ATTR_ACCESS:
810 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
811 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
812 gpuidx = kfd_process_gpuidx_from_gpuid(p,
814 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
815 if (test_bit(gpuidx, prange->bitmap_access) ||
816 test_bit(gpuidx, prange->bitmap_aip))
818 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
819 if (!test_bit(gpuidx, prange->bitmap_access))
822 if (!test_bit(gpuidx, prange->bitmap_aip))
826 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
827 if ((prange->flags & attrs[i].value) != attrs[i].value)
830 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
831 if ((prange->flags & attrs[i].value) != 0)
834 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
835 if (prange->granularity != attrs[i].value)
839 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
847 * svm_range_debug_dump - print all range information from svms
848 * @svms: svm range list header
850 * Debug output of svm range start, end and prefetch location from the svms
851 * interval tree and linked list
853 * Context: The caller must hold svms->lock
855 static void svm_range_debug_dump(struct svm_range_list *svms)
857 struct interval_tree_node *node;
858 struct svm_range *prange;
860 pr_debug("dump svms 0x%p list\n", svms);
861 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
863 list_for_each_entry(prange, &svms->list, list) {
864 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
865 prange, prange->start, prange->npages,
866 prange->start + prange->npages - 1,
870 pr_debug("dump svms 0x%p interval tree\n", svms);
871 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
872 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
874 prange = container_of(node, struct svm_range, it_node);
875 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
876 prange, prange->start, prange->npages,
877 prange->start + prange->npages - 1,
879 node = interval_tree_iter_next(node, 0, ~0ULL);
884 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
885 uint64_t offset, uint64_t *vram_pages)
887 unsigned char *src = (unsigned char *)psrc + offset;
891 dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
896 memcpy(dst, src, num_elements * size);
901 for (i = 0; i < num_elements; i++) {
903 temp = (dma_addr_t *)dst + i;
904 *temp = *((dma_addr_t *)src + i);
905 if (*temp & SVM_RANGE_VRAM_DOMAIN)
913 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
917 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
918 if (!src->dma_addr[i])
920 dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
921 sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
922 if (!dst->dma_addr[i])
930 svm_range_split_array(void *ppnew, void *ppold, size_t size,
931 uint64_t old_start, uint64_t old_n,
932 uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
934 unsigned char *new, *old, *pold;
939 pold = *(unsigned char **)ppold;
943 d = (new_start - old_start) * size;
944 /* get dma addr array for new range and calculate its vram page number */
945 new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
948 d = (new_start == old_start) ? new_n * size : 0;
949 old = svm_range_copy_array(pold, size, old_n, d, NULL);
955 *(void **)ppold = old;
956 *(void **)ppnew = new;
962 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
963 uint64_t start, uint64_t last)
965 uint64_t npages = last - start + 1;
968 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
969 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
970 sizeof(*old->dma_addr[i]), old->start,
971 npages, new->start, new->npages,
972 old->actual_loc ? &new->vram_pages : NULL);
977 old->vram_pages -= new->vram_pages;
983 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
984 uint64_t start, uint64_t last)
986 uint64_t npages = last - start + 1;
988 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
989 new->svms, new, new->start, start, last);
991 if (new->start == old->start) {
992 new->offset = old->offset;
993 old->offset += new->npages;
995 new->offset = old->offset + npages;
998 new->svm_bo = svm_range_bo_ref(old->svm_bo);
999 new->ttm_res = old->ttm_res;
1001 spin_lock(&new->svm_bo->list_lock);
1002 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1003 spin_unlock(&new->svm_bo->list_lock);
1009 * svm_range_split_adjust - split range and adjust
1012 * @old: the old range
1013 * @start: the old range adjust to start address in pages
1014 * @last: the old range adjust to last address in pages
1016 * Copy system memory dma_addr or vram ttm_res in old range to new
1017 * range from new_start up to size new->npages; the remaining old range is from new_start + new->npages to old_last.
1021 * 0 - OK, -ENOMEM - out of memory
1024 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1025 uint64_t start, uint64_t last)
1029 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1030 new->svms, new->start, old->start, old->last, start, last);
1032 if (new->start < old->start ||
1033 new->last > old->last) {
1034 WARN_ONCE(1, "invalid new range start or last\n");
1038 r = svm_range_split_pages(new, old, start, last);
1042 if (old->actual_loc && old->ttm_res) {
1043 r = svm_range_split_nodes(new, old, start, last);
1048 old->npages = last - start + 1;
1051 new->flags = old->flags;
1052 new->preferred_loc = old->preferred_loc;
1053 new->prefetch_loc = old->prefetch_loc;
1054 new->actual_loc = old->actual_loc;
1055 new->granularity = old->granularity;
1056 new->mapped_to_gpu = old->mapped_to_gpu;
1057 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1058 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1064 * svm_range_split - split a range in 2 ranges
1066 * @prange: the svm range to split
1067 * @start: the remaining range start address in pages
1068 * @last: the remaining range last address in pages
1069 * @new: the result new range generated
1072 * case 1: if start == prange->start
1073 * prange ==> prange[start, last]
1074 * new range [last + 1, prange->last]
1076 * case 2: if last == prange->last
1077 * prange ==> prange[start, last]
1078 * new range [prange->start, start - 1]
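 *
 * Illustrative example: splitting prange [0x1000 0x1fff] with start 0x1000
 * and last 0x17ff keeps prange as [0x1000 0x17ff] and returns the new range
 * [0x1800 0x1fff] (case 1).
 *
 * Return: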
1081 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1084 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1085 struct svm_range **new)
1087 uint64_t old_start = prange->start;
1088 uint64_t old_last = prange->last;
1089 struct svm_range_list *svms;
1092 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1093 old_start, old_last, start, last);
1095 if (old_start != start && old_last != last)
1097 if (start < old_start || last > old_last)
1100 svms = prange->svms;
1101 if (old_start == start)
1102 *new = svm_range_new(svms, last + 1, old_last, false);
1104 *new = svm_range_new(svms, old_start, start - 1, false);
1108 r = svm_range_split_adjust(*new, prange, start, last);
1110 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1111 r, old_start, old_last, start, last);
1112 svm_range_free(*new, false);
1120 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
1121 struct list_head *insert_list, struct list_head *remap_list)
1123 struct svm_range *tail = NULL;
1124 int r = svm_range_split(prange, prange->start, new_last, &tail);
1127 list_add(&tail->list, insert_list);
1128 if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
1129 list_add(&tail->update_list, remap_list);
1135 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
1136 struct list_head *insert_list, struct list_head *remap_list)
1138 struct svm_range *head = NULL;
1139 int r = svm_range_split(prange, new_start, prange->last, &head);
1142 list_add(&head->list, insert_list);
1143 if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
1144 list_add(&head->update_list, remap_list);
1150 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1151 struct svm_range *pchild, enum svm_work_list_ops op)
1153 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1154 pchild, pchild->start, pchild->last, prange, op);
1156 pchild->work_item.mm = mm;
1157 pchild->work_item.op = op;
1158 list_add_tail(&pchild->child_list, &prange->child_list);
1162 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1164 return (node_a->adev == node_b->adev ||
1165 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1169 svm_range_get_pte_flags(struct kfd_node *node,
1170 struct svm_range *prange, int domain)
1172 struct kfd_node *bo_node;
1173 uint32_t flags = prange->flags;
1174 uint32_t mapping_flags = 0;
1176 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1177 bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
1178 bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
1179 bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
1180 unsigned int mtype_local;
1182 if (domain == SVM_RANGE_VRAM_DOMAIN)
1183 bo_node = prange->svm_bo->node;
1185 switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
1186 case IP_VERSION(9, 4, 1):
1187 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1188 if (bo_node == node) {
1189 mapping_flags |= coherent ?
1190 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1192 mapping_flags |= coherent ?
1193 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1194 if (svm_nodes_in_same_hive(node, bo_node))
1198 mapping_flags |= coherent ?
1199 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1202 case IP_VERSION(9, 4, 2):
1203 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1204 if (bo_node == node) {
1205 mapping_flags |= coherent ?
1206 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1207 if (node->adev->gmc.xgmi.connected_to_cpu)
1210 mapping_flags |= coherent ?
1211 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1212 if (svm_nodes_in_same_hive(node, bo_node))
1216 mapping_flags |= coherent ?
1217 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1220 case IP_VERSION(9, 4, 3):
1222 mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
1224 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1225 amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1228 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1229 } else if (domain == SVM_RANGE_VRAM_DOMAIN) {
1230 /* local HBM region close to partition */
1231 if (bo_node->adev == node->adev &&
1232 (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1233 mapping_flags |= mtype_local;
1234 /* local HBM region far from partition or remote XGMI GPU
1235 * with regular system scope coherence
1237 else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
1238 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1239 /* PCIe P2P or extended system scope coherence */
1241 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1242 /* system memory accessed by the APU */
1243 } else if (node->adev->flags & AMD_IS_APU) {
1244 /* On NUMA systems, locality is determined per-page
1245 * in amdgpu_gmc_override_vm_pte_flags
1247 if (num_possible_nodes() <= 1)
1248 mapping_flags |= mtype_local;
1250 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1251 /* system memory accessed by the dGPU */
1253 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1257 mapping_flags |= coherent ?
1258 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1261 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1263 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1264 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1265 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1266 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1268 pte_flags = AMDGPU_PTE_VALID;
1269 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1270 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
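/* For example, a system-memory mapping ends up with AMDGPU_PTE_VALID |
 * AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED, plus the MTYPE and access bits
 * translated from mapping_flags below.
 */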
1272 pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
1277 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1278 uint64_t start, uint64_t last,
1279 struct dma_fence **fence)
1281 uint64_t init_pte_value = 0;
1283 pr_debug("[0x%llx 0x%llx]\n", start, last);
1285 return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
1286 last, init_pte_value, 0, 0, NULL, NULL,
1291 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1292 unsigned long last, uint32_t trigger)
1294 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1295 struct kfd_process_device *pdd;
1296 struct dma_fence *fence = NULL;
1297 struct kfd_process *p;
1301 if (!prange->mapped_to_gpu) {
1302 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1303 prange, prange->start, prange->last);
1307 if (prange->start == start && prange->last == last) {
1308 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1309 prange->mapped_to_gpu = false;
1312 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1314 p = container_of(prange->svms, struct kfd_process, svms);
1316 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1317 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1318 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1320 pr_debug("failed to find device idx %d\n", gpuidx);
1324 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1325 start, last, trigger);
1327 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1328 drm_priv_to_vm(pdd->drm_priv),
1329 start, last, &fence);
1334 r = dma_fence_wait(fence, false);
1335 dma_fence_put(fence);
1340 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1347 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1348 unsigned long offset, unsigned long npages, bool readonly,
1349 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1350 struct dma_fence **fence, bool flush_tlb)
1352 struct amdgpu_device *adev = pdd->dev->adev;
1353 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1355 unsigned long last_start;
1360 last_start = prange->start + offset;
1362 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1363 last_start, last_start + npages - 1, readonly);
1365 for (i = offset; i < offset + npages; i++) {
1366 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1367 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1369 /* Collect all pages in the same address range and memory domain
1370 * that can be mapped with a single call to update mapping.
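 * For example (illustrative), page domains {sys, sys, vram, vram, sys}
 * produce three amdgpu_vm_update_range() calls: pages [0 1], [2 3] and
 * [4 4].
 */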
1372 if (i < offset + npages - 1 &&
1373 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1376 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1377 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1379 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1381 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1383 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1384 prange->svms, last_start, prange->start + i,
1385 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1388 /* In dGPU mode we use the same vm_manager to allocate VRAM for
1389 * different memory partitions based on fpfn/lpfn, so we should use the
1390 * same vm_manager.vram_base_offset regardless of the memory partition.
1392 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
1393 NULL, last_start, prange->start + i,
1395 (last_start - prange->start) << PAGE_SHIFT,
1396 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1397 NULL, dma_addr, &vm->last_update);
1399 for (j = last_start - prange->start; j <= i; j++)
1400 dma_addr[j] |= last_domain;
1403 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1406 last_start = prange->start + i + 1;
1409 r = amdgpu_vm_update_pdes(adev, vm, false);
1411 pr_debug("failed %d to update directories 0x%lx\n", r,
1417 *fence = dma_fence_get(vm->last_update);
1424 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1425 unsigned long npages, bool readonly,
1426 unsigned long *bitmap, bool wait, bool flush_tlb)
1428 struct kfd_process_device *pdd;
1429 struct amdgpu_device *bo_adev = NULL;
1430 struct kfd_process *p;
1431 struct dma_fence *fence = NULL;
1435 if (prange->svm_bo && prange->ttm_res)
1436 bo_adev = prange->svm_bo->node->adev;
1438 p = container_of(prange->svms, struct kfd_process, svms);
1439 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1440 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1441 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1443 pr_debug("failed to find device idx %d\n", gpuidx);
1447 pdd = kfd_bind_process_to_device(pdd->dev, p);
1451 if (bo_adev && pdd->dev->adev != bo_adev &&
1452 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1453 pr_debug("cannot map to device idx %d\n", gpuidx);
1457 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1458 prange->dma_addr[gpuidx],
1459 bo_adev, wait ? &fence : NULL,
1465 r = dma_fence_wait(fence, false);
1466 dma_fence_put(fence);
1469 pr_debug("failed %d to dma fence wait\n", r);
1474 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1480 struct svm_validate_context {
1481 struct kfd_process *process;
1482 struct svm_range *prange;
1484 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1485 struct drm_exec exec;
1488 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1490 struct kfd_process_device *pdd;
1491 struct amdgpu_vm *vm;
1495 drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0);
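/* drm_exec_until_all_locked() re-runs this block until every page-directory
 * BO is locked without contention; drm_exec_retry_on_contention() rolls the
 * locking back and restarts the iteration when a lock is contended.
 */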
1496 drm_exec_until_all_locked(&ctx->exec) {
1497 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1498 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1500 pr_debug("failed to find device idx %d\n", gpuidx);
1504 vm = drm_priv_to_vm(pdd->drm_priv);
1506 r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1507 drm_exec_retry_on_contention(&ctx->exec);
1509 pr_debug("failed %d to reserve bo\n", r);
1515 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1516 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1518 pr_debug("failed to find device idx %d\n", gpuidx);
1523 r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
1524 drm_priv_to_vm(pdd->drm_priv),
1525 svm_range_bo_validate, NULL);
1527 pr_debug("failed %d validate pt bos\n", r);
1535 drm_exec_fini(&ctx->exec);
1539 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1541 drm_exec_fini(&ctx->exec);
1544 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1546 struct kfd_process_device *pdd;
1548 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1552 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1556 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1558 * To prevent concurrent destruction or change of range attributes, the
1559 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1560 * because that would block concurrent evictions and lead to deadlocks. To
1561 * serialize concurrent migrations or validations of the same range, the
1562 * prange->migrate_mutex must be held.
1564 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its eviction fence).
1567 * The following sequence ensures race-free validation and GPU mapping:
1569 * 1. Reserve page table (and SVM BO if range is in VRAM)
1570 * 2. hmm_range_fault to get page addresses (if system memory)
1571 * 3. DMA-map pages (if system memory)
1572 * 4-a. Take notifier lock
1573 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1574 * 4-c. Check that the range was not split or otherwise invalidated
1575 * 4-d. Update GPU page table
1576 * 4-e. Release notifier lock
1577 * 5. Release page table (and SVM BO) reservation
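 *
 * Step 4-b is the key to this: if the pages were invalidated between
 * hmm_range_fault and taking the notifier lock (amdgpu_hmm_range_get_pages_done()
 * returns true below), or if the range was split in parallel, the mapping is
 * not committed and the pages have to be validated again.
 */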
1579 static int svm_range_validate_and_map(struct mm_struct *mm,
1580 unsigned long map_start, unsigned long map_last,
1581 struct svm_range *prange, int32_t gpuidx,
1582 bool intr, bool wait, bool flush_tlb)
1584 struct svm_validate_context *ctx;
1585 unsigned long start, end, addr;
1586 struct kfd_process *p;
1591 ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1594 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1595 ctx->prange = prange;
1598 if (gpuidx < MAX_GPU_INSTANCE) {
1599 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1600 bitmap_set(ctx->bitmap, gpuidx, 1);
1601 } else if (ctx->process->xnack_enabled) {
1602 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1604 /* If we prefetch the range to a GPU, or a GPU retry fault migrates the
1605 * range to a GPU that has the ACCESS attribute for it, create the mapping on that GPU.
1608 if (prange->actual_loc) {
1609 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1610 prange->actual_loc);
1612 WARN_ONCE(1, "failed get device by id 0x%x\n",
1613 prange->actual_loc);
1617 if (test_bit(gpuidx, prange->bitmap_access))
1618 bitmap_set(ctx->bitmap, gpuidx, 1);
1622 * If prange is already mapped or has the always-mapped flag set,
1623 * update the mapping on GPUs with the ACCESS attribute
1625 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1626 if (prange->mapped_to_gpu ||
1627 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1628 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1631 bitmap_or(ctx->bitmap, prange->bitmap_access,
1632 prange->bitmap_aip, MAX_GPU_INSTANCE);
1635 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1640 if (prange->actual_loc && !prange->ttm_res) {
1641 /* This should never happen. actual_loc gets set by
1642 * svm_migrate_ram_to_vram after allocating a BO.
1644 WARN_ONCE(1, "VRAM BO missing during validation\n");
1649 svm_range_reserve_bos(ctx, intr);
1651 p = container_of(prange->svms, struct kfd_process, svms);
1652 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1654 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1655 if (kfd_svm_page_owner(p, idx) != owner) {
1661 start = map_start << PAGE_SHIFT;
1662 end = (map_last + 1) << PAGE_SHIFT;
1663 for (addr = start; !r && addr < end; ) {
1664 struct hmm_range *hmm_range;
1665 unsigned long map_start_vma;
1666 unsigned long map_last_vma;
1667 struct vm_area_struct *vma;
1668 unsigned long next = 0;
1669 unsigned long offset;
1670 unsigned long npages;
1673 vma = vma_lookup(mm, addr);
1675 readonly = !(vma->vm_flags & VM_WRITE);
1677 next = min(vma->vm_end, end);
1678 npages = (next - addr) >> PAGE_SHIFT;
1679 WRITE_ONCE(p->svms.faulting_task, current);
1680 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1681 readonly, owner, NULL,
1683 WRITE_ONCE(p->svms.faulting_task, NULL);
1685 pr_debug("failed %d to get svm range pages\n", r);
1694 offset = (addr >> PAGE_SHIFT) - prange->start;
1695 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1696 hmm_range->hmm_pfns);
1698 pr_debug("failed %d to dma map range\n", r);
1701 svm_range_lock(prange);
1702 if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) {
1703 pr_debug("hmm update the range, need validate again\n");
1707 if (!r && !list_empty(&prange->child_list)) {
1708 pr_debug("range split by unmap in parallel, validate again\n");
1713 map_start_vma = max(map_start, prange->start + offset);
1714 map_last_vma = min(map_last, prange->start + offset + npages - 1);
1715 if (map_start_vma <= map_last_vma) {
1716 offset = map_start_vma - prange->start;
1717 npages = map_last_vma - map_start_vma + 1;
1718 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1719 ctx->bitmap, wait, flush_tlb);
1723 if (!r && next == end)
1724 prange->mapped_to_gpu = true;
1726 svm_range_unlock(prange);
1731 svm_range_unreserve_bos(ctx);
1733 prange->validate_timestamp = ktime_get_boottime();
1742 * svm_range_list_lock_and_flush_work - flush pending deferred work
1744 * @svms: the svm range list
1745 * @mm: the mm structure
1747 * Context: Returns with mmap write lock held, pending deferred work flushed
1751 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1752 struct mm_struct *mm)
1755 flush_work(&svms->deferred_list_work);
1756 mmap_write_lock(mm);
1758 if (list_empty(&svms->deferred_range_list))
1760 mmap_write_unlock(mm);
1761 pr_debug("retry flush\n");
1762 goto retry_flush_work;
1765 static void svm_range_restore_work(struct work_struct *work)
1767 struct delayed_work *dwork = to_delayed_work(work);
1768 struct amdkfd_process_info *process_info;
1769 struct svm_range_list *svms;
1770 struct svm_range *prange;
1771 struct kfd_process *p;
1772 struct mm_struct *mm;
1777 svms = container_of(dwork, struct svm_range_list, restore_work);
1778 evicted_ranges = atomic_read(&svms->evicted_ranges);
1779 if (!evicted_ranges)
1782 pr_debug("restore svm ranges\n");
1784 p = container_of(svms, struct kfd_process, svms);
1785 process_info = p->kgd_process_info;
1787 /* Keep an mm reference while svm_range_validate_and_map maps ranges */
1788 mm = get_task_mm(p->lead_thread);
1790 pr_debug("svms 0x%p process mm gone\n", svms);
1794 mutex_lock(&process_info->lock);
1795 svm_range_list_lock_and_flush_work(svms, mm);
1796 mutex_lock(&svms->lock);
1798 evicted_ranges = atomic_read(&svms->evicted_ranges);
1800 list_for_each_entry(prange, &svms->list, list) {
1801 invalid = atomic_read(&prange->invalid);
1805 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1806 prange->svms, prange, prange->start, prange->last,
1810 * If the range is migrating, wait until the migration is done.
1812 mutex_lock(&prange->migrate_mutex);
1814 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
1815 MAX_GPU_INSTANCE, false, true, false);
1817 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1820 mutex_unlock(&prange->migrate_mutex);
1822 goto out_reschedule;
1824 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1825 goto out_reschedule;
1828 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1830 goto out_reschedule;
1834 r = kgd2kfd_resume_mm(mm);
1836 /* No recovery from this failure. Probably the CP is
1837 * hanging. No point trying again.
1839 pr_debug("failed %d to resume KFD\n", r);
1842 pr_debug("restore svm ranges successfully\n");
1845 mutex_unlock(&svms->lock);
1846 mmap_write_unlock(mm);
1847 mutex_unlock(&process_info->lock);
1849 /* If validation failed, reschedule another attempt */
1850 if (evicted_ranges) {
1851 pr_debug("reschedule to restore svm range\n");
1852 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1853 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1855 kfd_smi_event_queue_restore_rescheduled(mm);
1861 * svm_range_evict - evict svm range
1862 * @prange: svm range structure
1863 * @mm: current process mm_struct
1864 * @start: first page of the invalidated address range
1865 * @last: last page of the invalidated address range
1866 * @event: mmu notifier event when range is evicted or migrated
1868 * Stop all queues of the process to ensure the GPU doesn't access the memory, then
1869 * return to let the CPU evict the buffer and proceed with the CPU page table update.
 *
1871 * No lock is needed to sync the CPU page table invalidation with GPU execution.
1872 * If an invalidation happens while the restore work is running, the restore
1873 * work restarts to pick up the latest CPU page mapping for the GPU, then restarts the queues.
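 *
 * Without XNACK (or for ranges flagged KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED),
 * the first eviction quiesces the queues and schedules the restore worker to
 * revalidate and remap the ranges; with XNACK enabled, the affected pages are
 * simply unmapped from the GPUs and will be mapped again on the next retry
 * fault.
 */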
1877 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1878 unsigned long start, unsigned long last,
1879 enum mmu_notifier_event event)
1881 struct svm_range_list *svms = prange->svms;
1882 struct svm_range *pchild;
1883 struct kfd_process *p;
1886 p = container_of(svms, struct kfd_process, svms);
1888 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1889 svms, prange->start, prange->last, start, last);
1891 if (!p->xnack_enabled ||
1892 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1894 bool mapped = prange->mapped_to_gpu;
1896 list_for_each_entry(pchild, &prange->child_list, child_list) {
1897 if (!pchild->mapped_to_gpu)
1900 mutex_lock_nested(&pchild->lock, 1);
1901 if (pchild->start <= last && pchild->last >= start) {
1902 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1903 pchild->start, pchild->last);
1904 atomic_inc(&pchild->invalid);
1906 mutex_unlock(&pchild->lock);
1912 if (prange->start <= last && prange->last >= start)
1913 atomic_inc(&prange->invalid);
1915 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1916 if (evicted_ranges != 1)
1919 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1920 prange->svms, prange->start, prange->last);
1922 /* First eviction, stop the queues */
1923 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1925 pr_debug("failed to quiesce KFD\n");
1927 pr_debug("schedule to restore svm %p ranges\n", svms);
1928 queue_delayed_work(system_freezable_wq, &svms->restore_work,
1929 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1934 if (event == MMU_NOTIFY_MIGRATE)
1935 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1937 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1939 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1940 prange->svms, start, last);
1941 list_for_each_entry(pchild, &prange->child_list, child_list) {
1942 mutex_lock_nested(&pchild->lock, 1);
1943 s = max(start, pchild->start);
1944 l = min(last, pchild->last);
1946 svm_range_unmap_from_gpus(pchild, s, l, trigger);
1947 mutex_unlock(&pchild->lock);
1949 s = max(start, prange->start);
1950 l = min(last, prange->last);
1952 svm_range_unmap_from_gpus(prange, s, l, trigger);
1958 static struct svm_range *svm_range_clone(struct svm_range *old)
1960 struct svm_range *new;
1962 new = svm_range_new(old->svms, old->start, old->last, false);
1965 if (svm_range_copy_dma_addrs(new, old)) {
1966 svm_range_free(new, false);
1970 new->ttm_res = old->ttm_res;
1971 new->offset = old->offset;
1972 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1973 spin_lock(&new->svm_bo->list_lock);
1974 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1975 spin_unlock(&new->svm_bo->list_lock);
1977 new->flags = old->flags;
1978 new->preferred_loc = old->preferred_loc;
1979 new->prefetch_loc = old->prefetch_loc;
1980 new->actual_loc = old->actual_loc;
1981 new->granularity = old->granularity;
1982 new->mapped_to_gpu = old->mapped_to_gpu;
1983 new->vram_pages = old->vram_pages;
1984 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1985 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1990 void svm_range_set_max_pages(struct amdgpu_device *adev)
1993 uint64_t pages, _pages;
1994 uint64_t min_pages = 0;
1997 for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
1998 if (adev->kfd.dev->nodes[i]->xcp)
1999 id = adev->kfd.dev->nodes[i]->xcp->id;
2002 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
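/* i.e. 1/32 of the partition's VRAM in 4K pages: (size / 32) / 4096 == size >> 17 */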
2003 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2004 pages = rounddown_pow_of_two(pages);
2005 min_pages = min_not_zero(min_pages, pages);
2009 max_pages = READ_ONCE(max_svm_range_pages);
2010 _pages = min_not_zero(max_pages, min_pages);
2011 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2015 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2016 uint64_t max_pages, struct list_head *insert_list,
2017 struct list_head *update_list)
2019 struct svm_range *prange;
2022 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2023 max_pages, start, last);
2025 while (last >= start) {
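/* End each chunk at the next max_pages-aligned boundary (or at last), so
 * giant ranges are created as aligned pieces of at most max_pages pages.
 */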
2026 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2028 prange = svm_range_new(svms, start, l, true);
2031 list_add(&prange->list, insert_list);
2032 list_add(&prange->update_list, update_list);
2040 * svm_range_add - add svm range and handle overlap
2041 * @p: the range add to this process svms
2042 * @start: page size aligned
2043 * @size: page size aligned
2044 * @nattr: number of attributes
2045 * @attrs: array of attributes
2046 * @update_list: output, the ranges need validate and update GPU mapping
2047 * @insert_list: output, the ranges need insert to svms
2048 * @remove_list: output, the ranges are replaced and need remove from svms
2049 * @remap_list: output, remap unaligned svm ranges
2051 * Check if the virtual address range has overlap with any existing ranges,
2052 * split partly overlapping ranges and add new ranges in the gaps. All changes
2053 * should be applied to the range_list and interval tree transactionally. If
2054 * any range split or allocation fails, the entire update fails. Therefore any
2055 * existing overlapping svm_ranges are cloned and the original svm_ranges are left unchanged.
2058 * If the transaction succeeds, the caller can update and insert clones and
2059 * new ranges, then free the originals.
2061 * Otherwise the caller can free the clones and new ranges, while the old
2062 * svm_ranges remain unchanged.
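 *
 * Illustrative example: updating [0x2000 0x4fff] over an existing range
 * [0x1000 0x5fff] with different attributes clones the old range, splits the
 * clone into head [0x1000 0x1fff], middle [0x2000 0x4fff] and tail
 * [0x5000 0x5fff], puts the middle on update_list, all three pieces on
 * insert_list and the original range on remove_list.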
2064 * Context: Process context, caller must hold svms->lock
2067 * 0 - OK, otherwise error code
2070 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2071 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2072 struct list_head *update_list, struct list_head *insert_list,
2073 struct list_head *remove_list, struct list_head *remap_list)
2075 unsigned long last = start + size - 1UL;
2076 struct svm_range_list *svms = &p->svms;
2077 struct interval_tree_node *node;
2078 struct svm_range *prange;
2079 struct svm_range *tmp;
2080 struct list_head new_list;
2083 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2085 INIT_LIST_HEAD(update_list);
2086 INIT_LIST_HEAD(insert_list);
2087 INIT_LIST_HEAD(remove_list);
2088 INIT_LIST_HEAD(&new_list);
2089 INIT_LIST_HEAD(remap_list);
2091 node = interval_tree_iter_first(&svms->objects, start, last);
2093 struct interval_tree_node *next;
2094 unsigned long next_start;
2096 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2099 prange = container_of(node, struct svm_range, it_node);
2100 next = interval_tree_iter_next(node, start, last);
2101 next_start = min(node->last, last) + 1;
2103 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2104 prange->mapped_to_gpu) {
2106 } else if (node->start < start || node->last > last) {
2107 /* node intersects the update range and its attributes
2108 * will change. Clone and split it, apply updates only
2109 * to the overlapping part
2111 struct svm_range *old = prange;
2113 prange = svm_range_clone(old);
2119 list_add(&old->update_list, remove_list);
2120 list_add(&prange->list, insert_list);
2121 list_add(&prange->update_list, update_list);
2123 if (node->start < start) {
2124 pr_debug("change old range start\n");
2125 r = svm_range_split_head(prange, start,
2126 insert_list, remap_list);
2130 if (node->last > last) {
2131 pr_debug("change old range last\n");
2132 r = svm_range_split_tail(prange, last,
2133 insert_list, remap_list);
2138 /* The node is contained within start..last and only its attributes change; update it in place */
2141 list_add(&prange->update_list, update_list);
2144 /* insert a new node if needed */
2145 if (node->start > start) {
2146 r = svm_range_split_new(svms, start, node->start - 1,
2147 READ_ONCE(max_svm_range_pages),
2148 &new_list, update_list);
2157 /* add a final range at the end if needed */
2159 r = svm_range_split_new(svms, start, last,
2160 READ_ONCE(max_svm_range_pages),
2161 &new_list, update_list);
2165 list_for_each_entry_safe(prange, tmp, insert_list, list)
2166 svm_range_free(prange, false);
2167 list_for_each_entry_safe(prange, tmp, &new_list, list)
2168 svm_range_free(prange, true);
2170 list_splice(&new_list, insert_list);
2177 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2178 struct svm_range *prange)
2180 unsigned long start;
2183 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2184 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2186 if (prange->start == start && prange->last == last)
2189 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2190 prange->svms, prange, start, last, prange->start,
2193 if (start != 0 && last != 0) {
2194 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2195 svm_range_remove_notifier(prange);
2197 prange->it_node.start = prange->start;
2198 prange->it_node.last = prange->last;
2200 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2201 svm_range_add_notifier_locked(mm, prange);
2205 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2206 struct mm_struct *mm)
2208 switch (prange->work_item.op) {
2210 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2211 svms, prange, prange->start, prange->last);
2213 case SVM_OP_UNMAP_RANGE:
2214 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2215 svms, prange, prange->start, prange->last);
2216 svm_range_unlink(prange);
2217 svm_range_remove_notifier(prange);
2218 svm_range_free(prange, true);
2220 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2221 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2222 svms, prange, prange->start, prange->last);
2223 svm_range_update_notifier_and_interval_tree(mm, prange);
2225 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2226 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2227 svms, prange, prange->start, prange->last);
2228 svm_range_update_notifier_and_interval_tree(mm, prange);
2229 /* TODO: implement deferred validation and mapping */
2231 case SVM_OP_ADD_RANGE:
2232 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2233 prange->start, prange->last);
2234 svm_range_add_to_svms(prange);
2235 svm_range_add_notifier_locked(mm, prange);
2237 case SVM_OP_ADD_RANGE_AND_MAP:
2238 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2239 prange, prange->start, prange->last);
2240 svm_range_add_to_svms(prange);
2241 svm_range_add_notifier_locked(mm, prange);
2242 /* TODO: implement deferred validation and mapping */
2245 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2246 prange->work_item.op);
2250 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2252 struct kfd_process_device *pdd;
2253 struct kfd_process *p;
2257 p = container_of(svms, struct kfd_process, svms);
2260 drain = atomic_read(&svms->drain_pagefaults);
2264 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2269 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2271 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2272 pdd->dev->adev->irq.retry_cam_enabled ?
2273 &pdd->dev->adev->irq.ih :
2274 &pdd->dev->adev->irq.ih1);
2276 if (pdd->dev->adev->irq.retry_cam_enabled)
2277 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2278 &pdd->dev->adev->irq.ih_soft);
2281 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2283 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2287 static void svm_range_deferred_list_work(struct work_struct *work)
2289 struct svm_range_list *svms;
2290 struct svm_range *prange;
2291 struct mm_struct *mm;
2293 svms = container_of(work, struct svm_range_list, deferred_list_work);
2294 pr_debug("enter svms 0x%p\n", svms);
2296 spin_lock(&svms->deferred_list_lock);
2297 while (!list_empty(&svms->deferred_range_list)) {
2298 prange = list_first_entry(&svms->deferred_range_list,
2299 struct svm_range, deferred_list);
2300 spin_unlock(&svms->deferred_list_lock);
2302 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2303 prange->start, prange->last, prange->work_item.op);
2305 mm = prange->work_item.mm;
2307 mmap_write_lock(mm);
2309 /* Checking for the need to drain retry faults must be inside
2310 * mmap write lock to serialize with munmap notifiers.
2312 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2313 mmap_write_unlock(mm);
2314 svm_range_drain_retry_fault(svms);
2318 /* Removal from deferred_list must happen inside the mmap write lock, for two reasons:
2320 * 1. unmap_from_cpu may change work_item.op and add the range
2321 * to deferred_list again, causing a use-after-free bug.
2322 * 2. svm_range_list_lock_and_flush_work may hold the mmap write
2323 * lock and continue because deferred_list is empty, while the
2324 * deferred_list work is actually waiting for the mmap lock.
2326 spin_lock(&svms->deferred_list_lock);
2327 list_del_init(&prange->deferred_list);
2328 spin_unlock(&svms->deferred_list_lock);
2330 mutex_lock(&svms->lock);
2331 mutex_lock(&prange->migrate_mutex);
2332 while (!list_empty(&prange->child_list)) {
2333 struct svm_range *pchild;
2335 pchild = list_first_entry(&prange->child_list,
2336 struct svm_range, child_list);
2337 pr_debug("child prange 0x%p op %d\n", pchild,
2338 pchild->work_item.op);
2339 list_del_init(&pchild->child_list);
2340 svm_range_handle_list_op(svms, pchild, mm);
2342 mutex_unlock(&prange->migrate_mutex);
2344 svm_range_handle_list_op(svms, prange, mm);
2345 mutex_unlock(&svms->lock);
2346 mmap_write_unlock(mm);
2348 /* Pairs with mmget in svm_range_add_list_work */
2351 spin_lock(&svms->deferred_list_lock);
2353 spin_unlock(&svms->deferred_list_lock);
2354 pr_debug("exit svms 0x%p\n", svms);
2358 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2359 struct mm_struct *mm, enum svm_work_list_ops op)
2361 spin_lock(&svms->deferred_list_lock);
2362 /* if prange is on the deferred list */
2363 if (!list_empty(&prange->deferred_list)) {
2364 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2365 WARN_ONCE(prange->work_item.mm != mm, "mismatched mm\n");
2366 if (op != SVM_OP_NULL &&
2367 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2368 prange->work_item.op = op;
2370 prange->work_item.op = op;
2372 /* Pairs with mmput in deferred_list_work */
2374 prange->work_item.mm = mm;
2375 list_add_tail(&prange->deferred_list,
2376 &prange->svms->deferred_range_list);
2377 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2378 prange, prange->start, prange->last, op);
2380 spin_unlock(&svms->deferred_list_lock);
2383 void schedule_deferred_list_work(struct svm_range_list *svms)
2385 spin_lock(&svms->deferred_list_lock);
2386 if (!list_empty(&svms->deferred_range_list))
2387 schedule_work(&svms->deferred_list_work);
2388 spin_unlock(&svms->deferred_list_lock);
2392 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2393 struct svm_range *prange, unsigned long start,
2396 struct svm_range *head;
2397 struct svm_range *tail;
2399 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2400 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2401 prange->start, prange->last);
2404 if (start > prange->last || last < prange->start)
2407 head = tail = prange;
2408 if (start > prange->start)
2409 svm_range_split(prange, prange->start, start - 1, &tail);
2410 if (last < tail->last)
2411 svm_range_split(tail, last + 1, tail->last, &head);
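/* A sketch of the pieces after the splits above: when the unmapped region is in
 * the middle of prange, prange keeps the part before it, "tail" the part after
 * it, and "head" the unmapped middle itself; when the unmapped region reaches
 * the end, "tail" is the unmapped piece, and when it starts at the beginning,
 * "head" is. The if/else chain below queues the unmapped piece as an
 * SVM_OP_UNMAP_RANGE child and re-adds a surviving trailing piece with
 * SVM_OP_ADD_RANGE.
 */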
2413 if (head != prange && tail != prange) {
2414 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2415 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2416 } else if (tail != prange) {
2417 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2418 } else if (head != prange) {
2419 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2420 } else if (parent != prange) {
2421 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2426 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2427 unsigned long start, unsigned long last)
2429 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2430 struct svm_range_list *svms;
2431 struct svm_range *pchild;
2432 struct kfd_process *p;
2436 p = kfd_lookup_process_by_mm(mm);
2441 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2442 prange, prange->start, prange->last, start, last);
2444 /* Make sure pending page faults are drained in the deferred worker
2445 * before the range is freed to avoid straggler interrupts on
2446 * unmapped memory causing "phantom faults".
2448 atomic_inc(&svms->drain_pagefaults);
2450 unmap_parent = start <= prange->start && last >= prange->last;
2452 list_for_each_entry(pchild, &prange->child_list, child_list) {
2453 mutex_lock_nested(&pchild->lock, 1);
2454 s = max(start, pchild->start);
2455 l = min(last, pchild->last);
2457 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2458 svm_range_unmap_split(mm, prange, pchild, start, last);
2459 mutex_unlock(&pchild->lock);
2461 s = max(start, prange->start);
2462 l = min(last, prange->last);
2464 svm_range_unmap_from_gpus(prange, s, l, trigger);
2465 svm_range_unmap_split(mm, prange, prange, start, last);
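/* If the CPU unmap covered the whole parent range, queue the parent for removal
 * in the deferred worker (SVM_OP_UNMAP_RANGE); otherwise only the unmapped
 * pieces were split off above, so just schedule an interval tree and notifier
 * update for the shrunken parent (SVM_OP_UPDATE_RANGE_NOTIFIER).
 */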
2468 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2470 svm_range_add_list_work(svms, prange, mm,
2471 SVM_OP_UPDATE_RANGE_NOTIFIER);
2472 schedule_deferred_list_work(svms);
2474 kfd_unref_process(p);
2478 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2479 * @mni: mmu_interval_notifier struct
2480 * @range: mmu_notifier_range struct
2481 * @cur_seq: value to pass to mmu_interval_set_seq()
2483 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2484 * otherwise it comes from migration or from a CPU page invalidation callback.
2486 * For an unmap event, unmap the range from the GPUs, remove the prange from the
2487 * svms in a delayed work thread, and split the prange if only part of it is unmapped.
2489 * For an invalidation event, if GPU retry faults are not enabled, evict the queues,
2490 * then schedule svm_range_restore_work to update the GPU mapping and resume the queues.
2491 * If GPU retry faults are enabled, unmap the svm range from the GPU; the retry fault
2492 * will update the GPU mapping to recover.
2494 * Context: mmap lock, notifier_invalidate_start lock are held
2495 * for invalidate event, prange lock is held if this is from migration
2498 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2499 const struct mmu_notifier_range *range,
2500 unsigned long cur_seq)
2502 struct svm_range *prange;
2503 unsigned long start;
2506 if (range->event == MMU_NOTIFY_RELEASE)
2508 if (!mmget_not_zero(mni->mm))
2511 start = mni->interval_tree.start;
2512 last = mni->interval_tree.last;
2513 start = max(start, range->start) >> PAGE_SHIFT;
2514 last = min(last, range->end - 1) >> PAGE_SHIFT;
2515 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2516 start, last, range->start >> PAGE_SHIFT,
2517 (range->end - 1) >> PAGE_SHIFT,
2518 mni->interval_tree.start >> PAGE_SHIFT,
2519 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2521 prange = container_of(mni, struct svm_range, notifier);
2523 svm_range_lock(prange);
2524 mmu_interval_set_seq(mni, cur_seq);
2526 switch (range->event) {
2527 case MMU_NOTIFY_UNMAP:
2528 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2531 svm_range_evict(prange, mni->mm, start, last, range->event);
2535 svm_range_unlock(prange);
2542 * svm_range_from_addr - find svm range from fault address
2543 * @svms: svm range list header
2544 * @addr: address to search range interval tree, in pages
2545 * @parent: parent range if range is on child list
2547 * Context: The caller must hold svms->lock
2549 * Return: the svm_range found or NULL
2552 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2553 struct svm_range **parent)
2555 struct interval_tree_node *node;
2556 struct svm_range *prange;
2557 struct svm_range *pchild;
2559 node = interval_tree_iter_first(&svms->objects, addr, addr);
2563 prange = container_of(node, struct svm_range, it_node);
2564 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2565 addr, prange->start, prange->last, node->start, node->last);
2567 if (addr >= prange->start && addr <= prange->last) {
2572 list_for_each_entry(pchild, &prange->child_list, child_list)
2573 if (addr >= pchild->start && addr <= pchild->last) {
2574 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2575 addr, pchild->start, pchild->last);
2584 /* svm_range_best_restore_location - decide the best fault restore location
2585 * @prange: svm range structure
2586 * @node: the GPU node on which the vm fault happened; @gpuidx returns the faulting gpu index
2588 * This is only called when xnack is on, to decide the best location to restore
2589 * the range mapping after a GPU vm fault. The caller uses the best location to
2590 * migrate the range if the actual location is not the best location, then
2591 * updates the GPU page table mapping to the best location.
2593 * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2594 * If the faulting gpu idx is on the range ACCESSIBLE bitmap, best_loc is the faulting gpu.
2595 * If the faulting gpu idx is on the range ACCESSIBLE_IN_PLACE bitmap, then
2596 * if the range actual loc is cpu, best_loc is cpu;
2597 * if the faulting gpu is in the same xgmi hive as the range actual loc gpu, best_loc is the actual loc.
2599 * Otherwise, the GPU has no access and best_loc is -1.
2602 * Return: -1 means the faulting GPU has no access,
2603 * 0 for CPU, or a GPU id.
2606 svm_range_best_restore_location(struct svm_range *prange,
2607 struct kfd_node *node,
2610 struct kfd_node *bo_node, *preferred_node;
2611 struct kfd_process *p;
2615 p = container_of(prange->svms, struct kfd_process, svms);
2617 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2619 pr_debug("failed to get gpuid from kgd\n");
2623 if (node->adev->gmc.is_app_apu)
2626 if (prange->preferred_loc == gpuid ||
2627 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2628 return prange->preferred_loc;
2629 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2630 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2631 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2632 return prange->preferred_loc;
2636 if (test_bit(*gpuidx, prange->bitmap_access))
2639 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2640 if (!prange->actual_loc)
2643 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2644 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2645 return prange->actual_loc;
2654 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2655 unsigned long *start, unsigned long *last,
2656 bool *is_heap_stack)
2658 struct vm_area_struct *vma;
2659 struct interval_tree_node *node;
2660 unsigned long start_limit, end_limit;
2662 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2664 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2668 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2670 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2671 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2672 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2673 (unsigned long)ALIGN(addr + 1, 2UL << 8));
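/* 2UL << 8 is 512 pages, i.e. a 2MB window with 4KB pages: the new range is
 * initially limited to the 2MB-aligned block around the faulting address and
 * clamped to the containing VMA; below it is further clamped so it does not
 * overlap neighbouring svm ranges.
 */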
2674 /* First range that starts after the fault address */
2675 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2677 end_limit = min(end_limit, node->start);
2678 /* Last range that ends before the fault address */
2679 node = container_of(rb_prev(&node->rb),
2680 struct interval_tree_node, rb);
2682 /* Last range must end before addr because
2683 * there was no range after addr
2685 node = container_of(rb_last(&p->svms.objects.rb_root),
2686 struct interval_tree_node, rb);
2689 if (node->last >= addr) {
2690 WARN(1, "Overlap with prev node and page fault addr\n");
2693 start_limit = max(start_limit, node->last + 1);
2696 *start = start_limit;
2697 *last = end_limit - 1;
2699 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2700 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2701 *start, *last, *is_heap_stack);
2707 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2708 uint64_t *bo_s, uint64_t *bo_l)
2710 struct amdgpu_bo_va_mapping *mapping;
2711 struct interval_tree_node *node;
2712 struct amdgpu_bo *bo = NULL;
2713 unsigned long userptr;
2717 for (i = 0; i < p->n_pdds; i++) {
2718 struct amdgpu_vm *vm;
2720 if (!p->pdds[i]->drm_priv)
2723 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2724 r = amdgpu_bo_reserve(vm->root.bo, false);
2728 /* Check userptr by searching entire vm->va interval tree */
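/* (A userptr BO is mapped at a GPU VA that need not match its CPU address, so
 * an interval-tree query on the CPU range [start, last] cannot find it. Every
 * mapping is therefore visited and amdgpu_ttm_tt_affect_userptr() checks
 * whether the BO's CPU pages overlap the requested range.)
 */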
2729 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2731 mapping = container_of((struct rb_node *)node,
2732 struct amdgpu_bo_va_mapping, rb);
2733 bo = mapping->bo_va->base.bo;
2735 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2736 start << PAGE_SHIFT,
2739 node = interval_tree_iter_next(node, 0, ~0ULL);
2743 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2746 *bo_s = userptr >> PAGE_SHIFT;
2747 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2749 amdgpu_bo_unreserve(vm->root.bo);
2752 amdgpu_bo_unreserve(vm->root.bo);
2758 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2759 struct kfd_process *p,
2760 struct mm_struct *mm,
2763 struct svm_range *prange = NULL;
2764 unsigned long start, last;
2765 uint32_t gpuid, gpuidx;
2771 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2775 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2776 if (r != -EADDRINUSE)
2777 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2779 if (r == -EADDRINUSE) {
2780 if (addr >= bo_s && addr <= bo_l)
2783 /* The 2MB range overlaps an existing mapping; create a one-page svm range at the fault address instead */
2788 prange = svm_range_new(&p->svms, start, last, true);
2790 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2793 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2794 pr_debug("failed to get gpuid from kgd\n");
2795 svm_range_free(prange, true);
2800 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2802 svm_range_add_to_svms(prange);
2803 svm_range_add_notifier_locked(mm, prange);
2808 /* svm_range_skip_recover - decide if prange can be recovered
2809 * @prange: svm range structure
2811 * The GPU vm retry fault handler skips recovering the range in these cases:
2812 * 1. prange is on the deferred list to be removed after unmap; it is a stale fault,
2813 * and the deferred list work will drain the stale fault before freeing the prange.
2814 * 2. prange is on the deferred list to add an interval notifier after a split, or
2815 * 3. prange is a child range split from a parent prange; recover it later,
2816 * after the interval notifier is added.
2818 * Return: true to skip recover, false to recover
2820 static bool svm_range_skip_recover(struct svm_range *prange)
2822 struct svm_range_list *svms = prange->svms;
2824 spin_lock(&svms->deferred_list_lock);
2825 if (list_empty(&prange->deferred_list) &&
2826 list_empty(&prange->child_list)) {
2827 spin_unlock(&svms->deferred_list_lock);
2830 spin_unlock(&svms->deferred_list_lock);
2832 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2833 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2834 svms, prange, prange->start, prange->last);
2837 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2838 prange->work_item.op == SVM_OP_ADD_RANGE) {
2839 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2840 svms, prange, prange->start, prange->last);
2847 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2850 struct kfd_process_device *pdd;
2852 /* fault is on a different page of the same range,
2853 * or the fault is skipped to be recovered later,
2854 * or the fault is on an invalid virtual address
2856 if (gpuidx == MAX_GPU_INSTANCE) {
2860 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2865 /* fault is recovered,
2866 * or fault cannot be recovered because the GPU has no access to the range
2868 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2870 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2874 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2876 unsigned long requested = VM_READ;
2879 requested |= VM_WRITE;
2881 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2883 return (vma->vm_flags & requested) == requested;
2887 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2888 uint32_t vmid, uint32_t node_id,
2889 uint64_t addr, bool write_fault)
2891 unsigned long start, last, size;
2892 struct mm_struct *mm = NULL;
2893 struct svm_range_list *svms;
2894 struct svm_range *prange;
2895 struct kfd_process *p;
2896 ktime_t timestamp = ktime_get_boottime();
2897 struct kfd_node *node;
2899 int32_t gpuidx = MAX_GPU_INSTANCE;
2900 bool write_locked = false;
2901 struct vm_area_struct *vma;
2902 bool migration = false;
2905 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2906 pr_debug("device does not support SVM\n");
2910 p = kfd_lookup_process_by_pasid(pasid);
2912 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2917 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2919 if (atomic_read(&svms->drain_pagefaults)) {
2920 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2925 if (!p->xnack_enabled) {
2926 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2931 /* p->lead_thread is available because kfd_process_wq_release flushes the work
2932 * before releasing the task ref.
2934 mm = get_task_mm(p->lead_thread);
2936 pr_debug("svms 0x%p failed to get mm\n", svms);
2941 node = kfd_node_by_irq_ids(adev, node_id, vmid);
2943 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
2950 mutex_lock(&svms->lock);
2951 prange = svm_range_from_addr(svms, addr, NULL);
2953 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2955 if (!write_locked) {
2956 /* Need the write lock to create new range with MMU notifier.
2957 * Also flush pending deferred work to make sure the interval
2958 * tree is up to date before we add a new range
2960 mutex_unlock(&svms->lock);
2961 mmap_read_unlock(mm);
2962 mmap_write_lock(mm);
2963 write_locked = true;
2964 goto retry_write_locked;
2966 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2968 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2970 mmap_write_downgrade(mm);
2972 goto out_unlock_svms;
2976 mmap_write_downgrade(mm);
2978 mutex_lock(&prange->migrate_mutex);
2980 if (svm_range_skip_recover(prange)) {
2981 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
2983 goto out_unlock_range;
2986 /* skip duplicate vm fault on different pages of same range */
2987 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
2988 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
2989 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2990 svms, prange->start, prange->last);
2992 goto out_unlock_range;
2995 /* __do_munmap removed the VMA; return success as we are handling a stale retry fault */
2998 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3000 pr_debug("address 0x%llx VMA is removed\n", addr);
3002 goto out_unlock_range;
3005 if (!svm_fault_allowed(vma, write_fault)) {
3006 pr_debug("fault addr 0x%llx no %s permission\n", addr,
3007 write_fault ? "write" : "read");
3009 goto out_unlock_range;
3012 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3013 if (best_loc == -1) {
3014 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3015 svms, prange->start, prange->last);
3017 goto out_unlock_range;
3020 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3021 svms, prange->start, prange->last, best_loc,
3022 prange->actual_loc);
3024 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3025 write_fault, timestamp);
3027 /* Align migration range start and size to granularity size */
3028 size = 1UL << prange->granularity;
3029 start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
3030 last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
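/* Example: with prange->granularity == 9 the window is 512 pages (2MB with
 * 4KB pages); a fault at page 0x1234 restores the block [0x1200, 0x13ff],
 * clamped to the prange boundaries.
 */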
3031 if (prange->actual_loc != 0 || best_loc != 0) {
3035 r = svm_migrate_to_vram(prange, best_loc, start, last,
3036 mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3038 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3040 /* Fall back to system memory if migration to VRAM failed */
3043 if (prange->actual_loc && prange->actual_loc != best_loc)
3044 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3045 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3050 r = svm_migrate_vram_to_ram(prange, mm, start, last,
3051 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
3054 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3055 r, svms, start, last);
3056 goto out_unlock_range;
3060 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
3063 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3064 r, svms, start, last);
3066 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3070 mutex_unlock(&prange->migrate_mutex);
3072 mutex_unlock(&svms->lock);
3073 mmap_read_unlock(mm);
3075 svm_range_count_fault(node, p, gpuidx);
3079 kfd_unref_process(p);
3082 pr_debug("recover vm fault later\n");
3083 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3090 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3092 struct svm_range *prange, *pchild;
3093 uint64_t reserved_size = 0;
3097 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3099 mutex_lock(&p->svms.lock);
3101 list_for_each_entry(prange, &p->svms.list, list) {
3102 svm_range_lock(prange);
3103 list_for_each_entry(pchild, &prange->child_list, child_list) {
3104 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3105 if (xnack_enabled) {
3106 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3107 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3109 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3110 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3113 reserved_size += size;
3117 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3118 if (xnack_enabled) {
3119 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3120 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3122 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3123 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3126 reserved_size += size;
3129 svm_range_unlock(prange);
3135 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3136 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3138 /* Changing the xnack mode must be done inside the svms lock, to avoid racing with
3139 * svm_range_deferred_list_work unreserving memory in parallel.
3141 p->xnack_enabled = xnack_enabled;
3143 mutex_unlock(&p->svms.lock);
3147 void svm_range_list_fini(struct kfd_process *p)
3149 struct svm_range *prange;
3150 struct svm_range *next;
3152 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3154 cancel_delayed_work_sync(&p->svms.restore_work);
3156 /* Ensure list work is finished before process is destroyed */
3157 flush_work(&p->svms.deferred_list_work);
3160 * Ensure no retry fault comes in afterwards, as the page fault handler will
3161 * not find the kfd process and cannot take the mm lock to recover the fault.
3163 atomic_inc(&p->svms.drain_pagefaults);
3164 svm_range_drain_retry_fault(&p->svms);
3166 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3167 svm_range_unlink(prange);
3168 svm_range_remove_notifier(prange);
3169 svm_range_free(prange, true);
3172 mutex_destroy(&p->svms.lock);
3174 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3177 int svm_range_list_init(struct kfd_process *p)
3179 struct svm_range_list *svms = &p->svms;
3182 svms->objects = RB_ROOT_CACHED;
3183 mutex_init(&svms->lock);
3184 INIT_LIST_HEAD(&svms->list);
3185 atomic_set(&svms->evicted_ranges, 0);
3186 atomic_set(&svms->drain_pagefaults, 0);
3187 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3188 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3189 INIT_LIST_HEAD(&svms->deferred_range_list);
3190 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3191 spin_lock_init(&svms->deferred_list_lock);
3193 for (i = 0; i < p->n_pdds; i++)
3194 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3195 bitmap_set(svms->bitmap_supported, i, 1);
3201 * svm_range_check_vm - check if virtual address range mapped already
3202 * @p: current kfd_process
3203 * @start: range start address, in pages
3204 * @last: range last address, in pages
3205 * @bo_s: mapping start address in pages if address range already mapped
3206 * @bo_l: mapping last address in pages if address range already mapped
3208 * The purpose is to avoid colliding with virtual address ranges already allocated
3209 * by the kfd_ioctl_alloc_memory_of_gpu ioctl.
3210 * It checks the VM of each pdd in the kfd_process.
3212 * Context: Process context
3214 * Return 0 - OK, if the range is not mapped.
3215 * Otherwise error code:
3216 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3217 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3218 * a signal. Release all buffer reservations and return to user-space.
3221 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3222 uint64_t *bo_s, uint64_t *bo_l)
3224 struct amdgpu_bo_va_mapping *mapping;
3225 struct interval_tree_node *node;
3229 for (i = 0; i < p->n_pdds; i++) {
3230 struct amdgpu_vm *vm;
3232 if (!p->pdds[i]->drm_priv)
3235 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3236 r = amdgpu_bo_reserve(vm->root.bo, false);
3240 node = interval_tree_iter_first(&vm->va, start, last);
3242 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3244 mapping = container_of((struct rb_node *)node,
3245 struct amdgpu_bo_va_mapping, rb);
3247 *bo_s = mapping->start;
3248 *bo_l = mapping->last;
3250 amdgpu_bo_unreserve(vm->root.bo);
3253 amdgpu_bo_unreserve(vm->root.bo);
3260 * svm_range_is_valid - check if virtual address range is valid
3261 * @p: current kfd_process
3262 * @start: range start address, in pages
3263 * @size: range size, in pages
3265 * A valid virtual address range means it belongs to one or more VMAs
3267 * Context: Process context
3270 * 0 - OK, otherwise error code
3273 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3275 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3276 struct vm_area_struct *vma;
3278 unsigned long start_unchg = start;
3280 start <<= PAGE_SHIFT;
3281 end = start + (size << PAGE_SHIFT);
3283 vma = vma_lookup(p->mm, start);
3284 if (!vma || (vma->vm_flags & device_vma))
3286 start = min(end, vma->vm_end);
3287 } while (start < end);
3289 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3294 * svm_range_best_prefetch_location - decide the best prefetch location
3295 * @prange: svm range structure
3298 * With xnack off: if the range maps to a single GPU, the best prefetch location is
3299 * prefetch_loc, which can be CPU or GPU.
3301 * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best prefetch
3302 * location is the prefetch_loc GPU only if the GPUs are connected in the same XGMI
3303 * hive; otherwise the best prefetch location is always CPU, because a GPU cannot
3304 * coherently map another GPU's VRAM even with a large-BAR PCIe connection.
3307 * With xnack on: if the range is not ACCESS_IN_PLACE by multiple GPUs, the best
3308 * prefetch location is prefetch_loc; access from other GPUs will generate vm faults and trigger migration.
3310 * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location is
3311 * the prefetch_loc GPU only if the GPUs are connected in the same XGMI hive; otherwise
3312 * the best prefetch location is always CPU.
3314 * Context: Process context
3317 * 0 for CPU or GPU id
3320 svm_range_best_prefetch_location(struct svm_range *prange)
3322 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3323 uint32_t best_loc = prange->prefetch_loc;
3324 struct kfd_process_device *pdd;
3325 struct kfd_node *bo_node;
3326 struct kfd_process *p;
3329 p = container_of(prange->svms, struct kfd_process, svms);
3331 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3334 bo_node = svm_range_get_node_by_id(prange, best_loc);
3336 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3341 if (bo_node->adev->gmc.is_app_apu) {
3346 if (p->xnack_enabled)
3347 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3349 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3352 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3353 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3355 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3359 if (pdd->dev->adev == bo_node->adev)
3362 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3369 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3370 p->xnack_enabled, &p->svms, prange->start, prange->last,
3376 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3377 * @mm: current process mm_struct
3378 * @prange: svm range structure
3379 * @migrated: output, true if migration is triggered
3381 * If the range prefetch_loc is a GPU and the actual loc is cpu (0), migrate the range from ram to vram.
3383 * If the range prefetch_loc is cpu (0) and the actual loc is a GPU, migrate the range from vram to ram.
3386 * If GPU vm fault retry is not enabled, migration interacts with the MMU notifier callback:
3388 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback svm_range_evict
3389 * stops all queues and schedules the restore work
3390 * 2. svm_range_restore_work waits for the migration to finish via
3391 * a. svm_range_validate_vram taking prange->migrate_mutex
3392 * b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler to return
3393 * 3. the restore work updates the GPU mappings and resumes all queues.
3395 * Context: Process context
3398 * 0 - OK, otherwise - error code of migration
3401 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3408 best_loc = svm_range_best_prefetch_location(prange);
3410 /* When best_loc is a gpu node and the same as prange->actual_loc,
3411 * we still need to do the migration, as prange->actual_loc != 0 does
3412 * not mean all pages in prange are in vram. hmm migrate will pick
3413 * up the right pages during migration.
3415 if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
3416 (best_loc == 0 && prange->actual_loc == 0))
3420 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
3421 KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3426 r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
3427 mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3433 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3438 if (dma_fence_is_signaled(&fence->base))
3441 if (fence->svm_bo) {
3442 WRITE_ONCE(fence->svm_bo->evicting, 1);
3443 schedule_work(&fence->svm_bo->eviction_work);
3449 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3451 struct svm_range_bo *svm_bo;
3452 struct mm_struct *mm;
3455 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3456 if (!svm_bo_ref_unless_zero(svm_bo))
3457 return; /* svm_bo was freed while eviction was pending */
3459 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3460 mm = svm_bo->eviction_fence->mm;
3462 svm_range_bo_unref(svm_bo);
3467 spin_lock(&svm_bo->list_lock);
3468 while (!list_empty(&svm_bo->range_list) && !r) {
3469 struct svm_range *prange =
3470 list_first_entry(&svm_bo->range_list,
3471 struct svm_range, svm_bo_list);
3474 list_del_init(&prange->svm_bo_list);
3475 spin_unlock(&svm_bo->list_lock);
3477 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3478 prange->start, prange->last);
3480 mutex_lock(&prange->migrate_mutex);
3482 /* migrate all vram pages in this prange to sys ram
3483 * after that prange->actual_loc should be zero
3485 r = svm_migrate_vram_to_ram(prange, mm,
3486 prange->start, prange->last,
3487 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3488 } while (!r && prange->actual_loc && --retries);
3490 if (!r && prange->actual_loc)
3491 pr_info_once("Migration failed during eviction");
3493 if (!prange->actual_loc) {
3494 mutex_lock(&prange->lock);
3495 prange->svm_bo = NULL;
3496 mutex_unlock(&prange->lock);
3498 mutex_unlock(&prange->migrate_mutex);
3500 spin_lock(&svm_bo->list_lock);
3502 spin_unlock(&svm_bo->list_lock);
3503 mmap_read_unlock(mm);
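/* At this point every range backed by this svm_bo has been migrated to system
 * memory (or migration was given up after retries), so signal the eviction
 * fence; this lets the TTM eviction that scheduled this worker proceed.
 */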
3506 dma_fence_signal(&svm_bo->eviction_fence->base);
3508 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3509 * has been called in svm_migrate_vram_to_ram
3511 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3512 svm_range_bo_unref(svm_bo);
3516 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3517 uint64_t start, uint64_t size, uint32_t nattr,
3518 struct kfd_ioctl_svm_attribute *attrs)
3520 struct amdkfd_process_info *process_info = p->kgd_process_info;
3521 struct list_head update_list;
3522 struct list_head insert_list;
3523 struct list_head remove_list;
3524 struct list_head remap_list;
3525 struct svm_range_list *svms;
3526 struct svm_range *prange;
3527 struct svm_range *next;
3528 bool update_mapping = false;
3532 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3533 p->pasid, &p->svms, start, start + size - 1, size);
3535 r = svm_range_check_attr(p, nattr, attrs);
3541 mutex_lock(&process_info->lock);
3543 svm_range_list_lock_and_flush_work(svms, mm);
3545 r = svm_range_is_valid(p, start, size);
3547 pr_debug("invalid range r=%d\n", r);
3548 mmap_write_unlock(mm);
3552 mutex_lock(&svms->lock);
3554 /* Add new range and split existing ranges as needed */
3555 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3556 &insert_list, &remove_list, &remap_list);
3558 mutex_unlock(&svms->lock);
3559 mmap_write_unlock(mm);
3562 /* Apply changes as a transaction */
3563 list_for_each_entry_safe(prange, next, &insert_list, list) {
3564 svm_range_add_to_svms(prange);
3565 svm_range_add_notifier_locked(mm, prange);
3567 list_for_each_entry(prange, &update_list, update_list) {
3568 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3569 /* TODO: unmap ranges from GPU that lost access */
3571 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3572 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3573 prange->svms, prange, prange->start,
3575 svm_range_unlink(prange);
3576 svm_range_remove_notifier(prange);
3577 svm_range_free(prange, false);
3580 mmap_write_downgrade(mm);
3581 /* Trigger migrations and revalidate and map to GPUs as needed. If
3582 * this fails we may be left with partially completed actions. There
3583 * is no clean way of rolling back to the previous state in such a
3584 * case because the rollback wouldn't be guaranteed to work either.
3586 list_for_each_entry(prange, &update_list, update_list) {
3589 mutex_lock(&prange->migrate_mutex);
3591 r = svm_range_trigger_migration(mm, prange, &migrated);
3593 goto out_unlock_range;
3595 if (migrated && (!p->xnack_enabled ||
3596 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3597 prange->mapped_to_gpu) {
3598 pr_debug("restore_work will update mappings of GPUs\n");
3599 mutex_unlock(&prange->migrate_mutex);
3603 if (!migrated && !update_mapping) {
3604 mutex_unlock(&prange->migrate_mutex);
3608 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
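/* Only flush TLBs when an existing GPU mapping is updated in place: the range
 * was not migrated, the attribute change affects the mapping, and the range is
 * currently mapped to a GPU.
 */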
3610 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3611 MAX_GPU_INSTANCE, true, true, flush_tlb);
3613 pr_debug("failed %d to map svm range\n", r);
3616 mutex_unlock(&prange->migrate_mutex);
3621 list_for_each_entry(prange, &remap_list, update_list) {
3622 pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
3623 prange, prange->start, prange->last);
3624 mutex_lock(&prange->migrate_mutex);
3625 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
3626 MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
3628 pr_debug("failed %d on remap svm range\n", r);
3629 mutex_unlock(&prange->migrate_mutex);
3634 dynamic_svm_range_dump(svms);
3636 mutex_unlock(&svms->lock);
3637 mmap_read_unlock(mm);
3639 mutex_unlock(&process_info->lock);
3641 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3642 &p->svms, start, start + size - 1, r);
3644 return ret ? ret : r;
3648 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3649 uint64_t start, uint64_t size, uint32_t nattr,
3650 struct kfd_ioctl_svm_attribute *attrs)
3652 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3653 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3654 bool get_preferred_loc = false;
3655 bool get_prefetch_loc = false;
3656 bool get_granularity = false;
3657 bool get_accessible = false;
3658 bool get_flags = false;
3659 uint64_t last = start + size - 1UL;
3660 uint8_t granularity = 0xff;
3661 struct interval_tree_node *node;
3662 struct svm_range_list *svms;
3663 struct svm_range *prange;
3664 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3665 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3666 uint32_t flags_and = 0xffffffff;
3667 uint32_t flags_or = 0;
3672 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3673 start + size - 1, nattr);
3675 /* Flush pending deferred work to avoid racing with deferred actions from
3676 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3677 * can still race with get_attr because we don't hold the mmap lock. But that
3678 * would be a race condition in the application anyway, and undefined
3679 * behaviour is acceptable in that case.
3681 flush_work(&p->svms.deferred_list_work);
3684 r = svm_range_is_valid(p, start, size);
3685 mmap_read_unlock(mm);
3687 pr_debug("invalid range r=%d\n", r);
3691 for (i = 0; i < nattr; i++) {
3692 switch (attrs[i].type) {
3693 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3694 get_preferred_loc = true;
3696 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3697 get_prefetch_loc = true;
3699 case KFD_IOCTL_SVM_ATTR_ACCESS:
3700 get_accessible = true;
3702 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3703 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3706 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3707 get_granularity = true;
3709 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3710 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3713 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3720 mutex_lock(&svms->lock);
3722 node = interval_tree_iter_first(&svms->objects, start, last);
3724 pr_debug("range attrs not found return default values\n");
3725 svm_range_set_default_attributes(&location, &prefetch_loc,
3726 &granularity, &flags_and);
3727 flags_or = flags_and;
3728 if (p->xnack_enabled)
3729 bitmap_copy(bitmap_access, svms->bitmap_supported,
3732 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3733 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3736 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3737 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
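/* The loop below aggregates attributes across every range overlapping
 * [start, last]: preferred/prefetch locations collapse to UNDEFINED on the
 * first mismatch, the access and access-in-place bitmaps are AND-ed so only
 * GPUs accessible for all ranges remain, flags are accumulated as AND (set on
 * all) and OR (set on any), and the smallest granularity wins.
 */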
3740 struct interval_tree_node *next;
3742 prange = container_of(node, struct svm_range, it_node);
3743 next = interval_tree_iter_next(node, start, last);
3745 if (get_preferred_loc) {
3746 if (prange->preferred_loc ==
3747 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3748 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3749 location != prange->preferred_loc)) {
3750 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3751 get_preferred_loc = false;
3753 location = prange->preferred_loc;
3756 if (get_prefetch_loc) {
3757 if (prange->prefetch_loc ==
3758 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3759 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3760 prefetch_loc != prange->prefetch_loc)) {
3761 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3762 get_prefetch_loc = false;
3764 prefetch_loc = prange->prefetch_loc;
3767 if (get_accessible) {
3768 bitmap_and(bitmap_access, bitmap_access,
3769 prange->bitmap_access, MAX_GPU_INSTANCE);
3770 bitmap_and(bitmap_aip, bitmap_aip,
3771 prange->bitmap_aip, MAX_GPU_INSTANCE);
3774 flags_and &= prange->flags;
3775 flags_or |= prange->flags;
3778 if (get_granularity && prange->granularity < granularity)
3779 granularity = prange->granularity;
3784 mutex_unlock(&svms->lock);
3786 for (i = 0; i < nattr; i++) {
3787 switch (attrs[i].type) {
3788 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3789 attrs[i].value = location;
3791 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3792 attrs[i].value = prefetch_loc;
3794 case KFD_IOCTL_SVM_ATTR_ACCESS:
3795 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3798 pr_debug("invalid gpuid %x\n", attrs[i].value);
3801 if (test_bit(gpuidx, bitmap_access))
3802 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3803 else if (test_bit(gpuidx, bitmap_aip))
3805 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3807 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3809 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3810 attrs[i].value = flags_and;
3812 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3813 attrs[i].value = ~flags_or;
3815 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3816 attrs[i].value = (uint32_t)granularity;
3824 int kfd_criu_resume_svm(struct kfd_process *p)
3826 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3827 int nattr_common = 4, nattr_accessibility = 1;
3828 struct criu_svm_metadata *criu_svm_md = NULL;
3829 struct svm_range_list *svms = &p->svms;
3830 struct criu_svm_metadata *next = NULL;
3831 uint32_t set_flags = 0xffffffff;
3832 int i, j, num_attrs, ret = 0;
3833 uint64_t set_attr_size;
3834 struct mm_struct *mm;
3836 if (list_empty(&svms->criu_svm_metadata_list)) {
3837 pr_debug("No SVM data from CRIU restore stage 2\n");
3841 mm = get_task_mm(p->lead_thread);
3843 pr_err("failed to get mm for the target process\n");
3847 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3850 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3851 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3852 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3854 for (j = 0; j < num_attrs; j++) {
3855 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3856 i, j, criu_svm_md->data.attrs[j].type,
3857 i, j, criu_svm_md->data.attrs[j].value);
3858 switch (criu_svm_md->data.attrs[j].type) {
3859 /* During Checkpoint operation, the query for
3860 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3861 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if they were
3862 * not used by the range which was checkpointed. Care
3863 * must be taken to not restore with an invalid value
3864 * otherwise the gpuidx value will be invalid and
3865 * set_attr would eventually fail so just replace those
3866 * with another dummy attribute such as
3867 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3869 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3870 if (criu_svm_md->data.attrs[j].value ==
3871 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3872 criu_svm_md->data.attrs[j].type =
3873 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3874 criu_svm_md->data.attrs[j].value = 0;
3877 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3878 set_flags = criu_svm_md->data.attrs[j].value;
3885 /* CLR_FLAGS is not available via get_attr during checkpoint but
3886 * it needs to be inserted before restoring the ranges so
3887 * allocate extra space for it before calling set_attr
3889 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3891 set_attr_new = krealloc(set_attr, set_attr_size,
3893 if (!set_attr_new) {
3897 set_attr = set_attr_new;
3899 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3900 sizeof(struct kfd_ioctl_svm_attribute));
3901 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3902 set_attr[num_attrs].value = ~set_flags;
3904 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3905 criu_svm_md->data.size, num_attrs + 1,
3908 pr_err("CRIU: failed to set range attributes\n");
3916 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3917 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3918 criu_svm_md->data.start_addr);
3927 int kfd_criu_restore_svm(struct kfd_process *p,
3928 uint8_t __user *user_priv_ptr,
3929 uint64_t *priv_data_offset,
3930 uint64_t max_priv_data_size)
3932 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3933 int nattr_common = 4, nattr_accessibility = 1;
3934 struct criu_svm_metadata *criu_svm_md = NULL;
3935 struct svm_range_list *svms = &p->svms;
3936 uint32_t num_devices;
3939 num_devices = p->n_pdds;
3940 /* Handle one SVM range object at a time. The number of gpus is
3941 * assumed to be the same on the restore node; this must be checked
3942 * while evaluating the topology earlier.
3945 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3946 (nattr_common + nattr_accessibility * num_devices);
3947 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
3949 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3952 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3954 pr_err("failed to allocate memory to store svm metadata\n");
3957 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3962 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3963 svm_priv_data_size);
3968 *priv_data_offset += svm_priv_data_size;
3970 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3980 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3981 uint64_t *svm_priv_data_size)
3983 uint64_t total_size, accessibility_size, common_attr_size;
3984 int nattr_common = 4, nattr_accessibility = 1;
3985 int num_devices = p->n_pdds;
3986 struct svm_range_list *svms;
3987 struct svm_range *prange;
3990 *svm_priv_data_size = 0;
3996 mutex_lock(&svms->lock);
3997 list_for_each_entry(prange, &svms->list, list) {
3998 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3999 prange, prange->start, prange->npages,
4000 prange->start + prange->npages - 1);
4003 mutex_unlock(&svms->lock);
4005 *num_svm_ranges = count;
4006 /* Only the accessibility attributes need to be queried for all the gpus
4007 * individually, remaining ones are spanned across the entire process
4008 * regardless of the various gpu nodes. Of the remaining attributes,
4009 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4011 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4012 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4013 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4014 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4016 * ** ACCESSIBILITY ATTRIBUTES **
4017 * (Considered as one, type is altered during query, value is gpuid)
4018 * KFD_IOCTL_SVM_ATTR_ACCESS
4019 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4020 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4022 if (*num_svm_ranges > 0) {
4023 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4025 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4026 nattr_accessibility * num_devices;
4028 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4029 common_attr_size + accessibility_size;
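/* For example, with 4 GPUs each checkpointed range carries 4 common attributes
 * plus 4 accessibility attributes, so total_size is the priv_data header plus
 * 8 kfd_ioctl_svm_attribute entries.
 */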
4031 *svm_priv_data_size = *num_svm_ranges * total_size;
4034 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4035 *svm_priv_data_size);
4039 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4040 uint8_t __user *user_priv_data,
4041 uint64_t *priv_data_offset)
4043 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4044 struct kfd_ioctl_svm_attribute *query_attr = NULL;
4045 uint64_t svm_priv_data_size, query_attr_size = 0;
4046 int index, nattr_common = 4, ret = 0;
4047 struct svm_range_list *svms;
4048 int num_devices = p->n_pdds;
4049 struct svm_range *prange;
4050 struct mm_struct *mm;
4056 mm = get_task_mm(p->lead_thread);
4058 pr_err("failed to get mm for the target process\n");
4062 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4063 (nattr_common + num_devices);
4065 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4071 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4072 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4073 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4074 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4076 for (index = 0; index < num_devices; index++) {
4077 struct kfd_process_device *pdd = p->pdds[index];
4079 query_attr[index + nattr_common].type =
4080 KFD_IOCTL_SVM_ATTR_ACCESS;
4081 query_attr[index + nattr_common].value = pdd->user_gpu_id;
4084 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4086 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4093 list_for_each_entry(prange, &svms->list, list) {
4095 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4096 svm_priv->start_addr = prange->start;
4097 svm_priv->size = prange->npages;
4098 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4099 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4100 prange, prange->start, prange->npages,
4101 prange->start + prange->npages - 1,
4102 prange->npages * PAGE_SIZE);
4104 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4106 (nattr_common + num_devices),
4109 pr_err("CRIU: failed to obtain range attributes\n");
4113 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4114 svm_priv_data_size)) {
4115 pr_err("Failed to copy svm priv to user\n");
4120 *priv_data_offset += svm_priv_data_size;
4135 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4136 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4138 struct mm_struct *mm = current->mm;
4141 start >>= PAGE_SHIFT;
4142 size >>= PAGE_SHIFT;
4145 case KFD_IOCTL_SVM_OP_SET_ATTR:
4146 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4148 case KFD_IOCTL_SVM_OP_GET_ATTR:
4149 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);