drm/xe: Map both mem.kernel_bb_pool and usm.bb_pool
author Matthew Brost <matthew.brost@intel.com>
Fri, 2 Feb 2024 03:34:40 +0000 (19:34 -0800)
committer Thomas Hellström <thomas.hellstrom@linux.intel.com>
Thu, 8 Feb 2024 08:49:59 +0000 (09:49 +0100)
For integrated devices we need to map both mem.kernel_bb_pool and
usm.bb_pool to be able to run batches from both pools.

Fixes: a682b6a42d4d ("drm/xe: Support device page faults on integrated platforms")
Tested-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Brian Welty <brian.welty@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240202033440.2351862-1-matthew.brost@intel.com
(cherry picked from commit 72f86ed3c88933d6fa09b036de93621ea71097a7)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
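For orientation, the layout this change produces on integrated parts can be
sketched as a standalone program. This is illustrative arithmetic, not driver
code: the 4 KiB page size and the base-offset value are assumptions, while the
pool sizes and the "usm pool follows the kernel pool" placement come from the
asserts in the xe_migrate.c hunk below.

#include <stdio.h>

#define SZ_512K		(512u << 10)
#define SZ_1M		(1u << 20)
#define XE_PAGE_SIZE	4096u	/* assumption: 4 KiB GPU pages */

int main(void)
{
	/* Illustrative base; the driver uses NUM_PT_SLOTS * XE_PAGE_SIZE. */
	unsigned long long batch_base_ofs = 32ull * XE_PAGE_SIZE;

	/* mem.kernel_bb_pool batches are mapped first (SZ_1M)... */
	printf("kernel_bb_pool: [0x%llx, 0x%llx)\n",
	       batch_base_ofs, batch_base_ofs + SZ_1M);
	/* ...with usm.bb_pool (SZ_512K on integrated) right after them. */
	printf("usm.bb_pool:    [0x%llx, 0x%llx)\n",
	       batch_base_ofs + SZ_1M, batch_base_ofs + SZ_1M + SZ_512K);
	return 0;
}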
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_migrate.c

index 3af2adec129561850bfb378c04ca2d7caacdf325..35474ddbaf97ecc974a6b55643e578dbcfe135f9 100644
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
                 * USM has its own SA pool so as to not block behind user operations
                 */
                if (gt_to_xe(gt)->info.has_usm) {
-                       gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+                       struct xe_device *xe = gt_to_xe(gt);
+
+                       gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+                                                               IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
                        if (IS_ERR(gt->usm.bb_pool)) {
                                err = PTR_ERR(gt->usm.bb_pool);
                                goto err_force_wake;
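One plausible reading of the SZ_512K choice for integrated parts, sketched
below as a self-contained check: both pools end up behind PTEs written into a
single page of the migrate VM's page tables, and one 4 KiB page of 8-byte
entries covers at most 2 MiB at 4 KiB granularity. This rationale is an
assumption; the names pte_slots and usm_bb_pool_size are hypothetical, and
only the sizes and the IS_DGFX() selection mirror the hunk above.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define SZ_512K		(512u << 10)
#define SZ_1M		(1u << 20)
#define XE_PAGE_SIZE	4096u	/* assumption: 4 KiB GPU pages */

/* Hypothetical stand-in for the IS_DGFX(xe) ? SZ_1M : SZ_512K selection. */
static unsigned int usm_bb_pool_size(bool is_dgfx)
{
	return is_dgfx ? SZ_1M : SZ_512K;
}

int main(void)
{
	/* One 4 KiB page of 8-byte PTEs holds 512 entries (2 MiB of pages). */
	unsigned int pte_slots = XE_PAGE_SIZE / 8;
	unsigned int used = SZ_1M / XE_PAGE_SIZE +		 /* kernel_bb_pool */
			    usm_bb_pool_size(false) / XE_PAGE_SIZE; /* usm.bb_pool */

	printf("PTEs used on integrated: %u of %u\n", used, pte_slots);
	assert(used <= pte_slots);	/* 256 + 128 = 384 <= 512 */
	return 0;
}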
index 0cc31837ef26bc65123d431326fd4f79bee68010..70480c30560215ff7fece9a824fd01c92008562d 100644
@@ -170,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
        if (!IS_DGFX(xe)) {
                /* Write out batch too */
                m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
-               if (xe->info.has_usm) {
-                       batch = tile->primary_gt->usm.bb_pool->bo;
-                       m->usm_batch_base_ofs = m->batch_base_ofs;
-               }
-
                for (i = 0; i < batch->size;
                     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
                     XE_PAGE_SIZE) {
@@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                                  entry);
                        level++;
                }
+               if (xe->info.has_usm) {
+                       xe_tile_assert(tile, batch->size == SZ_1M);
+
+                       batch = tile->primary_gt->usm.bb_pool->bo;
+                       m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+                       xe_tile_assert(tile, batch->size == SZ_512K);
+
+                       for (i = 0; i < batch->size;
+                            i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+                            XE_PAGE_SIZE) {
+                               entry = vm->pt_ops->pte_encode_bo(batch, i,
+                                                                 pat_index, 0);
+
+                               xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+                                         entry);
+                               level++;
+                       }
+               }
        } else {
                u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
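For context, the two offsets installed above are what the migrate code later
chooses between when computing a job's batch address. A minimal sketch of that
selection follows, assuming a helper of roughly this shape exists in
xe_migrate.c; the struct here is a stand-in carrying only the two fields this
patch touches, and the initializer values are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Stand-in with only the two offsets set up in xe_migrate_prepare_vm(). */
struct xe_migrate {
	u64 batch_base_ofs;
	u64 usm_batch_base_ofs;
};

/* Pick the batch base depending on whether the job services page faults
 * (USM) or ordinary kernel work; a sketch, not quoted from the driver. */
static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
{
	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
}

int main(void)
{
	struct xe_migrate m = {
		/* Illustrative; the driver uses NUM_PT_SLOTS * XE_PAGE_SIZE. */
		.batch_base_ofs = 32ull * 4096,
		.usm_batch_base_ofs = 32ull * 4096 + (1u << 20),
	};

	printf("kernel batches at 0x%llx, usm batches at 0x%llx\n",
	       (unsigned long long)xe_migrate_batch_base(&m, false),
	       (unsigned long long)xe_migrate_batch_base(&m, true));
	return 0;
}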