/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
#include <engine/sec2.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>

#include <linux/acpi.h>
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
        u8 auth_tag_buffer[16];

#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
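
/*
 * Framing sketch (illustrative, not part of the original source): every
 * command or message carries GSP_MSG_HDR_SIZE bytes of r535_gsp_msg header
 * ahead of its payload, and the pair is rounded up to whole queue pages,
 * e.g. (assuming GSP_PAGE_SIZE is 4KiB):
 *
 *      u32 pages = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
 *
 * so a 0x2000-byte payload plus the header occupies three queue pages.
 */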
r535_rpc_status_to_errno(uint32_t rpc_status)
        case 0x55: /* NV_ERR_NOT_READY */
        case 0x66: /* NV_ERR_TIMEOUT_RETRY */
        case 0x51: /* NV_ERR_NO_MEMORY */
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
        struct r535_gsp_msg *mqe;
        u32 size, rptr = *gsp->msgq.rptr;

        size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
        if (WARN_ON(!size || size >= gsp->msgq.cnt))
                return ERR_PTR(-EINVAL);

                u32 wptr = *gsp->msgq.wptr;

                used = wptr + gsp->msgq.cnt - rptr;
                if (used >= gsp->msgq.cnt)
                        used -= gsp->msgq.cnt;
        } while (--(*ptime));

        if (WARN_ON(!*ptime))
                return ERR_PTR(-ETIMEDOUT);

        mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);

                *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);

        msg = kvmalloc(repc, GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
        len = min_t(u32, repc, len);
        memcpy(msg, mqe->data, len);

        rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
        if (rptr == gsp->msgq.cnt)

                mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
                memcpy(msg + len, mqe, repc);

                rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);

        (*gsp->msgq.rptr) = rptr;
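
/*
 * Worked example of the ring arithmetic above (illustrative, assuming a
 * 0x40-page message queue, i.e. gsp->msgq.cnt == 0x40): with rptr == 0x3e
 * and wptr == 0x02, used = 0x02 + 0x40 - 0x3e = 0x04, already < cnt, so no
 * wrap-around correction is needed; with rptr == 0x02 and wptr == 0x3e,
 * used = 0x3e + 0x40 - 0x02 = 0x7c >= 0x40, so 0x40 is subtracted, leaving
 * 0x3c used pages.
 */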
r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime)
        return r535_gsp_msgq_wait(gsp, repc, NULL, ptime);
r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
        struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data);
        struct r535_gsp_msg *cqe;
        u32 argc = cmd->checksum;
        u64 *ptr = (void *)cmd;
        int free, time = 1000000;

        argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);

        end = (u64 *)((char *)ptr + argc);

        cmd->sequence = gsp->cmdq.seq++;
        cmd->elem_count = DIV_ROUND_UP(argc, 0x1000);

        cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);

        wptr = *gsp->cmdq.wptr;

                free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
                if (free >= gsp->cmdq.cnt)
                        free -= gsp->cmdq.cnt;

        if (WARN_ON(!time)) {

        cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
        size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
        memcpy(cqe, (u8 *)cmd + off, size);

        wptr += DIV_ROUND_UP(size, 0x1000);
        if (wptr == gsp->cmdq.cnt)

        nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);

        (*gsp->cmdq.wptr) = wptr;

        nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc)
        struct r535_gsp_msg *cmd;
        u32 size = GSP_MSG_HDR_SIZE + argc;

        size = ALIGN(size, GSP_MSG_MIN_SIZE);
        cmd = kvzalloc(size, GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        cmd->checksum = argc;
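
/*
 * Note: at this point the checksum field only stashes the payload size;
 * r535_gsp_cmdq_push() reads it back as 'argc' and then overwrites it
 * with the real XOR-folded checksum.
 */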
struct nvfw_gsp_rpc {
        u32 rpc_result_private;

r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)

r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
        if (gsp->subdev.debug >= lvl) {
                nvkm_printk__(&gsp->subdev, lvl, info,
                              "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
                              msg->function, msg->length, msg->length - sizeof(*msg),
                              msg->rpc_result, msg->rpc_result_private);
                print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
                               msg->data, msg->length - sizeof(*msg), true);
static struct nvfw_gsp_rpc *
r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc)
        struct nvkm_subdev *subdev = &gsp->subdev;
        struct nvfw_gsp_rpc *msg;
        int time = 4000000, i;

        msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time);
        if (IS_ERR_OR_NULL(msg))

        msg = r535_gsp_msgq_recv(gsp, msg->length, &time);
        if (IS_ERR_OR_NULL(msg))

        if (msg->rpc_result) {
                r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
                r535_gsp_msg_done(gsp, msg);
                return ERR_PTR(-EINVAL);

        r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE);

        if (fn && msg->function == fn) {
                if (msg->length < sizeof(*msg) + repc) {
                        nvkm_error(subdev, "msg len %d < %zd\n",
                                   msg->length, sizeof(*msg) + repc);
                        r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
                        r535_gsp_msg_done(gsp, msg);
                        return ERR_PTR(-EIO);

                r535_gsp_msg_done(gsp, msg);

        for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
                struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];

                if (ntfy->fn == msg->function) {
                        ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));

        if (i == gsp->msgq.ntfy_nr)
                r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN);

        r535_gsp_msg_done(gsp, msg);

        if (*gsp->msgq.rptr != *gsp->msgq.wptr)
r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
        mutex_lock(&gsp->msgq.mutex);
        if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
                gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
                gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
                gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
        mutex_unlock(&gsp->msgq.mutex);
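
/*
 * Usage sketch (illustrative; 'my_handler' is hypothetical, its shape
 * mirrors the r535_gsp_msg_*() handlers registered in r535_gsp_oneinit()
 * below):
 *
 *      static int my_handler(void *priv, u32 fn, void *repv, u32 repc);
 *
 *      r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
 *                            my_handler, gsp);
 */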
r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
        mutex_lock(&gsp->cmdq.mutex);
        repv = r535_gsp_msg_recv(gsp, fn, 0);
        mutex_unlock(&gsp->cmdq.mutex);
                return PTR_ERR(repv);
r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
        struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
        struct nvfw_gsp_rpc *msg;
        u32 fn = rpc->function;

        if (gsp->subdev.debug >= NV_DBG_TRACE) {
                nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
                           rpc->length, rpc->length - sizeof(*rpc));
                print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
                               rpc->data, rpc->length - sizeof(*rpc), true);

        ret = r535_gsp_cmdq_push(gsp, rpc);

                msg = r535_gsp_msg_recv(gsp, fn, repc);
                if (!IS_ERR_OR_NULL(msg))
r535_gsp_event_dtor(struct nvkm_gsp_event *event)
        struct nvkm_gsp_device *device = event->device;
        struct nvkm_gsp_client *client = device->object.client;
        struct nvkm_gsp *gsp = client->gsp;

        mutex_lock(&gsp->client_id.mutex);
                list_del(&event->head);
        mutex_unlock(&gsp->client_id.mutex);

        nvkm_gsp_rm_free(&event->object);
        event->device = NULL;
r535_gsp_device_event_get(struct nvkm_gsp_event *event)
        struct nvkm_gsp_device *device = event->device;
        NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;

        ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
                                    NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
                return PTR_ERR(ctrl);

        ctrl->event = event->id;
        ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
        return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
                           nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
        struct nvkm_gsp_client *client = device->object.client;
        struct nvkm_gsp *gsp = client->gsp;
        NV0005_ALLOC_PARAMETERS *args;

        args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
                                     NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
                return PTR_ERR(args);

        args->hParentClient = client->object.handle;
        args->hSrcResource = 0;
        args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
        args->notifyIndex = NV01_EVENT_CLIENT_RM | id;

        ret = nvkm_gsp_rm_alloc_wr(&event->object, args);

        event->device = device;

        ret = r535_gsp_device_event_get(event);
                nvkm_gsp_event_dtor(event);

        mutex_lock(&gsp->client_id.mutex);
        list_add(&event->head, &client->events);
        mutex_unlock(&gsp->client_id.mutex);
r535_gsp_device_dtor(struct nvkm_gsp_device *device)
        nvkm_gsp_rm_free(&device->subdevice);
        nvkm_gsp_rm_free(&device->object);

r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
        NV2080_ALLOC_PARAMETERS *args;

        return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
        NV0080_ALLOC_PARAMETERS *args;

        args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
                return PTR_ERR(args);

        args->hClientShare = client->object.handle;

        ret = nvkm_gsp_rm_alloc_wr(&device->object, args);

        ret = r535_gsp_subdevice_ctor(device);
                nvkm_gsp_rm_free(&device->object);
r535_gsp_client_dtor(struct nvkm_gsp_client *client)
        struct nvkm_gsp *gsp = client->gsp;

        nvkm_gsp_rm_free(&client->object);

        mutex_lock(&gsp->client_id.mutex);
        idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
        mutex_unlock(&gsp->client_id.mutex);
r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
        NV0000_ALLOC_PARAMETERS *args;

        mutex_lock(&gsp->client_id.mutex);
        ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
        mutex_unlock(&gsp->client_id.mutex);

        client->object.client = client;
        INIT_LIST_HEAD(&client->events);

        args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
                r535_gsp_client_dtor(client);

        args->hClient = client->object.handle;
        args->processID = ~0;

        ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
                r535_gsp_client_dtor(client);
r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
        struct nvkm_gsp_client *client = object->client;
        struct nvkm_gsp *gsp = client->gsp;
        rpc_free_v03_00 *rpc;

        nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
                   client->object.handle, object->handle);

        rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
        if (WARN_ON(IS_ERR_OR_NULL(rpc)))

        rpc->params.hRoot = client->object.handle;
        rpc->params.hObjectParent = 0;
        rpc->params.hObjectOld = object->handle;
        return nvkm_gsp_rpc_wr(gsp, rpc, true);
r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
        rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

        nvkm_gsp_rpc_done(object->client->gsp, rpc);

r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
        rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
        struct nvkm_gsp *gsp = object->client->gsp;

        rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
        if (IS_ERR_OR_NULL(rpc))

                ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
                if (PTR_ERR(ret) != -EAGAIN)
                        nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);

                ret = repc ? rpc->params : NULL;

        nvkm_gsp_rpc_done(gsp, rpc);
r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
        struct nvkm_gsp_client *client = object->client;
        struct nvkm_gsp *gsp = client->gsp;
        rpc_gsp_rm_alloc_v03_00 *rpc;

        nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n",
                   client->object.handle, object->parent->handle, object->handle, oclass, argc);

        rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc);

        rpc->hClient = client->object.handle;
        rpc->hParent = object->parent->handle;
        rpc->hObject = object->handle;
        rpc->hClass = oclass;
        rpc->paramsSize = argc;
r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
        rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

        nvkm_gsp_rpc_done(object->client->gsp, rpc);

r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
        rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params);
        struct nvkm_gsp *gsp = object->client->gsp;

        rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
        if (IS_ERR_OR_NULL(rpc)) {

        ret = r535_rpc_status_to_errno(rpc->status);
                nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
                           object->client->object.handle, object->handle, rpc->cmd, rpc->status);

                nvkm_gsp_rpc_done(gsp, rpc);
r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
        struct nvkm_gsp_client *client = object->client;
        struct nvkm_gsp *gsp = client->gsp;
        rpc_gsp_rm_control_v03_00 *rpc;

        nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n",
                   client->object.handle, object->handle, cmd, argc);

        rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc);

        rpc->hClient = client->object.handle;
        rpc->hObject = object->handle;
        rpc->paramsSize = argc;
r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
        struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);

        r535_gsp_msg_done(gsp, rpc);

r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
        struct nvfw_gsp_rpc *rpc;

        rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
                return ERR_CAST(rpc);

        rpc->header_version = 0x03000000;
        rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
        rpc->rpc_result = 0xffffffff;
        rpc->rpc_result_private = 0xffffffff;
        rpc->length = sizeof(*rpc) + argc;
r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
        struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
        struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data);
        const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg);
        const u32 max_rpc_size = max_msg_size - sizeof(*rpc);
        u32 rpc_size = rpc->length - sizeof(*rpc);

        mutex_lock(&gsp->cmdq.mutex);
        if (rpc_size > max_rpc_size) {
                const u32 fn = rpc->function;

                /* Adjust length, and send initial RPC. */
                rpc->length = sizeof(*rpc) + max_rpc_size;
                cmd->checksum = rpc->length;

                repv = r535_gsp_rpc_send(gsp, argv, false, 0);

                argv += max_rpc_size;
                rpc_size -= max_rpc_size;

                /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
                        u32 size = min(rpc_size, max_rpc_size);

                        next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);

                        memcpy(next, argv, size);

                        repv = r535_gsp_rpc_send(gsp, next, false, 0);

                /* Wait for reply. */
                rpc = r535_gsp_msg_recv(gsp, fn, repc);
                if (!IS_ERR_OR_NULL(rpc))

                repv = r535_gsp_rpc_send(gsp, argv, wait, repc);

        mutex_unlock(&gsp->cmdq.mutex);
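
/*
 * Chunking arithmetic (illustrative): with 4KiB pages the largest single
 * message is 16 pages minus the r535_gsp_msg header, so an RPC whose
 * payload exceeds max_rpc_size is sent as one truncated initial RPC
 * followed by ceil(remainder / max_rpc_size) CONTINUATION_RECORD RPCs,
 * and only the final reply is waited upon.
 */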
const struct nvkm_gsp_rm
        .rpc_get = r535_gsp_rpc_get,
        .rpc_push = r535_gsp_rpc_push,
        .rpc_done = r535_gsp_rpc_done,

        .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
        .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
        .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,

        .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
        .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
        .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,

        .rm_free = r535_gsp_rpc_rm_free,

        .client_ctor = r535_gsp_client_ctor,
        .client_dtor = r535_gsp_client_dtor,

        .device_ctor = r535_gsp_device_ctor,
        .device_dtor = r535_gsp_device_dtor,

        .event_ctor = r535_gsp_device_event_ctor,
        .event_dtor = r535_gsp_event_dtor,
r535_gsp_msgq_work(struct work_struct *work)
        struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);

        mutex_lock(&gsp->cmdq.mutex);
        if (*gsp->msgq.rptr != *gsp->msgq.wptr)
                r535_gsp_msg_recv(gsp, 0, 0);
        mutex_unlock(&gsp->cmdq.mutex);
r535_gsp_intr(struct nvkm_inth *inth)
        struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
        struct nvkm_subdev *subdev = &gsp->subdev;
        u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
        u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
                                                  gsp->falcon.func->riscv_irqmask);
        u32 stat = intr & inte;

                nvkm_debug(subdev, "inte %08x %08x\n", intr, inte);

        if (stat & 0x00000040) {
                nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
                schedule_work(&gsp->msgq.work);

                nvkm_error(subdev, "intr %08x\n", stat);
                nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
                nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);

        nvkm_falcon_intr_retrigger(&gsp->falcon);
r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
        NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;

        ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
                                    NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
                return PTR_ERR(ctrl);

        ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
                nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);

        for (unsigned i = 0; i < ctrl->tableLen; i++) {
                enum nvkm_subdev_type type;

                nvkm_debug(&gsp->subdev,
                           "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
                           ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
                           ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);

                switch (ctrl->table[i].engineIdx) {
                case MC_ENGINE_IDX_GSP:
                        type = NVKM_SUBDEV_GSP;
                case MC_ENGINE_IDX_DISP:
                        type = NVKM_ENGINE_DISP;
                case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
                        type = NVKM_ENGINE_CE;
                        inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
                case MC_ENGINE_IDX_GR0:
                        type = NVKM_ENGINE_GR;
                case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
                        type = NVKM_ENGINE_NVDEC;
                        inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
                case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
                        type = NVKM_ENGINE_NVENC;
                        inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
                case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
                        type = NVKM_ENGINE_NVJPG;
                        inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
                case MC_ENGINE_IDX_OFA0:
                        type = NVKM_ENGINE_OFA;

                if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {

                gsp->intr[gsp->intr_nr].type = type;
                gsp->intr[gsp->intr_nr].inst = inst;
                gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
                gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;

        nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
        GspStaticConfigInfo *rpc;
        int last_usable = -1;

        rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));

        gsp->internal.client.object.client = &gsp->internal.client;
        gsp->internal.client.object.parent = NULL;
        gsp->internal.client.object.handle = rpc->hInternalClient;
        gsp->internal.client.gsp = gsp;

        gsp->internal.device.object.client = &gsp->internal.client;
        gsp->internal.device.object.parent = &gsp->internal.client.object;
        gsp->internal.device.object.handle = rpc->hInternalDevice;

        gsp->internal.device.subdevice.client = &gsp->internal.client;
        gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
        gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;

        gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
        gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;

        for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
                NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
                        &rpc->fbRegionInfoParams.fbRegion[i];

                nvkm_debug(&gsp->subdev, "fb region %d: "
                           "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
                           reg->base, reg->limit, reg->reserved, reg->performance,
                           reg->supportCompressed, reg->supportISO, reg->bProtected);

                if (!reg->reserved && !reg->bProtected) {
                        if (reg->supportCompressed && reg->supportISO &&
                            !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) {
                                const u64 size = (reg->limit + 1) - reg->base;

                                gsp->fb.region[gsp->fb.region_nr].addr = reg->base;
                                gsp->fb.region[gsp->fb.region_nr].size = size;

        if (last_usable >= 0) {
                u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;

                gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;

        for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
                if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
                        gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask);

        nvkm_gsp_rpc_done(gsp, rpc);
r535_gsp_postinit(struct nvkm_gsp *gsp)
        struct nvkm_device *device = gsp->subdev.device;

        ret = r535_gsp_rpc_get_gsp_static_info(gsp);

        INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);

        ret = r535_gsp_intr_get_table(gsp);

        ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
        if (WARN_ON(ret < 0))

        ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
                            r535_gsp_intr, &gsp->subdev.inth);

        nvkm_inth_allow(&gsp->subdev.inth);
        nvkm_wr32(device, 0x110004, 0x00000040);
r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
        rpc_unloading_guest_driver_v1F_07 *rpc;

        rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc));
                return PTR_ERR(rpc);

                rpc->bInPMTransition = 1;
                rpc->bGc6Entering = 0;
                rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;

                rpc->bInPMTransition = 0;
                rpc->bGc6Entering = 0;
                rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;

        return nvkm_gsp_rpc_wr(gsp, rpc, true);
struct nv_gsp_registry_entries {

static const struct nv_gsp_registry_entries r535_registry_entries[] = {
        { "RMSecBusResetEnable", 1 },
        { "RMForcePcieConfigSave", 1 },

#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
        PACKED_REGISTRY_TABLE *rpc;
        size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);

        /* add strings + null terminator */
        for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
                rpc_size += strlen(r535_registry_entries[i].name) + 1;

        rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size);
                return PTR_ERR(rpc);

        rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;

        str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
        strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
        for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
                int name_len = strlen(r535_registry_entries[i].name) + 1;

                rpc->entries[i].nameOffset = str_offset;
                rpc->entries[i].type = 1;
                rpc->entries[i].data = r535_registry_entries[i].value;
                rpc->entries[i].length = 4;
                memcpy(strings, r535_registry_entries[i].name, name_len);
                strings += name_len;
                str_offset += name_len;

        rpc->size = str_offset;

        return nvkm_gsp_rpc_wr(gsp, rpc, false);
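
/*
 * Resulting layout (sketch for the two entries above): the fixed-size
 * entry array is followed immediately by the packed, NUL-terminated name
 * strings, with each entry's nameOffset pointing into that string pool:
 *
 *      [header][entry 0][entry 1]"RMSecBusResetEnable\0RMForcePcieConfigSave\0"
 */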
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
        const guid_t NVOP_DSM_GUID =
                GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
                          0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
        u64 NVOP_DSM_REV = 0x00000100;
        union acpi_object argv4 = {
                .buffer.type = ACPI_TYPE_BUFFER,
                .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),

        caps->status = 0xffff;

        if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))

        obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);

        if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
            WARN_ON(obj->buffer.length != 4))

        caps->optimusCaps = *(u32 *)obj->buffer.pointer;

        kfree(argv4.buffer.pointer);
r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
        const guid_t JT_DSM_GUID =
                GUID_INIT(0xCBECA351L, 0x067B, 0x4924,
                          0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34);
        u64 JT_DSM_REV = 0x00000103;
        union acpi_object argv4 = {
                .buffer.type = ACPI_TYPE_BUFFER,
                .buffer.length = sizeof(caps),
                .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),

        jt->status = 0xffff;

        obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);

        if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
            WARN_ON(obj->buffer.length != 4))

        jt->jtCaps = *(u32 *)obj->buffer.pointer;
        jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;

        kfree(argv4.buffer.pointer);
r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
                     MUX_METHOD_DATA_ELEMENT *part)
        union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
        struct acpi_object_list input = { 1, &mux_arg };
        acpi_handle iter = NULL, handle_mux = NULL;
        unsigned long long value;

        mode->status = 0xffff;
        part->status = 0xffff;

                status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
                if (ACPI_FAILURE(status) || !iter)

                status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
                if (ACPI_FAILURE(status) || value != id)

        } while (!handle_mux);

        /* I -think- 0 means "acquire" according to nvidia's driver source */
        input.pointer->integer.type = ACPI_TYPE_INTEGER;
        input.pointer->integer.value = 0;

        status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value);
        if (ACPI_SUCCESS(status)) {

        status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value);
        if (ACPI_SUCCESS(status)) {
r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux)
        mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]);

        for (int i = 0; i < mux->tableLen; i++) {
                r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i],
                                     &mux->acpiIdMuxPartTable[i]);
r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *_DOD;

        dod->status = 0xffff;

        status = acpi_evaluate_object(handle, "_DOD", NULL, &output);
        if (ACPI_FAILURE(status))

        _DOD = output.pointer;

        if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
            WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))

        for (int i = 0; i < _DOD->package.count; i++) {
                if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER))

                dod->acpiIdList[i] = _DOD->package.elements[i].integer.value;
                dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);

        kfree(output.pointer);
r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
        acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);

        r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
        if (acpi->dodMethodData.status == 0)
                r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData);

        r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
        r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
        struct nvkm_device *device = gsp->subdev.device;
        struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
        GspSystemInfo *info;

        if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))

        info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
                return PTR_ERR(info);

        info->gpuPhysAddr = device->func->resource_addr(device, 0);
        info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
        info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
        info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
        info->maxUserVa = TASK_SIZE;
        info->pciConfigMirrorBase = 0x088000;
        info->pciConfigMirrorSize = 0x001000;
        r535_gsp_acpi_info(gsp, &info->acpiMethodData);

        return nvkm_gsp_rpc_wr(gsp, info, false);
r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
        struct nvkm_gsp *gsp = priv;
        struct nvkm_subdev *subdev = &gsp->subdev;
        rpc_os_error_log_v17_00 *msg = repv;

        if (WARN_ON(repc < sizeof(*msg)))

        nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString);
r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
        rpc_rc_triggered_v17_02 *msg = repv;
        struct nvkm_gsp *gsp = priv;
        struct nvkm_subdev *subdev = &gsp->subdev;
        struct nvkm_chan *chan;
        unsigned long flags;

        if (WARN_ON(repc < sizeof(*msg)))

        nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
                   msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
                   msg->partitionAttributionId);

        chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
                nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);

        nvkm_chan_error(chan, false);
        nvkm_chan_put(&chan, flags);
r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
        struct nvkm_gsp *gsp = priv;
        struct nvkm_subdev *subdev = &gsp->subdev;

        nvkm_error(subdev, "mmu fault queued\n");
r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
        struct nvkm_gsp *gsp = priv;
        struct nvkm_gsp_client *client;
        struct nvkm_subdev *subdev = &gsp->subdev;
        rpc_post_event_v17_00 *msg = repv;

        if (WARN_ON(repc < sizeof(*msg)))
        if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))

        nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
                   msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
                   msg->status, msg->eventDataSize, msg->bNotifyList);

        mutex_lock(&gsp->client_id.mutex);
        client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
                struct nvkm_gsp_event *event;
                bool handled = false;

                list_for_each_entry(event, &client->events, head) {
                        if (event->object.handle == msg->hEvent) {
                                event->func(event, msg->eventData, msg->eventDataSize);

                        nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
                                   msg->hClient, msg->hEvent);

                nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);

        mutex_unlock(&gsp->client_id.mutex);
/**
 * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
 *
 * The GSP sequencer is a list of I/O commands that the GSP can send to
 * the driver to perform for various purposes.  The most common usage is to
 * perform a special mid-initialization reset.
 */
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
        struct nvkm_gsp *gsp = priv;
        struct nvkm_subdev *subdev = &gsp->subdev;
        struct nvkm_device *device = subdev->device;
        rpc_run_cpu_sequencer_v17_00 *seq = repv;

        nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex);

        while (ptr < seq->cmdIndex) {
                GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr];

                ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);

                switch (cmd->opCode) {
                case GSP_SEQ_BUF_OPCODE_REG_WRITE: {
                        u32 addr = cmd->payload.regWrite.addr;
                        u32 data = cmd->payload.regWrite.val;

                        nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data);
                        nvkm_wr32(device, addr, data);

                case GSP_SEQ_BUF_OPCODE_REG_MODIFY: {
                        u32 addr = cmd->payload.regModify.addr;
                        u32 mask = cmd->payload.regModify.mask;
                        u32 data = cmd->payload.regModify.val;

                        nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data);
                        nvkm_mask(device, addr, mask, data);

                case GSP_SEQ_BUF_OPCODE_REG_POLL: {
                        u32 addr = cmd->payload.regPoll.addr;
                        u32 mask = cmd->payload.regPoll.mask;
                        u32 data = cmd->payload.regPoll.val;
                        u32 usec = cmd->payload.regPoll.timeout ?: 4000000;
                        //u32 error = cmd->payload.regPoll.error;

                        nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec);
                        nvkm_rd32(device, addr);
                        nvkm_usec(device, usec,
                                if ((nvkm_rd32(device, addr) & mask) == data)

                case GSP_SEQ_BUF_OPCODE_DELAY_US: {
                        u32 usec = cmd->payload.delayUs.val;

                        nvkm_trace(subdev, "seq usec %d\n", usec);

                case GSP_SEQ_BUF_OPCODE_REG_STORE: {
                        u32 addr = cmd->payload.regStore.addr;
                        u32 slot = cmd->payload.regStore.index;

                        seq->regSaveArea[slot] = nvkm_rd32(device, addr);
                        nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot,
                                   seq->regSaveArea[slot]);

                case GSP_SEQ_BUF_OPCODE_CORE_RESET:
                        nvkm_trace(subdev, "seq core reset\n");
                        nvkm_falcon_reset(&gsp->falcon);
                        nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080);
                        nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000);

                case GSP_SEQ_BUF_OPCODE_CORE_START:
                        nvkm_trace(subdev, "seq core start\n");
                        if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040)
                                nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002);
                                nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002);

                case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT:
                        nvkm_trace(subdev, "seq core wait halt\n");
                        nvkm_msec(device, 2000,
                                if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010)

                case GSP_SEQ_BUF_OPCODE_CORE_RESUME: {
                        struct nvkm_sec2 *sec2 = device->sec2;

                        nvkm_trace(subdev, "seq core resume\n");

                        ret = gsp->func->reset(gsp);

                        nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
                        nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));

                        nvkm_falcon_start(&sec2->falcon);

                        if (nvkm_msec(device, 2000,
                                if (nvkm_rd32(device, 0x1180f8) & 0x04000000)

                        mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040);
                        if (WARN_ON(mbox0)) {
                                nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0);

                        nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);

                        if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))

                        nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode);
nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
                dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);

nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
        mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
        if (WARN_ON(!mem->data))
r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
        struct nvkm_subdev *subdev = &gsp->subdev;
        struct nvkm_device *device = subdev->device;

        wpr2_hi = nvkm_rd32(device, 0x1fa828);
                nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");

        ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);

        wpr2_hi = nvkm_rd32(device, 0x1fa828);
        if (WARN_ON(wpr2_hi))

r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
        ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);

        nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);

        if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
        ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);

        meta = gsp->wpr_meta.data;

        meta->magic = GSP_FW_WPR_META_MAGIC;
        meta->revision = GSP_FW_WPR_META_REVISION;

        meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
        meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;

        meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
        meta->sizeOfBootloader = gsp->boot.fw.size;
        meta->bootloaderCodeOffset = gsp->boot.code_offset;
        meta->bootloaderDataOffset = gsp->boot.data_offset;
        meta->bootloaderManifestOffset = gsp->boot.manifest_offset;

        meta->sysmemAddrOfSignature = gsp->sig.addr;
        meta->sizeOfSignature = gsp->sig.size;

        meta->gspFwRsvdStart = gsp->fb.heap.addr;
        meta->nonWprHeapOffset = gsp->fb.heap.addr;
        meta->nonWprHeapSize = gsp->fb.heap.size;
        meta->gspFwWprStart = gsp->fb.wpr2.addr;
        meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
        meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
        meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
        meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
        meta->frtsOffset = gsp->fb.wpr2.frts.addr;
        meta->frtsSize = gsp->fb.wpr2.frts.size;
        meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
        meta->fbSize = gsp->fb.size;
        meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
        meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
        meta->bootCount = 0;
        meta->partitionRpcAddr = 0;
        meta->partitionRpcRequestOffset = 0;
        meta->partitionRpcReplyOffset = 0;
r535_gsp_shared_init(struct nvkm_gsp *gsp)
        gsp->shm.cmdq.size = 0x40000;
        gsp->shm.msgq.size = 0x40000;

        gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
        gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
        gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);

        ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
                                     gsp->shm.cmdq.size +

        gsp->shm.ptes.ptr = gsp->shm.mem.data;
        gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
        gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;

        for (i = 0; i < gsp->shm.ptes.nr; i++)
                gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);

        cmdq = gsp->shm.cmdq.ptr;
        cmdq->tx.version = 0;
        cmdq->tx.size = gsp->shm.cmdq.size;
        cmdq->tx.entryOff = GSP_PAGE_SIZE;
        cmdq->tx.msgSize = GSP_PAGE_SIZE;
        cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
        cmdq->tx.writePtr = 0;
        cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);

        msgq = gsp->shm.msgq.ptr;

        gsp->cmdq.cnt = cmdq->tx.msgCount;
        gsp->cmdq.wptr = &cmdq->tx.writePtr;
        gsp->cmdq.rptr = &msgq->rx.readPtr;
        gsp->msgq.cnt = cmdq->tx.msgCount;
        gsp->msgq.wptr = &msgq->tx.writePtr;
        gsp->msgq.rptr = &cmdq->rx.readPtr;
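
/*
 * Sketch of the cross-wiring above: the pointers the CPU advances
 * (cmdq write pointer, msgq read pointer) live in the cmdq header page,
 * while the pointers GSP-RM advances (cmdq read pointer, msgq write
 * pointer) live in the msgq header page, so each side only ever writes
 * into its own header.
 */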
r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
        GSP_ARGUMENTS_CACHED *args;

        ret = r535_gsp_shared_init(gsp);

        ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);

        args = gsp->rmargs.data;
        args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
        args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
        args->messageQueueInitArguments.cmdQueueOffset =
                (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
        args->messageQueueInitArguments.statQueueOffset =
                (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;

                args->srInitArguments.oldLevel = 0;
                args->srInitArguments.flags = 0;
                args->srInitArguments.bInPMTransition = 0;

                args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
                args->srInitArguments.flags = 0;
                args->srInitArguments.bInPMTransition = 1;
r535_gsp_libos_id8(const char *name)
        for (int i = 0; i < sizeof(id) && *name; i++, name++)
                id = (id << 8) | *name;
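
/*
 * Example (illustrative): r535_gsp_libos_id8("LOGINIT") packs up to eight
 * characters big-endian style into a u64, yielding 0x004c4f47494e4954
 * ("LOGINIT" is only seven bytes, so the top byte stays zero).
 */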
/**
 * create_pte_array() - creates a PTE array of a physically contiguous buffer
 * @ptes: pointer to the array
 * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
 * @size: size of the buffer
 *
 * GSP-RM sometimes expects physically-contiguous buffers to have an array of
 * "PTEs" for each page in that buffer.  Although in theory that allows for
 * the buffer to be physically discontiguous, GSP-RM does not currently
 * support that.
 *
 * In this case, the PTEs are DMA addresses of each page of the buffer.  Since
 * the buffer is physically contiguous, calculating all the PTEs is simple
 * math.
 *
 * See memdescGetPhysAddrsForGpu()
 */
static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
        unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);

        for (i = 0; i < num_pages; i++)
                ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
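
/*
 * Usage sketch (hypothetical values): a physically contiguous 64KiB buffer
 * at DMA address 0x1000000 yields sixteen PTEs, 0x1000000, 0x1001000, ...
 * 0x100f000, one per 4KiB GSP page, even if the kernel's own PAGE_SIZE is
 * larger.
 */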
/**
 * r535_gsp_libos_init() -- create the libos arguments structure
 *
 * The logging buffers are byte queues that contain encoded printf-like
 * messages from GSP-RM.  They need to be decoded by a special application
 * that can parse the buffers.
 *
 * The 'loginit' buffer contains logs from early GSP-RM init and
 * exception dumps.  The 'logrm' buffer contains the subsequent logs.  Both are
 * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
 *
 * The physical address map for the log buffer is stored in the buffer
 * itself, starting with offset 1.  Offset 0 contains the "put" pointer.
 *
 * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
 * configured for a larger page size (e.g. 64K pages), we need to give
 * the GSP an array of 4K pages.  Fortunately, since the buffer is
 * physically contiguous, it's simple math to calculate the addresses.
 *
 * The buffers must be a multiple of GSP_PAGE_SIZE.  GSP-RM also currently
 * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
 * buffers to be physically contiguous anyway.
 *
 * The memory allocated for the arguments must remain until the GSP sends the
 * init_done RPC.
 *
 * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
 * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
 */
r535_gsp_libos_init(struct nvkm_gsp *gsp)
        LibosMemoryRegionInitArgument *args;

        ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);

        args = gsp->libos.data;

        ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);

        args[0].id8 = r535_gsp_libos_id8("LOGINIT");
        args[0].pa = gsp->loginit.addr;
        args[0].size = gsp->loginit.size;
        args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
        args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
        create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);

        ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);

        args[1].id8 = r535_gsp_libos_id8("LOGINTR");
        args[1].pa = gsp->logintr.addr;
        args[1].size = gsp->logintr.size;
        args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
        args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
        create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);

        ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);

        args[2].id8 = r535_gsp_libos_id8("LOGRM");
        args[2].pa = gsp->logrm.addr;
        args[2].size = gsp->logrm.size;
        args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
        args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
        create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);

        ret = r535_gsp_rmargs_init(gsp, false);

        args[3].id8 = r535_gsp_libos_id8("RMARGS");
        args[3].pa = gsp->rmargs.addr;
        args[3].size = gsp->rmargs.size;
        args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
        args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
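
/*
 * The resulting table (sketch) hands GSP-RM four contiguous sysmem regions:
 *
 *      args[0] "LOGINIT"  64KiB  early-init/exception log
 *      args[1] "LOGINTR"  64KiB  interrupt log
 *      args[2] "LOGRM"    64KiB  runtime log
 *      args[3] "RMARGS"    4KiB  message-queue init arguments
 */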
nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
        struct scatterlist *sgl;

        dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);

        for_each_sgtable_sg(sgt, sgl, i) {
                struct page *page = sg_page(sgl);

nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
        const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
        struct scatterlist *sgl;

        ret = sg_alloc_table(sgt, pages, GFP_KERNEL);

        for_each_sgtable_sg(sgt, sgl, i) {
                struct page *page = alloc_page(GFP_KERNEL);

                        nvkm_gsp_sg_free(device, sgt);

                sg_set_page(sgl, page, PAGE_SIZE, 0);

        ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
                nvkm_gsp_sg_free(device, sgt);
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
        for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
                nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
/**
 * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
 *
 * The GSP uses a three-level page table, called radix3, to map the firmware.
 * Each 64-bit "pointer" in the table is either the bus address of an entry in
 * the next table (for levels 0 and 1) or the bus address of the next page in
 * the GSP firmware image itself.
 *
 * Level 0 contains a single entry in one page that points to the first page
 * of level 1.
 *
 * Level 1, since it's also only one page in size, contains up to 512 entries,
 * one for each page in Level 2.
 *
 * Level 2 can be up to 512 pages in size, and each of those entries points to
 * the next page of the firmware image.  Since there can be up to 512*512
 * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
 *
 * Internally, the GSP has its window into system memory, but the base
 * physical address of the aperture is not 0.  In fact, it varies depending on
 * the GPU architecture.  Since the GPU is a PCI device, this window is
 * accessed via DMA and is therefore bound by IOMMU translation.  The end
 * result is that GSP-RM must translate the bus addresses in the table to GSP
 * physical addresses.  All this should happen transparently.
 *
 * Returns 0 on success, or negative error code
 *
 * See kgspCreateRadix3_IMPL
 */
nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
                   struct nvkm_gsp_radix3 *rx3)
        for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {

                rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
                rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
                                                      &rx3->mem[i].addr, GFP_KERNEL);
                if (WARN_ON(!rx3->mem[i].data))

                ptes = rx3->mem[i].data;
                        struct scatterlist *sgl;

                        for_each_sgtable_dma_sg(sgt, sgl, idx) {
                                for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
                                        *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);

                        for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
                                *ptes++ = addr + GSP_PAGE_SIZE * j;

                size = rx3->mem[i].size;
                addr = rx3->mem[i].addr;
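
/*
 * Worked example (illustrative): a 32MiB firmware image spans 0x2000 4KiB
 * pages, so level 2 needs 0x2000 * 8 bytes = 16 pages of PTEs, level 1
 * needs 16 entries (a single page), and level 0 is always one entry in one
 * page pointing at level 1.
 */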
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
        u32 mbox0 = 0xff, mbox1 = 0xff;

                GspFwWprMeta *meta = gsp->wpr_meta.data;
                u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;

                ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);

                ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);

                ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);

                sr = gsp->sr.meta.data;
                sr->magic = GSP_FW_SR_META_MAGIC;
                sr->revision = GSP_FW_SR_META_REVISION;
                sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
                sr->sizeOfSuspendResumeData = len;

                mbox0 = lower_32_bits(gsp->sr.meta.addr);
                mbox1 = upper_32_bits(gsp->sr.meta.addr);

        ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);

        nvkm_msec(gsp->subdev.device, 2000,
                if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)

        nvkm_falcon_reset(&gsp->falcon);

        ret = nvkm_gsp_fwsec_sb(gsp);

        ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);

        gsp->running = false;
r535_gsp_init(struct nvkm_gsp *gsp)
        if (!gsp->sr.meta.data) {
                mbox0 = lower_32_bits(gsp->wpr_meta.addr);
                mbox1 = upper_32_bits(gsp->wpr_meta.addr);

                r535_gsp_rmargs_init(gsp, true);

                mbox0 = lower_32_bits(gsp->sr.meta.addr);
                mbox1 = upper_32_bits(gsp->sr.meta.addr);

        /* Execute booter to handle (eventually...) booting GSP-RM. */
        ret = r535_gsp_booter_load(gsp, mbox0, mbox1);

        ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);

        gsp->running = true;

        if (gsp->sr.meta.data) {
                nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
                nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
                nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);

        ret = r535_gsp_postinit(gsp);
r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
        const struct firmware *fw = gsp->fws.bl;
        const struct nvfw_bin_hdr *hdr;
        RM_RISCV_UCODE_DESC *desc;

        hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
        desc = (void *)fw->data + hdr->header_offset;

        ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);

        memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);

        gsp->boot.code_offset = desc->monitorCodeOffset;
        gsp->boot.data_offset = desc->monitorDataOffset;
        gsp->boot.manifest_offset = desc->manifestOffset;
        gsp->boot.app_version = desc->appVersion;
static const struct nvkm_firmware_func
        .type = NVKM_FIRMWARE_IMG_SGT,

r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
        const u8 *img = gsp->fws.rm->data;
        const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
        const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
        const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];

        for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
                if (!strcmp(&names[shdr->sh_name], name)) {
                        *pdata = &img[shdr->sh_offset];
                        *psize = shdr->sh_size;

        nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
        nvkm_firmware_put(gsp->fws.bl);

        nvkm_firmware_put(gsp->fws.booter.unload);
        gsp->fws.booter.unload = NULL;
        nvkm_firmware_put(gsp->fws.booter.load);
        gsp->fws.booter.load = NULL;
        nvkm_firmware_put(gsp->fws.rm);
r535_gsp_dtor(struct nvkm_gsp *gsp)
        idr_destroy(&gsp->client_id.idr);
        mutex_destroy(&gsp->client_id.mutex);

        nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
        nvkm_gsp_mem_dtor(gsp, &gsp->sig);
        nvkm_firmware_dtor(&gsp->fw);

        nvkm_falcon_fw_dtor(&gsp->booter.unload);
        nvkm_falcon_fw_dtor(&gsp->booter.load);

        mutex_destroy(&gsp->msgq.mutex);
        mutex_destroy(&gsp->cmdq.mutex);

        r535_gsp_dtor_fws(gsp);
r535_gsp_oneinit(struct nvkm_gsp *gsp)
        struct nvkm_device *device = gsp->subdev.device;

        mutex_init(&gsp->cmdq.mutex);
        mutex_init(&gsp->msgq.mutex);

        ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
                                     &device->sec2->falcon, &gsp->booter.load);

        ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
                                     &device->sec2->falcon, &gsp->booter.unload);

        /* Load GSP firmware from ELF image into DMA-accessible memory. */
        ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);

        ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);

        /* Load relevant signature from ELF image. */
        ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);

        ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);

        memcpy(gsp->sig.data, data, size);

        /* Build radix3 page table for ELF image. */
        ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);

        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
                              r535_gsp_msg_run_cpu_sequencer, gsp);
        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
                              r535_gsp_msg_rc_triggered, gsp);
        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
                              r535_gsp_msg_mmu_fault_queued, gsp);
        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
        r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);

        ret = r535_gsp_rm_boot_ctor(gsp);

        /* Release FW images - we've copied them to DMA buffers now. */
        r535_gsp_dtor_fws(gsp);

        /* Calculate FB layout. */
        gsp->fb.wpr2.frts.size = 0x100000;
        gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;

        gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
        gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);

        gsp->fb.wpr2.elf.size = gsp->fw.len;
        gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);

                u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);

                gsp->fb.wpr2.heap.size =
                        gsp->func->wpr_heap.os_carveout_size +
                        gsp->func->wpr_heap.base_size +
                        ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
                        ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);

                gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);

        gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
        gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);

        gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
        gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;

        gsp->fb.heap.size = 0x100000;
        gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
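
/*
 * Resulting layout at the top of FB (sketch, addresses computed downward
 * by the chain of ALIGN_DOWN()s above):
 *
 *      VGA workspace   (top of FB)
 *      FRTS            1MiB
 *      boot loader     gsp->boot.fw.size
 *      GSP-RM ELF      gsp->fw.len
 *      WPR2 heap       sized from FB size, clamped to wpr_heap.min_size
 *      GspFwWprMeta    (start of WPR2)
 *      non-WPR heap    1MiB
 */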
        ret = nvkm_gsp_fwsec_frts(gsp);

        ret = r535_gsp_libos_init(gsp);

        ret = r535_gsp_wpr_meta_init(gsp);

        ret = r535_gsp_rpc_set_system_info(gsp);

        ret = r535_gsp_rpc_set_registry(gsp);

        /* Reset GSP into RISC-V mode. */
        ret = gsp->func->reset(gsp);

        nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
        nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));

        mutex_init(&gsp->client_id.mutex);
        idr_init(&gsp->client_id.idr);
r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
                 const struct firmware **pfw)
        snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
        return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
        struct nvkm_subdev *subdev = &gsp->subdev;

        if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))

        if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
            (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
            (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
            (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
                r535_gsp_dtor_fws(gsp);
#define NVKM_GSP_FIRMWARE(chip)                                           \
MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin");         \
MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin");       \
MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin");          \
MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")

NVKM_GSP_FIRMWARE(tu102);
NVKM_GSP_FIRMWARE(tu104);
NVKM_GSP_FIRMWARE(tu106);

NVKM_GSP_FIRMWARE(tu116);
NVKM_GSP_FIRMWARE(tu117);

NVKM_GSP_FIRMWARE(ga100);

NVKM_GSP_FIRMWARE(ga102);
NVKM_GSP_FIRMWARE(ga103);
NVKM_GSP_FIRMWARE(ga104);
NVKM_GSP_FIRMWARE(ga106);
NVKM_GSP_FIRMWARE(ga107);

NVKM_GSP_FIRMWARE(ad102);
NVKM_GSP_FIRMWARE(ad103);
NVKM_GSP_FIRMWARE(ad104);
NVKM_GSP_FIRMWARE(ad106);
NVKM_GSP_FIRMWARE(ad107);