/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
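/*
 * Note: __sme_page_pa() below yields the page's physical address with the
 * SME encryption bit (C-bit) applied via __sme_set(), i.e. an address form
 * suitable for structures such as the VMCB that are handed to hardware.
 */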
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
static const struct svm_host_save_msrs {
	u32 index;		/* Index of the MSR */
	bool sev_es_restored;	/* True if MSR is restored on SEV-ES VMEXIT */
} host_save_user_msrs[] = {
	{ .index = MSR_STAR,			.sev_es_restored = true },
	{ .index = MSR_LSTAR,			.sev_es_restored = true },
	{ .index = MSR_CSTAR,			.sev_es_restored = true },
	{ .index = MSR_SYSCALL_MASK,		.sev_es_restored = true },
	{ .index = MSR_KERNEL_GS_BASE,		.sev_es_restored = true },
	{ .index = MSR_FS_BASE,			.sev_es_restored = true },
	{ .index = MSR_IA32_SYSENTER_CS,	.sev_es_restored = true },
	{ .index = MSR_IA32_SYSENTER_ESP,	.sev_es_restored = true },
	{ .index = MSR_IA32_SYSENTER_EIP,	.sev_es_restored = true },
	{ .index = MSR_TSC_AUX,			.sev_es_restored = false },
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
#define MAX_DIRECT_ACCESS_MSRS	18
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
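/*
 * Note on the clean-bits protocol: a clear bit in vmcb->control.clean tells
 * the CPU that the corresponding VMCB area may have been modified and must
 * be reloaded from memory on the next VMRUN; a set bit permits the use of
 * the cached copy.
 */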
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
};
struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};
struct svm_nested_state {
	struct vmcb *hsave;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet. */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;
};
struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	struct svm_nested_state nested;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;
	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;
	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;
	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;
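	/*
	 * The shadow bitmaps record the interception state KVM wants for
	 * each directly accessed MSR, so that it can be re-applied when the
	 * per-vCPU MSR permission map is rebuilt (e.g. after a userspace
	 * MSR filter change).
	 */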
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct kvm_host_map ghcb_map;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};
struct svm_cpu_data {
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};
DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);
static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}
static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}
static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
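/*
 * Typical usage (illustrative sketch): after software modifies a VMCB field,
 * the matching clean bit must be cleared so hardware picks up the change,
 * e.g. when CR0 is updated:
 *
 *	svm->vmcb->save.cr0 = cr0;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */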
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}
static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}
static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
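/*
 * Usage sketch: callers pass an INTERCEPT_* index from <asm/svm.h>, e.g.
 *
 *	svm_set_intercept(svm, INTERCEPT_VMRUN);
 *	if (svm_is_intercept(svm, INTERCEPT_MSR_PROT))
 *		...
 *
 * recalc_intercepts() then re-merges L1 and L2 intercepts when a nested
 * guest is running.
 */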
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}
static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}
static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}
static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
#define MSR_INVALID				0xffffffffU
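/*
 * Despite the MSR_ prefix, the three CR3 masks above describe reserved/MBZ
 * CR3 bits (legacy, legacy PAE and long mode) used when validating a nested
 * guest's CR3; MSR_INVALID is the sentinel svm_msrpm_offset() returns for an
 * MSR that has no slot in the MSR permission map.
 */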
extern bool dump_invalid_vmcb;
u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
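/*
 * These are the return codes used by the nested #VMEXIT reflection helpers
 * (e.g. nested_svm_exit_handled() and nested_svm_exit_special()) to say
 * whether an exit is handled by L0, reflected to L1, or needs more checks.
 */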
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void sync_nested_vmcb_control(struct vcpu_svm *svm);

extern struct kvm_x86_nested_ops svm_nested_ops;
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)
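/*
 * Layout of an AVIC physical ID table entry, as implied by the masks above:
 * bits 7:0 hold the host physical APIC ID, bits 51:12 the backing page
 * address, bit 62 the "is running" flag and bit 63 the valid flag.
 */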
#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
#define GHCB_VERSION_MAX		1ULL
#define GHCB_VERSION_MIN		1ULL

#define GHCB_MSR_INFO_POS		0
#define GHCB_MSR_INFO_MASK		(BIT_ULL(12) - 1)

#define GHCB_MSR_SEV_INFO_RESP		0x001
#define GHCB_MSR_SEV_INFO_REQ		0x002
#define GHCB_MSR_VER_MAX_POS		48
#define GHCB_MSR_VER_MAX_MASK		0xffff
#define GHCB_MSR_VER_MIN_POS		32
#define GHCB_MSR_VER_MIN_MASK		0xffff
#define GHCB_MSR_CBIT_POS		24
#define GHCB_MSR_CBIT_MASK		0xff
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
	 GHCB_MSR_SEV_INFO_RESP)
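/*
 * Usage sketch (illustrative): the SEV_INFO response of the GHCB MSR
 * protocol advertises the supported GHCB version range and the position of
 * the guest's encryption bit, e.g.:
 *
 *	msr_val = GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, GHCB_VERSION_MIN,
 *				    sev_enc_bit);
 *
 * where sev_enc_bit stands for the C-bit position reported via CPUID.
 */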
#define GHCB_MSR_CPUID_REQ		0x004
#define GHCB_MSR_CPUID_RESP		0x005
#define GHCB_MSR_CPUID_FUNC_POS		32
#define GHCB_MSR_CPUID_FUNC_MASK	0xffffffff
#define GHCB_MSR_CPUID_VALUE_POS	32
#define GHCB_MSR_CPUID_VALUE_MASK	0xffffffff
#define GHCB_MSR_CPUID_REG_POS		30
#define GHCB_MSR_CPUID_REG_MASK		0x3
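/*
 * Worked example of the MSR-protocol CPUID request encoding defined above:
 * a guest asking for CPUID 0x8000001f, register RAX (register code 0) sends
 *
 *	(0x8000001fULL << GHCB_MSR_CPUID_FUNC_POS) |
 *	(0ULL << GHCB_MSR_CPUID_REG_POS) | GHCB_MSR_CPUID_REQ
 *	== 0x8000001f00000004
 *
 * and the result comes back in the same layout with GHCB_MSR_CPUID_RESP.
 */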
#define GHCB_MSR_TERM_REQ		0x100
#define GHCB_MSR_TERM_REASON_SET_POS	12
#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
#define GHCB_MSR_TERM_REASON_POS	16
#define GHCB_MSR_TERM_REASON_MASK	0xff
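/*
 * Sketch of a termination request as composed from the fields above: the
 * reason-set goes in bits 15:12 and the reason code in bits 23:16, i.e.
 *
 *	msr_val = GHCB_MSR_TERM_REQ |
 *		  (reason_set << GHCB_MSR_TERM_REASON_SET_POS) |
 *		  (reason_code << GHCB_MSR_TERM_REASON_POS);
 */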
extern unsigned int max_sev_asid;

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}
void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);
void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);