| 1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 2 | |
| 3 | #include <linux/kvm_host.h> |
| 4 | |
| 5 | #include "irq.h" |
| 6 | #include "mmu.h" |
| 7 | #include "kvm_cache_regs.h" |
| 8 | #include "x86.h" |
| 9 | #include "smm.h" |
| 10 | #include "cpuid.h" |
| 11 | #include "pmu.h" |
| 12 | |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/mod_devicetable.h> |
| 15 | #include <linux/kernel.h> |
| 16 | #include <linux/vmalloc.h> |
| 17 | #include <linux/highmem.h> |
| 18 | #include <linux/amd-iommu.h> |
| 19 | #include <linux/sched.h> |
| 20 | #include <linux/trace_events.h> |
| 21 | #include <linux/slab.h> |
| 22 | #include <linux/hashtable.h> |
| 23 | #include <linux/objtool.h> |
| 24 | #include <linux/psp-sev.h> |
| 25 | #include <linux/file.h> |
| 26 | #include <linux/pagemap.h> |
| 27 | #include <linux/swap.h> |
| 28 | #include <linux/rwsem.h> |
| 29 | #include <linux/cc_platform.h> |
| 30 | #include <linux/smp.h> |
| 31 | #include <linux/string_choices.h> |
| 32 | #include <linux/mutex.h> |
| 33 | |
| 34 | #include <asm/apic.h> |
| 35 | #include <asm/msr.h> |
| 36 | #include <asm/perf_event.h> |
| 37 | #include <asm/tlbflush.h> |
| 38 | #include <asm/desc.h> |
| 39 | #include <asm/debugreg.h> |
| 40 | #include <asm/kvm_para.h> |
| 41 | #include <asm/irq_remapping.h> |
| 42 | #include <asm/spec-ctrl.h> |
| 43 | #include <asm/cpu_device_id.h> |
| 44 | #include <asm/traps.h> |
| 45 | #include <asm/reboot.h> |
| 46 | #include <asm/fpu/api.h> |
| 47 | |
| 48 | #include <trace/events/ipi.h> |
| 49 | |
| 50 | #include "trace.h" |
| 51 | |
| 52 | #include "svm.h" |
| 53 | #include "svm_ops.h" |
| 54 | |
| 55 | #include "kvm_onhyperv.h" |
| 56 | #include "svm_onhyperv.h" |
| 57 | |
| 58 | MODULE_AUTHOR("Qumranet" ); |
| 59 | MODULE_DESCRIPTION("KVM support for SVM (AMD-V) extensions" ); |
| 60 | MODULE_LICENSE("GPL" ); |
| 61 | |
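/*
 * When built as a module, publish a CPU feature match table so that
 * userspace (udev/modprobe) autoloads kvm-amd on CPUs that advertise SVM.
 */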
| 62 | #ifdef MODULE |
| 63 | static const struct x86_cpu_id svm_cpu_id[] = { |
| 64 | X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL), |
| 65 | {} |
| 66 | }; |
| 67 | MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); |
| 68 | #endif |
| 69 | |
| 70 | #define SEG_TYPE_LDT 2 |
| 71 | #define SEG_TYPE_BUSY_TSS16 3 |
| 72 | |
| 73 | static bool erratum_383_found __read_mostly; |
| 74 | |
/*
 * Set osvw_len to a higher value when updated Revision Guides are
 * published and we know what the new status bits are.
 */
| 79 | static uint64_t osvw_len = 4, osvw_status; |
| 80 | |
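/*
 * Per-CPU cache of the last value written to MSR_AMD64_TSC_RATIO, used by
 * __svm_write_tsc_multiplier() to elide redundant WRMSRs.
 */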
| 81 | static DEFINE_PER_CPU(u64, current_tsc_ratio); |
| 82 | |
/*
 * These two parameters configure the controls for Pause-Loop Exiting:
 *
 * pause_filter_count: On processors that support PAUSE filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *	count value.  On VMRUN this value is loaded into an internal counter.
 *	Each time a PAUSE instruction is executed, the counter is decremented
 *	until it reaches zero, at which point a #VMEXIT is generated if the
 *	PAUSE intercept is enabled.  Refer to AMD APM Vol 2, Section 15.14.4,
 *	"Pause Intercept Filtering", for more details.
 *	A non-zero value also indicates that PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (also indicated by CPUID Fn8000_000A_EDX), which puts
 *	an upper bound on the amount of time a guest is allowed to execute in
 *	a pause loop.  In this mode, a 16-bit pause filter threshold field is
 *	added to the VMCB.  The threshold value is a cycle count used to reset
 *	the pause counter.  As with simple pause filtering, VMRUN loads the
 *	pause count value from the VMCB into an internal counter.  Then, on
 *	each PAUSE instruction, the hardware checks the number of cycles
 *	elapsed since the most recent PAUSE against the pause filter
 *	threshold.  If the elapsed cycle count is greater than the threshold,
 *	the internal pause count is reloaded from the VMCB and execution
 *	continues.  If the elapsed cycle count is less than the threshold, the
 *	internal pause count is decremented.  If the count value is less than
 *	zero and the PAUSE intercept is enabled, a #VMEXIT is triggered.  If
 *	advanced pause filtering is supported and the pause filter threshold
 *	field is set to zero, the filter operates in the simpler, count-only
 *	mode.
 */
| 112 | |
| 113 | static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP; |
| 114 | module_param(pause_filter_thresh, ushort, 0444); |
| 115 | |
| 116 | static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW; |
| 117 | module_param(pause_filter_count, ushort, 0444); |
| 118 | |
| 119 | /* Default doubles per-vcpu window every exit. */ |
| 120 | static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW; |
| 121 | module_param(pause_filter_count_grow, ushort, 0444); |
| 122 | |
| 123 | /* Default resets per-vcpu window every exit to pause_filter_count. */ |
| 124 | static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK; |
| 125 | module_param(pause_filter_count_shrink, ushort, 0444); |
| 126 | |
| 127 | /* Default is to compute the maximum so we can never overflow. */ |
| 128 | static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX; |
| 129 | module_param(pause_filter_count_max, ushort, 0444); |
| 130 | |
| 131 | /* |
| 132 | * Use nested page tables by default. Note, NPT may get forced off by |
| 133 | * svm_hardware_setup() if it's unsupported by hardware or the host kernel. |
| 134 | */ |
| 135 | bool npt_enabled = true; |
| 136 | module_param_named(npt, npt_enabled, bool, 0444); |
| 137 | |
| 138 | /* allow nested virtualization in KVM/SVM */ |
| 139 | static int nested = true; |
| 140 | module_param(nested, int, 0444); |
| 141 | |
| 142 | /* enable/disable Next RIP Save */ |
| 143 | int nrips = true; |
| 144 | module_param(nrips, int, 0444); |
| 145 | |
| 146 | /* enable/disable Virtual VMLOAD VMSAVE */ |
| 147 | static int vls = true; |
| 148 | module_param(vls, int, 0444); |
| 149 | |
| 150 | /* enable/disable Virtual GIF */ |
| 151 | int vgif = true; |
| 152 | module_param(vgif, int, 0444); |
| 153 | |
| 154 | /* enable/disable LBR virtualization */ |
| 155 | int lbrv = true; |
| 156 | module_param(lbrv, int, 0444); |
| 157 | |
| 158 | static int tsc_scaling = true; |
| 159 | module_param(tsc_scaling, int, 0444); |
| 160 | |
| 161 | module_param(enable_device_posted_irqs, bool, 0444); |
| 162 | |
| 163 | bool __read_mostly dump_invalid_vmcb; |
| 164 | module_param(dump_invalid_vmcb, bool, 0644); |
| 165 | |
| 166 | |
| 167 | bool intercept_smi = true; |
| 168 | module_param(intercept_smi, bool, 0444); |
| 169 | |
| 170 | bool vnmi = true; |
| 171 | module_param(vnmi, bool, 0444); |
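/*
 * Illustrative usage, not taken from this file: most of the knobs above are
 * read-only module parameters, e.g. nested paging can be disabled with
 * "modprobe kvm-amd npt=0", or via "kvm-amd.npt=0" on the kernel command line.
 */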
| 172 | |
| 173 | static bool svm_gp_erratum_intercept = true; |
| 174 | |
static u8 rsm_ins_bytes[] = "\x0f\xaa";
| 176 | |
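/*
 * Physical address (including the SME C-bit, if present) of the single I/O
 * permission map shared by all VMCBs via iopm_base_pa.  Every bit is set, so
 * all I/O port accesses are intercepted.
 */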
| 177 | static unsigned long iopm_base; |
| 178 | |
| 179 | DEFINE_PER_CPU(struct svm_cpu_data, svm_data); |
| 180 | |
| 181 | static DEFINE_MUTEX(vmcb_dump_mutex); |
| 182 | |
| 183 | /* |
| 184 | * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via |
| 185 | * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE. |
| 186 | * |
| 187 | * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to |
| 188 | * defer the restoration of TSC_AUX until the CPU returns to userspace. |
| 189 | */ |
| 190 | int tsc_aux_uret_slot __ro_after_init = -1; |
| 191 | |
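/*
 * The NPT page-table depth follows the host paging mode: 5-level when the
 * host kernel uses LA57, otherwise 4-level; 32-bit hosts use 3-level PAE.
 */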
| 192 | static int get_npt_level(void) |
| 193 | { |
| 194 | #ifdef CONFIG_X86_64 |
| 195 | return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
| 196 | #else |
| 197 | return PT32E_ROOT_LEVEL; |
| 198 | #endif |
| 199 | } |
| 200 | |
| 201 | int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
| 202 | { |
| 203 | struct vcpu_svm *svm = to_svm(vcpu); |
| 204 | u64 old_efer = vcpu->arch.efer; |
| 205 | vcpu->arch.efer = efer; |
| 206 | |
| 207 | if (!npt_enabled) { |
| 208 | /* Shadow paging assumes NX to be available. */ |
| 209 | efer |= EFER_NX; |
| 210 | |
| 211 | if (!(efer & EFER_LMA)) |
| 212 | efer &= ~EFER_LME; |
| 213 | } |
| 214 | |
| 215 | if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) { |
| 216 | if (!(efer & EFER_SVME)) { |
| 217 | svm_leave_nested(vcpu); |
svm_set_gif(svm, true);
| 219 | /* #GP intercept is still needed for vmware backdoor */ |
| 220 | if (!enable_vmware_backdoor) |
| 221 | clr_exception_intercept(svm, GP_VECTOR); |
| 222 | |
| 223 | /* |
| 224 | * Free the nested guest state, unless we are in SMM. |
| 225 | * In this case we will return to the nested guest |
| 226 | * as soon as we leave SMM. |
| 227 | */ |
| 228 | if (!is_smm(vcpu)) |
| 229 | svm_free_nested(svm); |
| 230 | |
| 231 | } else { |
| 232 | int ret = svm_allocate_nested(svm); |
| 233 | |
| 234 | if (ret) { |
| 235 | vcpu->arch.efer = old_efer; |
| 236 | return ret; |
| 237 | } |
| 238 | |
| 239 | /* |
| 240 | * Never intercept #GP for SEV guests, KVM can't |
| 241 | * decrypt guest memory to workaround the erratum. |
| 242 | */ |
if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
| 244 | set_exception_intercept(svm, GP_VECTOR); |
| 245 | } |
| 246 | } |
| 247 | |
| 248 | svm->vmcb->save.efer = efer | EFER_SVME; |
vmcb_mark_dirty(svm->vmcb, VMCB_CR);
| 250 | return 0; |
| 251 | } |
| 252 | |
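/*
 * SVM exposes only a single interrupt-shadow bit in int_state, so it maps to
 * both the STI and MOV SS shadow flags tracked by KVM's emulator.
 */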
| 253 | static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) |
| 254 | { |
| 255 | struct vcpu_svm *svm = to_svm(vcpu); |
| 256 | u32 ret = 0; |
| 257 | |
| 258 | if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) |
| 259 | ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; |
| 260 | return ret; |
| 261 | } |
| 262 | |
| 263 | static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
| 264 | { |
| 265 | struct vcpu_svm *svm = to_svm(vcpu); |
| 266 | |
| 267 | if (mask == 0) |
| 268 | svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; |
| 269 | else |
| 270 | svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; |
| 271 | |
| 272 | } |
| 273 | |
| 274 | static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu, |
| 275 | int emul_type, |
| 276 | bool commit_side_effects) |
| 277 | { |
| 278 | struct vcpu_svm *svm = to_svm(vcpu); |
| 279 | unsigned long old_rflags; |
| 280 | |
| 281 | /* |
| 282 | * SEV-ES does not expose the next RIP. The RIP update is controlled by |
| 283 | * the type of exit and the #VC handler in the guest. |
| 284 | */ |
| 285 | if (sev_es_guest(kvm: vcpu->kvm)) |
| 286 | goto done; |
| 287 | |
| 288 | if (nrips && svm->vmcb->control.next_rip != 0) { |
| 289 | WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); |
| 290 | svm->next_rip = svm->vmcb->control.next_rip; |
| 291 | } |
| 292 | |
| 293 | if (!svm->next_rip) { |
| 294 | if (unlikely(!commit_side_effects)) |
| 295 | old_rflags = svm->vmcb->save.rflags; |
| 296 | |
| 297 | if (!kvm_emulate_instruction(vcpu, emulation_type: emul_type)) |
| 298 | return 0; |
| 299 | |
| 300 | if (unlikely(!commit_side_effects)) |
| 301 | svm->vmcb->save.rflags = old_rflags; |
| 302 | } else { |
| 303 | kvm_rip_write(vcpu, svm->next_rip); |
| 304 | } |
| 305 | |
| 306 | done: |
| 307 | if (likely(commit_side_effects)) |
| 308 | svm_set_interrupt_shadow(vcpu, mask: 0); |
| 309 | |
| 310 | return 1; |
| 311 | } |
| 312 | |
| 313 | static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu) |
| 314 | { |
| 315 | return __svm_skip_emulated_instruction(vcpu, EMULTYPE_SKIP, commit_side_effects: true); |
| 316 | } |
| 317 | |
| 318 | static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu, u8 vector) |
| 319 | { |
| 320 | const int emul_type = EMULTYPE_SKIP | EMULTYPE_SKIP_SOFT_INT | |
| 321 | EMULTYPE_SET_SOFT_INT_VECTOR(vector); |
| 322 | unsigned long rip, old_rip = kvm_rip_read(vcpu); |
| 323 | struct vcpu_svm *svm = to_svm(vcpu); |
| 324 | |
| 325 | /* |
| 326 | * Due to architectural shortcomings, the CPU doesn't always provide |
| 327 | * NextRIP, e.g. if KVM intercepted an exception that occurred while |
| 328 | * the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip |
| 329 | * the instruction even if NextRIP is supported to acquire the next |
| 330 | * RIP so that it can be shoved into the NextRIP field, otherwise |
| 331 | * hardware will fail to advance guest RIP during event injection. |
| 332 | * Drop the exception/interrupt if emulation fails and effectively |
| 333 | * retry the instruction, it's the least awful option. If NRIPS is |
| 334 | * in use, the skip must not commit any side effects such as clearing |
| 335 | * the interrupt shadow or RFLAGS.RF. |
| 336 | */ |
| 337 | if (!__svm_skip_emulated_instruction(vcpu, emul_type, commit_side_effects: !nrips)) |
| 338 | return -EIO; |
| 339 | |
| 340 | rip = kvm_rip_read(vcpu); |
| 341 | |
| 342 | /* |
| 343 | * Save the injection information, even when using next_rip, as the |
| 344 | * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection |
| 345 | * doesn't complete due to a VM-Exit occurring while the CPU is |
| 346 | * vectoring the event. Decoding the instruction isn't guaranteed to |
| 347 | * work as there may be no backing instruction, e.g. if the event is |
| 348 | * being injected by L1 for L2, or if the guest is patching INT3 into |
| 349 | * a different instruction. |
| 350 | */ |
| 351 | svm->soft_int_injected = true; |
| 352 | svm->soft_int_csbase = svm->vmcb->save.cs.base; |
| 353 | svm->soft_int_old_rip = old_rip; |
| 354 | svm->soft_int_next_rip = rip; |
| 355 | |
| 356 | if (nrips) |
| 357 | kvm_rip_write(vcpu, old_rip); |
| 358 | |
| 359 | if (static_cpu_has(X86_FEATURE_NRIPS)) |
| 360 | svm->vmcb->control.next_rip = rip; |
| 361 | |
| 362 | return 0; |
| 363 | } |
| 364 | |
| 365 | static void svm_inject_exception(struct kvm_vcpu *vcpu) |
| 366 | { |
| 367 | struct kvm_queued_exception *ex = &vcpu->arch.exception; |
| 368 | struct vcpu_svm *svm = to_svm(vcpu); |
| 369 | |
| 370 | kvm_deliver_exception_payload(vcpu, ex); |
| 371 | |
| 372 | if (kvm_exception_is_soft(ex->vector) && |
| 373 | svm_update_soft_interrupt_rip(vcpu, vector: ex->vector)) |
| 374 | return; |
| 375 | |
| 376 | svm->vmcb->control.event_inj = ex->vector |
| 377 | | SVM_EVTINJ_VALID |
| 378 | | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0) |
| 379 | | SVM_EVTINJ_TYPE_EXEPT; |
| 380 | svm->vmcb->control.event_inj_err = ex->error_code; |
| 381 | } |
| 382 | |
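/*
 * Erratum 383 (Family 10h, flagged as X86_BUG_AMD_TLB_MMATCH): the CPU can
 * raise a spurious machine check on a TLB multi-match while nested paging is
 * in use.  Setting the workaround bit in MSR_AMD64_DC_CFG below mitigates it;
 * erratum_383_found is then consulted when KVM handles guest machine checks.
 */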
| 383 | static void svm_init_erratum_383(void) |
| 384 | { |
| 385 | u64 val; |
| 386 | |
| 387 | if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH)) |
| 388 | return; |
| 389 | |
| 390 | /* Use _safe variants to not break nested virtualization */ |
| 391 | if (native_read_msr_safe(MSR_AMD64_DC_CFG, p: &val)) |
| 392 | return; |
| 393 | |
| 394 | val |= (1ULL << 47); |
| 395 | |
| 396 | native_write_msr_safe(MSR_AMD64_DC_CFG, val); |
| 397 | |
| 398 | erratum_383_found = true; |
| 399 | } |
| 400 | |
| 401 | static void svm_init_osvw(struct kvm_vcpu *vcpu) |
| 402 | { |
| 403 | /* |
| 404 | * Guests should see errata 400 and 415 as fixed (assuming that |
| 405 | * HLT and IO instructions are intercepted). |
| 406 | */ |
| 407 | vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; |
| 408 | vcpu->arch.osvw.status = osvw_status & ~(6ULL); |
| 409 | |
| 410 | /* |
| 411 | * By increasing VCPU's osvw.length to 3 we are telling the guest that |
| 412 | * all osvw.status bits inside that length, including bit 0 (which is |
| 413 | * reserved for erratum 298), are valid. However, if host processor's |
| 414 | * osvw_len is 0 then osvw_status[0] carries no information. We need to |
| 415 | * be conservative here and therefore we tell the guest that erratum 298 |
| 416 | * is present (because we really don't know). |
| 417 | */ |
| 418 | if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) |
| 419 | vcpu->arch.osvw.status |= 1; |
| 420 | } |
| 421 | |
| 422 | static bool __kvm_is_svm_supported(void) |
| 423 | { |
| 424 | int cpu = smp_processor_id(); |
| 425 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
| 426 | |
if (c->x86_vendor != X86_VENDOR_AMD &&
    c->x86_vendor != X86_VENDOR_HYGON) {
pr_err("CPU %d isn't AMD or Hygon\n", cpu);
return false;
}

if (!cpu_has(c, X86_FEATURE_SVM)) {
pr_err("SVM not supported by CPU %d\n", cpu);
return false;
}

if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
pr_info("KVM is unsupported when running as an SEV guest\n");
return false;
}
| 442 | |
| 443 | return true; |
| 444 | } |
| 445 | |
| 446 | static bool kvm_is_svm_supported(void) |
| 447 | { |
| 448 | bool supported; |
| 449 | |
| 450 | migrate_disable(); |
| 451 | supported = __kvm_is_svm_supported(); |
| 452 | migrate_enable(); |
| 453 | |
| 454 | return supported; |
| 455 | } |
| 456 | |
| 457 | static int svm_check_processor_compat(void) |
| 458 | { |
| 459 | if (!__kvm_is_svm_supported()) |
| 460 | return -EIO; |
| 461 | |
| 462 | return 0; |
| 463 | } |
| 464 | |
| 465 | static void __svm_write_tsc_multiplier(u64 multiplier) |
| 466 | { |
| 467 | if (multiplier == __this_cpu_read(current_tsc_ratio)) |
| 468 | return; |
| 469 | |
| 470 | wrmsrq(MSR_AMD64_TSC_RATIO, val: multiplier); |
| 471 | __this_cpu_write(current_tsc_ratio, multiplier); |
| 472 | } |
| 473 | |
| 474 | static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd) |
| 475 | { |
| 476 | return &sd->save_area->host_sev_es_save; |
| 477 | } |
| 478 | |
| 479 | static inline void kvm_cpu_svm_disable(void) |
| 480 | { |
| 481 | uint64_t efer; |
| 482 | |
| 483 | wrmsrq(MSR_VM_HSAVE_PA, val: 0); |
| 484 | rdmsrq(MSR_EFER, efer); |
| 485 | if (efer & EFER_SVME) { |
| 486 | /* |
| 487 | * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and |
| 488 | * NMI aren't blocked. |
| 489 | */ |
| 490 | stgi(); |
| 491 | wrmsrq(MSR_EFER, val: efer & ~EFER_SVME); |
| 492 | } |
| 493 | } |
| 494 | |
| 495 | static void svm_emergency_disable_virtualization_cpu(void) |
| 496 | { |
| 497 | kvm_rebooting = true; |
| 498 | |
| 499 | kvm_cpu_svm_disable(); |
| 500 | } |
| 501 | |
| 502 | static void svm_disable_virtualization_cpu(void) |
| 503 | { |
| 504 | /* Make sure we clean up behind us */ |
| 505 | if (tsc_scaling) |
| 506 | __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT); |
| 507 | |
| 508 | kvm_cpu_svm_disable(); |
| 509 | |
| 510 | amd_pmu_disable_virt(); |
| 511 | } |
| 512 | |
| 513 | static int svm_enable_virtualization_cpu(void) |
| 514 | { |
| 515 | |
| 516 | struct svm_cpu_data *sd; |
| 517 | uint64_t efer; |
| 518 | int me = raw_smp_processor_id(); |
| 519 | |
| 520 | rdmsrq(MSR_EFER, efer); |
| 521 | if (efer & EFER_SVME) |
| 522 | return -EBUSY; |
| 523 | |
| 524 | sd = per_cpu_ptr(&svm_data, me); |
| 525 | sd->asid_generation = 1; |
| 526 | sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; |
| 527 | sd->next_asid = sd->max_asid + 1; |
| 528 | sd->min_asid = max_sev_asid + 1; |
| 529 | |
| 530 | wrmsrq(MSR_EFER, val: efer | EFER_SVME); |
| 531 | |
| 532 | wrmsrq(MSR_VM_HSAVE_PA, val: sd->save_area_pa); |
| 533 | |
| 534 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
| 535 | /* |
* Set the default value, even if we don't use TSC scaling,
* to avoid leaving a stale value in the MSR.
| 538 | */ |
| 539 | __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT); |
| 540 | } |
| 541 | |
| 542 | |
| 543 | /* |
| 544 | * Get OSVW bits. |
| 545 | * |
| 546 | * Note that it is possible to have a system with mixed processor |
| 547 | * revisions and therefore different OSVW bits. If bits are not the same |
| 548 | * on different processors then choose the worst case (i.e. if erratum |
| 549 | * is present on one processor and not on another then assume that the |
| 550 | * erratum is present everywhere). |
| 551 | */ |
| 552 | if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { |
| 553 | u64 len, status = 0; |
| 554 | int err; |
| 555 | |
| 556 | err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, p: &len); |
| 557 | if (!err) |
| 558 | err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, p: &status); |
| 559 | |
| 560 | if (err) |
| 561 | osvw_status = osvw_len = 0; |
| 562 | else { |
| 563 | if (len < osvw_len) |
| 564 | osvw_len = len; |
| 565 | osvw_status |= status; |
| 566 | osvw_status &= (1ULL << osvw_len) - 1; |
| 567 | } |
| 568 | } else |
| 569 | osvw_status = osvw_len = 0; |
| 570 | |
| 571 | svm_init_erratum_383(); |
| 572 | |
| 573 | amd_pmu_enable_virt(); |
| 574 | |
| 575 | return 0; |
| 576 | } |
| 577 | |
| 578 | static void svm_cpu_uninit(int cpu) |
| 579 | { |
| 580 | struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); |
| 581 | |
| 582 | if (!sd->save_area) |
| 583 | return; |
| 584 | |
| 585 | kfree(objp: sd->sev_vmcbs); |
| 586 | __free_page(__sme_pa_to_page(sd->save_area_pa)); |
| 587 | sd->save_area_pa = 0; |
| 588 | sd->save_area = NULL; |
| 589 | } |
| 590 | |
| 591 | static int svm_cpu_init(int cpu) |
| 592 | { |
| 593 | struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); |
| 594 | struct page *save_area_page; |
| 595 | int ret = -ENOMEM; |
| 596 | |
| 597 | memset(sd, 0, sizeof(struct svm_cpu_data)); |
| 598 | save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL); |
| 599 | if (!save_area_page) |
| 600 | return ret; |
| 601 | |
| 602 | ret = sev_cpu_init(sd); |
| 603 | if (ret) |
| 604 | goto free_save_area; |
| 605 | |
| 606 | sd->save_area = page_address(save_area_page); |
| 607 | sd->save_area_pa = __sme_page_pa(page: save_area_page); |
| 608 | return 0; |
| 609 | |
| 610 | free_save_area: |
| 611 | __free_page(save_area_page); |
| 612 | return ret; |
| 613 | |
| 614 | } |
| 615 | |
| 616 | static void set_dr_intercepts(struct vcpu_svm *svm) |
| 617 | { |
| 618 | struct vmcb *vmcb = svm->vmcb01.ptr; |
| 619 | |
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
| 636 | |
| 637 | recalc_intercepts(svm); |
| 638 | } |
| 639 | |
| 640 | static void clr_dr_intercepts(struct vcpu_svm *svm) |
| 641 | { |
| 642 | struct vmcb *vmcb = svm->vmcb01.ptr; |
| 643 | |
| 644 | vmcb->control.intercepts[INTERCEPT_DR] = 0; |
| 645 | |
| 646 | recalc_intercepts(svm); |
| 647 | } |
| 648 | |
| 649 | static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) |
| 650 | { |
| 651 | /* |
| 652 | * For non-nested case: |
| 653 | * If the L01 MSR bitmap does not intercept the MSR, then we need to |
| 654 | * save it. |
| 655 | * |
| 656 | * For nested case: |
| 657 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
| 658 | * save it. |
| 659 | */ |
| 660 | void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm : |
| 661 | to_svm(vcpu)->msrpm; |
| 662 | |
| 663 | return svm_test_msr_bitmap_write(bitmap: msrpm, msr); |
| 664 | } |
| 665 | |
| 666 | void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set) |
| 667 | { |
| 668 | struct vcpu_svm *svm = to_svm(vcpu); |
| 669 | void *msrpm = svm->msrpm; |
| 670 | |
| 671 | /* Don't disable interception for MSRs userspace wants to handle. */ |
| 672 | if (type & MSR_TYPE_R) { |
| 673 | if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) |
| 674 | svm_clear_msr_bitmap_read(bitmap: msrpm, msr); |
| 675 | else |
| 676 | svm_set_msr_bitmap_read(bitmap: msrpm, msr); |
| 677 | } |
| 678 | |
| 679 | if (type & MSR_TYPE_W) { |
| 680 | if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) |
| 681 | svm_clear_msr_bitmap_write(bitmap: msrpm, msr); |
| 682 | else |
| 683 | svm_set_msr_bitmap_write(bitmap: msrpm, msr); |
| 684 | } |
| 685 | |
| 686 | svm_hv_vmcb_dirty_nested_enlightenments(vcpu); |
| 687 | svm->nested.force_msr_bitmap_recalc = true; |
| 688 | } |
| 689 | |
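/*
 * Note: per the AMD APM, the MSR permission map dedicates two bits (read and
 * write) to each MSR in the architecturally defined ranges, while the I/O
 * permission map uses one bit per port.  In both maps a set bit means
 * "intercept", which is why the allocation below starts out with all bits set.
 */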
| 690 | void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask) |
| 691 | { |
| 692 | unsigned int order = get_order(size); |
| 693 | struct page *pages = alloc_pages(gfp_mask, order); |
| 694 | void *pm; |
| 695 | |
| 696 | if (!pages) |
| 697 | return NULL; |
| 698 | |
| 699 | /* |
| 700 | * Set all bits in the permissions map so that all MSR and I/O accesses |
| 701 | * are intercepted by default. |
| 702 | */ |
| 703 | pm = page_address(pages); |
| 704 | memset(pm, 0xff, PAGE_SIZE * (1 << order)); |
| 705 | |
| 706 | return pm; |
| 707 | } |
| 708 | |
| 709 | static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu) |
| 710 | { |
| 711 | struct vcpu_svm *svm = to_svm(vcpu); |
| 712 | bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); |
| 713 | |
| 714 | if (intercept == svm->lbr_msrs_intercepted) |
| 715 | return; |
| 716 | |
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTFROMIP, MSR_TYPE_RW, intercept);
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTTOIP, MSR_TYPE_RW, intercept);

if (sev_es_guest(vcpu->kvm))
svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
| 724 | |
| 725 | svm->lbr_msrs_intercepted = intercept; |
| 726 | } |
| 727 | |
| 728 | void svm_vcpu_free_msrpm(void *msrpm) |
| 729 | { |
| 730 | __free_pages(virt_to_page(msrpm), order: get_order(MSRPM_SIZE)); |
| 731 | } |
| 732 | |
| 733 | static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu) |
| 734 | { |
| 735 | struct vcpu_svm *svm = to_svm(vcpu); |
| 736 | |
svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);

#ifdef CONFIG_X86_64
svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
| 747 | #endif |
| 748 | |
| 749 | if (lbrv) |
| 750 | svm_recalc_lbr_msr_intercepts(vcpu); |
| 751 | |
| 752 | if (cpu_feature_enabled(X86_FEATURE_IBPB)) |
| 753 | svm_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, type: MSR_TYPE_W, |
| 754 | set: !guest_has_pred_cmd_msr(vcpu)); |
| 755 | |
| 756 | if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D)) |
| 757 | svm_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, type: MSR_TYPE_W, |
| 758 | set: !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D)); |
| 759 | |
| 760 | /* |
| 761 | * Disable interception of SPEC_CTRL if KVM doesn't need to manually |
| 762 | * context switch the MSR (SPEC_CTRL is virtualized by the CPU), or if |
| 763 | * the guest has a non-zero SPEC_CTRL value, i.e. is likely actively |
| 764 | * using SPEC_CTRL. |
| 765 | */ |
| 766 | if (cpu_feature_enabled(X86_FEATURE_V_SPEC_CTRL)) |
| 767 | svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, type: MSR_TYPE_RW, |
| 768 | set: !guest_has_spec_ctrl_msr(vcpu)); |
| 769 | else |
| 770 | svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, type: MSR_TYPE_RW, |
| 771 | set: !svm->spec_ctrl); |
| 772 | |
| 773 | /* |
| 774 | * Intercept SYSENTER_EIP and SYSENTER_ESP when emulating an Intel CPU, |
* as AMD hardware only stores 32 bits, whereas Intel CPUs track 64 bits.
| 776 | */ |
| 777 | svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, type: MSR_TYPE_RW, |
| 778 | set: guest_cpuid_is_intel_compatible(vcpu)); |
| 779 | svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, type: MSR_TYPE_RW, |
| 780 | set: guest_cpuid_is_intel_compatible(vcpu)); |
| 781 | |
| 782 | if (kvm_aperfmperf_in_guest(vcpu->kvm)) { |
| 783 | svm_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, type: MSR_TYPE_R); |
| 784 | svm_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R); |
| 785 | } |
| 786 | |
| 787 | if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) { |
| 788 | bool shstk_enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK); |
| 789 | |
| 790 | svm_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, !shstk_enabled); |
| 791 | svm_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, !shstk_enabled); |
| 792 | svm_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, !shstk_enabled); |
| 793 | svm_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, !shstk_enabled); |
| 794 | svm_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, !shstk_enabled); |
| 795 | svm_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, !shstk_enabled); |
| 796 | } |
| 797 | |
| 798 | if (sev_es_guest(kvm: vcpu->kvm)) |
| 799 | sev_es_recalc_msr_intercepts(vcpu); |
| 800 | |
| 801 | /* |
| 802 | * x2APIC intercepts are modified on-demand and cannot be filtered by |
| 803 | * userspace. |
| 804 | */ |
| 805 | } |
| 806 | |
| 807 | void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb) |
| 808 | { |
| 809 | to_vmcb->save.dbgctl = from_vmcb->save.dbgctl; |
| 810 | to_vmcb->save.br_from = from_vmcb->save.br_from; |
| 811 | to_vmcb->save.br_to = from_vmcb->save.br_to; |
| 812 | to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from; |
| 813 | to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to; |
| 814 | |
| 815 | vmcb_mark_dirty(vmcb: to_vmcb, bit: VMCB_LBR); |
| 816 | } |
| 817 | |
| 818 | static void __svm_enable_lbrv(struct kvm_vcpu *vcpu) |
| 819 | { |
| 820 | to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; |
| 821 | } |
| 822 | |
| 823 | void svm_enable_lbrv(struct kvm_vcpu *vcpu) |
| 824 | { |
| 825 | __svm_enable_lbrv(vcpu); |
| 826 | svm_recalc_lbr_msr_intercepts(vcpu); |
| 827 | } |
| 828 | |
| 829 | static void __svm_disable_lbrv(struct kvm_vcpu *vcpu) |
| 830 | { |
| 831 | KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm); |
| 832 | to_svm(vcpu)->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; |
| 833 | } |
| 834 | |
| 835 | void svm_update_lbrv(struct kvm_vcpu *vcpu) |
| 836 | { |
| 837 | struct vcpu_svm *svm = to_svm(vcpu); |
| 838 | bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK; |
| 839 | bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) || |
| 840 | (is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) && |
| 841 | (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)); |
| 842 | |
| 843 | if (enable_lbrv && !current_enable_lbrv) |
| 844 | __svm_enable_lbrv(vcpu); |
| 845 | else if (!enable_lbrv && current_enable_lbrv) |
| 846 | __svm_disable_lbrv(vcpu); |
| 847 | |
| 848 | /* |
| 849 | * During nested transitions, it is possible that the current VMCB has |
| 850 | * LBR_CTL set, but the previous LBR_CTL had it cleared (or vice versa). |
| 851 | * In this case, even though LBR_CTL does not need an update, intercepts |
| 852 | * do, so always recalculate the intercepts here. |
| 853 | */ |
| 854 | svm_recalc_lbr_msr_intercepts(vcpu); |
| 855 | } |
| 856 | |
| 857 | void disable_nmi_singlestep(struct vcpu_svm *svm) |
| 858 | { |
| 859 | svm->nmi_singlestep = false; |
| 860 | |
| 861 | if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { |
| 862 | /* Clear our flags if they were not set by the guest */ |
| 863 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) |
| 864 | svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; |
| 865 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) |
| 866 | svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; |
| 867 | } |
| 868 | } |
| 869 | |
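/*
 * Dynamic PLE window sizing (see grow_ple_window()/shrink_ple_window()
 * below): the window grows on PAUSE-loop exits and is shrunk when the vCPU
 * is reloaded after being scheduled out.  Rough sketch, assuming the default
 * module parameters (count 3000, grow factor 2, shrink factor 0): repeated
 * PLE exits take the window 3000 -> 6000 -> 12000 -> ..., capped at
 * pause_filter_count_max, and a shrink resets it back to pause_filter_count.
 */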
| 870 | static void grow_ple_window(struct kvm_vcpu *vcpu) |
| 871 | { |
| 872 | struct vcpu_svm *svm = to_svm(vcpu); |
| 873 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 874 | int old = control->pause_filter_count; |
| 875 | |
| 876 | if (kvm_pause_in_guest(vcpu->kvm)) |
| 877 | return; |
| 878 | |
| 879 | control->pause_filter_count = __grow_ple_window(old, |
| 880 | pause_filter_count, |
| 881 | pause_filter_count_grow, |
| 882 | pause_filter_count_max); |
| 883 | |
| 884 | if (control->pause_filter_count != old) { |
| 885 | vmcb_mark_dirty(vmcb: svm->vmcb, bit: VMCB_INTERCEPTS); |
| 886 | trace_kvm_ple_window_update(vcpu->vcpu_id, |
| 887 | control->pause_filter_count, old); |
| 888 | } |
| 889 | } |
| 890 | |
| 891 | static void shrink_ple_window(struct kvm_vcpu *vcpu) |
| 892 | { |
| 893 | struct vcpu_svm *svm = to_svm(vcpu); |
| 894 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 895 | int old = control->pause_filter_count; |
| 896 | |
| 897 | if (kvm_pause_in_guest(vcpu->kvm)) |
| 898 | return; |
| 899 | |
| 900 | control->pause_filter_count = |
| 901 | __shrink_ple_window(old, |
| 902 | pause_filter_count, |
| 903 | pause_filter_count_shrink, |
| 904 | pause_filter_count); |
| 905 | if (control->pause_filter_count != old) { |
| 906 | vmcb_mark_dirty(vmcb: svm->vmcb, bit: VMCB_INTERCEPTS); |
| 907 | trace_kvm_ple_window_update(vcpu->vcpu_id, |
| 908 | control->pause_filter_count, old); |
| 909 | } |
| 910 | } |
| 911 | |
| 912 | static void svm_hardware_unsetup(void) |
| 913 | { |
| 914 | int cpu; |
| 915 | |
| 916 | avic_hardware_unsetup(); |
| 917 | |
| 918 | sev_hardware_unsetup(); |
| 919 | |
| 920 | for_each_possible_cpu(cpu) |
| 921 | svm_cpu_uninit(cpu); |
| 922 | |
| 923 | __free_pages(page: __sme_pa_to_page(pa: iopm_base), order: get_order(IOPM_SIZE)); |
| 924 | iopm_base = 0; |
| 925 | } |
| 926 | |
| 927 | static void init_seg(struct vmcb_seg *seg) |
| 928 | { |
| 929 | seg->selector = 0; |
| 930 | seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | |
| 931 | SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ |
| 932 | seg->limit = 0xffff; |
| 933 | seg->base = 0; |
| 934 | } |
| 935 | |
| 936 | static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) |
| 937 | { |
| 938 | seg->selector = 0; |
| 939 | seg->attrib = SVM_SELECTOR_P_MASK | type; |
| 940 | seg->limit = 0xffff; |
| 941 | seg->base = 0; |
| 942 | } |
| 943 | |
| 944 | static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu) |
| 945 | { |
| 946 | struct vcpu_svm *svm = to_svm(vcpu); |
| 947 | |
| 948 | return svm->nested.ctl.tsc_offset; |
| 949 | } |
| 950 | |
| 951 | static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) |
| 952 | { |
| 953 | struct vcpu_svm *svm = to_svm(vcpu); |
| 954 | |
| 955 | return svm->tsc_ratio_msr; |
| 956 | } |
| 957 | |
| 958 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu) |
| 959 | { |
| 960 | struct vcpu_svm *svm = to_svm(vcpu); |
| 961 | |
| 962 | svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; |
| 963 | svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset; |
| 964 | vmcb_mark_dirty(vmcb: svm->vmcb, bit: VMCB_INTERCEPTS); |
| 965 | } |
| 966 | |
| 967 | void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu) |
| 968 | { |
| 969 | preempt_disable(); |
| 970 | if (to_svm(vcpu)->guest_state_loaded) |
| 971 | __svm_write_tsc_multiplier(multiplier: vcpu->arch.tsc_scaling_ratio); |
| 972 | preempt_enable(); |
| 973 | } |
| 974 | |
| 975 | /* Evaluate instruction intercepts that depend on guest CPUID features. */ |
| 976 | static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu) |
| 977 | { |
| 978 | struct vcpu_svm *svm = to_svm(vcpu); |
| 979 | |
| 980 | /* |
| 981 | * Intercept INVPCID if shadow paging is enabled to sync/free shadow |
| 982 | * roots, or if INVPCID is disabled in the guest to inject #UD. |
| 983 | */ |
| 984 | if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) { |
| 985 | if (!npt_enabled || |
| 986 | !guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_INVPCID)) |
| 987 | svm_set_intercept(svm, bit: INTERCEPT_INVPCID); |
| 988 | else |
| 989 | svm_clr_intercept(svm, bit: INTERCEPT_INVPCID); |
| 990 | } |
| 991 | |
| 992 | if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) { |
| 993 | if (guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP)) |
| 994 | svm_clr_intercept(svm, bit: INTERCEPT_RDTSCP); |
| 995 | else |
| 996 | svm_set_intercept(svm, bit: INTERCEPT_RDTSCP); |
| 997 | } |
| 998 | |
| 999 | if (guest_cpuid_is_intel_compatible(vcpu)) { |
| 1000 | svm_set_intercept(svm, bit: INTERCEPT_VMLOAD); |
| 1001 | svm_set_intercept(svm, bit: INTERCEPT_VMSAVE); |
| 1002 | svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; |
| 1003 | } else { |
| 1004 | /* |
| 1005 | * If hardware supports Virtual VMLOAD VMSAVE then enable it |
| 1006 | * in VMCB and clear intercepts to avoid #VMEXIT. |
| 1007 | */ |
| 1008 | if (vls) { |
| 1009 | svm_clr_intercept(svm, bit: INTERCEPT_VMLOAD); |
| 1010 | svm_clr_intercept(svm, bit: INTERCEPT_VMSAVE); |
| 1011 | svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; |
| 1012 | } |
| 1013 | } |
| 1014 | } |
| 1015 | |
| 1016 | static void svm_recalc_intercepts(struct kvm_vcpu *vcpu) |
| 1017 | { |
| 1018 | svm_recalc_instruction_intercepts(vcpu); |
| 1019 | svm_recalc_msr_intercepts(vcpu); |
| 1020 | } |
| 1021 | |
| 1022 | static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event) |
| 1023 | { |
| 1024 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1025 | struct vmcb *vmcb = svm->vmcb01.ptr; |
| 1026 | struct vmcb_control_area *control = &vmcb->control; |
| 1027 | struct vmcb_save_area *save = &vmcb->save; |
| 1028 | |
| 1029 | svm_set_intercept(svm, bit: INTERCEPT_CR0_READ); |
| 1030 | svm_set_intercept(svm, bit: INTERCEPT_CR3_READ); |
| 1031 | svm_set_intercept(svm, bit: INTERCEPT_CR4_READ); |
| 1032 | svm_set_intercept(svm, bit: INTERCEPT_CR0_WRITE); |
| 1033 | svm_set_intercept(svm, bit: INTERCEPT_CR3_WRITE); |
| 1034 | svm_set_intercept(svm, bit: INTERCEPT_CR4_WRITE); |
| 1035 | if (!kvm_vcpu_apicv_active(vcpu)) |
| 1036 | svm_set_intercept(svm, bit: INTERCEPT_CR8_WRITE); |
| 1037 | |
| 1038 | set_dr_intercepts(svm); |
| 1039 | |
| 1040 | set_exception_intercept(svm, PF_VECTOR); |
| 1041 | set_exception_intercept(svm, UD_VECTOR); |
| 1042 | set_exception_intercept(svm, MC_VECTOR); |
| 1043 | set_exception_intercept(svm, AC_VECTOR); |
| 1044 | set_exception_intercept(svm, DB_VECTOR); |
| 1045 | /* |
| 1046 | * Guest access to VMware backdoor ports could legitimately |
| 1047 | * trigger #GP because of TSS I/O permission bitmap. |
| 1048 | * We intercept those #GP and allow access to them anyway |
| 1049 | * as VMware does. |
| 1050 | */ |
| 1051 | if (enable_vmware_backdoor) |
| 1052 | set_exception_intercept(svm, GP_VECTOR); |
| 1053 | |
| 1054 | svm_set_intercept(svm, bit: INTERCEPT_INTR); |
| 1055 | svm_set_intercept(svm, bit: INTERCEPT_NMI); |
| 1056 | |
| 1057 | if (intercept_smi) |
| 1058 | svm_set_intercept(svm, bit: INTERCEPT_SMI); |
| 1059 | |
| 1060 | svm_set_intercept(svm, bit: INTERCEPT_SELECTIVE_CR0); |
| 1061 | svm_set_intercept(svm, bit: INTERCEPT_RDPMC); |
| 1062 | svm_set_intercept(svm, bit: INTERCEPT_CPUID); |
| 1063 | svm_set_intercept(svm, bit: INTERCEPT_INVD); |
| 1064 | svm_set_intercept(svm, bit: INTERCEPT_INVLPG); |
| 1065 | svm_set_intercept(svm, bit: INTERCEPT_INVLPGA); |
| 1066 | svm_set_intercept(svm, bit: INTERCEPT_IOIO_PROT); |
| 1067 | svm_set_intercept(svm, bit: INTERCEPT_MSR_PROT); |
| 1068 | svm_set_intercept(svm, bit: INTERCEPT_TASK_SWITCH); |
| 1069 | svm_set_intercept(svm, bit: INTERCEPT_SHUTDOWN); |
| 1070 | svm_set_intercept(svm, bit: INTERCEPT_VMRUN); |
| 1071 | svm_set_intercept(svm, bit: INTERCEPT_VMMCALL); |
| 1072 | svm_set_intercept(svm, bit: INTERCEPT_VMLOAD); |
| 1073 | svm_set_intercept(svm, bit: INTERCEPT_VMSAVE); |
| 1074 | svm_set_intercept(svm, bit: INTERCEPT_STGI); |
| 1075 | svm_set_intercept(svm, bit: INTERCEPT_CLGI); |
| 1076 | svm_set_intercept(svm, bit: INTERCEPT_SKINIT); |
| 1077 | svm_set_intercept(svm, bit: INTERCEPT_WBINVD); |
| 1078 | svm_set_intercept(svm, bit: INTERCEPT_XSETBV); |
| 1079 | svm_set_intercept(svm, bit: INTERCEPT_RDPRU); |
| 1080 | svm_set_intercept(svm, bit: INTERCEPT_RSM); |
| 1081 | |
| 1082 | if (!kvm_mwait_in_guest(vcpu->kvm)) { |
| 1083 | svm_set_intercept(svm, bit: INTERCEPT_MONITOR); |
| 1084 | svm_set_intercept(svm, bit: INTERCEPT_MWAIT); |
| 1085 | } |
| 1086 | |
| 1087 | if (!kvm_hlt_in_guest(vcpu->kvm)) { |
| 1088 | if (cpu_feature_enabled(X86_FEATURE_IDLE_HLT)) |
| 1089 | svm_set_intercept(svm, bit: INTERCEPT_IDLE_HLT); |
| 1090 | else |
| 1091 | svm_set_intercept(svm, bit: INTERCEPT_HLT); |
| 1092 | } |
| 1093 | |
| 1094 | control->iopm_base_pa = iopm_base; |
| 1095 | control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); |
| 1096 | control->int_ctl = V_INTR_MASKING_MASK; |
| 1097 | |
| 1098 | init_seg(seg: &save->es); |
| 1099 | init_seg(seg: &save->ss); |
| 1100 | init_seg(seg: &save->ds); |
| 1101 | init_seg(seg: &save->fs); |
| 1102 | init_seg(seg: &save->gs); |
| 1103 | |
| 1104 | save->cs.selector = 0xf000; |
| 1105 | save->cs.base = 0xffff0000; |
| 1106 | /* Executable/Readable Code Segment */ |
| 1107 | save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | |
| 1108 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; |
| 1109 | save->cs.limit = 0xffff; |
| 1110 | |
| 1111 | save->gdtr.base = 0; |
| 1112 | save->gdtr.limit = 0xffff; |
| 1113 | save->idtr.base = 0; |
| 1114 | save->idtr.limit = 0xffff; |
| 1115 | |
| 1116 | init_sys_seg(seg: &save->ldtr, SEG_TYPE_LDT); |
| 1117 | init_sys_seg(seg: &save->tr, SEG_TYPE_BUSY_TSS16); |
| 1118 | |
| 1119 | if (npt_enabled) { |
| 1120 | /* Setup VMCB for Nested Paging */ |
| 1121 | control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; |
| 1122 | svm_clr_intercept(svm, bit: INTERCEPT_INVLPG); |
| 1123 | clr_exception_intercept(svm, PF_VECTOR); |
| 1124 | svm_clr_intercept(svm, bit: INTERCEPT_CR3_READ); |
| 1125 | svm_clr_intercept(svm, bit: INTERCEPT_CR3_WRITE); |
| 1126 | save->g_pat = vcpu->arch.pat; |
| 1127 | save->cr3 = 0; |
| 1128 | } |
| 1129 | svm->current_vmcb->asid_generation = 0; |
| 1130 | svm->asid = 0; |
| 1131 | |
| 1132 | svm->nested.vmcb12_gpa = INVALID_GPA; |
| 1133 | svm->nested.last_vmcb12_gpa = INVALID_GPA; |
| 1134 | |
| 1135 | if (!kvm_pause_in_guest(vcpu->kvm)) { |
| 1136 | control->pause_filter_count = pause_filter_count; |
| 1137 | if (pause_filter_thresh) |
| 1138 | control->pause_filter_thresh = pause_filter_thresh; |
| 1139 | svm_set_intercept(svm, bit: INTERCEPT_PAUSE); |
| 1140 | } else { |
| 1141 | svm_clr_intercept(svm, bit: INTERCEPT_PAUSE); |
| 1142 | } |
| 1143 | |
| 1144 | if (kvm_vcpu_apicv_active(vcpu)) |
| 1145 | avic_init_vmcb(svm, vmcb); |
| 1146 | |
| 1147 | if (vnmi) |
| 1148 | svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK; |
| 1149 | |
| 1150 | if (vgif) { |
| 1151 | svm_clr_intercept(svm, bit: INTERCEPT_STGI); |
| 1152 | svm_clr_intercept(svm, bit: INTERCEPT_CLGI); |
| 1153 | svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; |
| 1154 | } |
| 1155 | |
| 1156 | if (vcpu->kvm->arch.bus_lock_detection_enabled) |
| 1157 | svm_set_intercept(svm, bit: INTERCEPT_BUSLOCK); |
| 1158 | |
| 1159 | if (sev_guest(kvm: vcpu->kvm)) |
| 1160 | sev_init_vmcb(svm, init_event); |
| 1161 | |
| 1162 | svm_hv_init_vmcb(vmcb); |
| 1163 | |
| 1164 | kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu); |
| 1165 | |
| 1166 | vmcb_mark_all_dirty(vmcb); |
| 1167 | |
| 1168 | enable_gif(svm); |
| 1169 | } |
| 1170 | |
| 1171 | static void __svm_vcpu_reset(struct kvm_vcpu *vcpu) |
| 1172 | { |
| 1173 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1174 | |
| 1175 | svm_init_osvw(vcpu); |
| 1176 | |
| 1177 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) |
| 1178 | vcpu->arch.microcode_version = 0x01000065; |
| 1179 | svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio; |
| 1180 | |
| 1181 | svm->nmi_masked = false; |
| 1182 | svm->awaiting_iret_completion = false; |
| 1183 | } |
| 1184 | |
| 1185 | static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
| 1186 | { |
| 1187 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1188 | |
| 1189 | svm->spec_ctrl = 0; |
| 1190 | svm->virt_spec_ctrl = 0; |
| 1191 | |
| 1192 | init_vmcb(vcpu, init_event); |
| 1193 | |
| 1194 | if (!init_event) |
| 1195 | __svm_vcpu_reset(vcpu); |
| 1196 | } |
| 1197 | |
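/*
 * Switch the active VMCB, e.g. between vmcb01 (used to run L1) and the
 * nested VMCB used to run L2.
 */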
| 1198 | void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) |
| 1199 | { |
| 1200 | svm->current_vmcb = target_vmcb; |
| 1201 | svm->vmcb = target_vmcb->ptr; |
| 1202 | } |
| 1203 | |
| 1204 | static int svm_vcpu_precreate(struct kvm *kvm) |
| 1205 | { |
| 1206 | return avic_alloc_physical_id_table(kvm); |
| 1207 | } |
| 1208 | |
| 1209 | static int svm_vcpu_create(struct kvm_vcpu *vcpu) |
| 1210 | { |
| 1211 | struct vcpu_svm *svm; |
| 1212 | struct page *vmcb01_page; |
| 1213 | int err; |
| 1214 | |
| 1215 | BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); |
| 1216 | svm = to_svm(vcpu); |
| 1217 | |
| 1218 | err = -ENOMEM; |
| 1219 | vmcb01_page = snp_safe_alloc_page(); |
| 1220 | if (!vmcb01_page) |
| 1221 | goto out; |
| 1222 | |
| 1223 | err = sev_vcpu_create(vcpu); |
| 1224 | if (err) |
| 1225 | goto error_free_vmcb_page; |
| 1226 | |
| 1227 | err = avic_init_vcpu(svm); |
| 1228 | if (err) |
| 1229 | goto error_free_sev; |
| 1230 | |
| 1231 | svm->msrpm = svm_vcpu_alloc_msrpm(); |
| 1232 | if (!svm->msrpm) { |
| 1233 | err = -ENOMEM; |
| 1234 | goto error_free_sev; |
| 1235 | } |
| 1236 | |
| 1237 | svm->x2avic_msrs_intercepted = true; |
| 1238 | svm->lbr_msrs_intercepted = true; |
| 1239 | |
| 1240 | svm->vmcb01.ptr = page_address(vmcb01_page); |
| 1241 | svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); |
| 1242 | svm_switch_vmcb(svm, target_vmcb: &svm->vmcb01); |
| 1243 | |
| 1244 | svm->guest_state_loaded = false; |
| 1245 | |
| 1246 | return 0; |
| 1247 | |
| 1248 | error_free_sev: |
| 1249 | sev_free_vcpu(vcpu); |
| 1250 | error_free_vmcb_page: |
| 1251 | __free_page(vmcb01_page); |
| 1252 | out: |
| 1253 | return err; |
| 1254 | } |
| 1255 | |
| 1256 | static void svm_vcpu_free(struct kvm_vcpu *vcpu) |
| 1257 | { |
| 1258 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1259 | |
| 1260 | WARN_ON_ONCE(!list_empty(&svm->ir_list)); |
| 1261 | |
| 1262 | svm_leave_nested(vcpu); |
| 1263 | svm_free_nested(svm); |
| 1264 | |
| 1265 | sev_free_vcpu(vcpu); |
| 1266 | |
| 1267 | __free_page(__sme_pa_to_page(svm->vmcb01.pa)); |
| 1268 | svm_vcpu_free_msrpm(msrpm: svm->msrpm); |
| 1269 | } |
| 1270 | |
| 1271 | #ifdef CONFIG_CPU_MITIGATIONS |
| 1272 | static DEFINE_SPINLOCK(srso_lock); |
| 1273 | static atomic_t srso_nr_vms; |
| 1274 | |
| 1275 | static void svm_srso_clear_bp_spec_reduce(void *ign) |
| 1276 | { |
| 1277 | struct svm_cpu_data *sd = this_cpu_ptr(&svm_data); |
| 1278 | |
| 1279 | if (!sd->bp_spec_reduce_set) |
| 1280 | return; |
| 1281 | |
| 1282 | msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); |
| 1283 | sd->bp_spec_reduce_set = false; |
| 1284 | } |
| 1285 | |
| 1286 | static void svm_srso_vm_destroy(void) |
| 1287 | { |
| 1288 | if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) |
| 1289 | return; |
| 1290 | |
| 1291 | if (atomic_dec_return(v: &srso_nr_vms)) |
| 1292 | return; |
| 1293 | |
| 1294 | guard(spinlock)(l: &srso_lock); |
| 1295 | |
| 1296 | /* |
| 1297 | * Verify a new VM didn't come along, acquire the lock, and increment |
| 1298 | * the count before this task acquired the lock. |
| 1299 | */ |
| 1300 | if (atomic_read(v: &srso_nr_vms)) |
| 1301 | return; |
| 1302 | |
| 1303 | on_each_cpu(func: svm_srso_clear_bp_spec_reduce, NULL, wait: 1); |
| 1304 | } |
| 1305 | |
| 1306 | static void svm_srso_vm_init(void) |
| 1307 | { |
| 1308 | if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) |
| 1309 | return; |
| 1310 | |
| 1311 | /* |
| 1312 | * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0 |
| 1313 | * transition, i.e. destroying the last VM, is fully complete, e.g. so |
| 1314 | * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs. |
| 1315 | */ |
| 1316 | if (atomic_inc_not_zero(v: &srso_nr_vms)) |
| 1317 | return; |
| 1318 | |
| 1319 | guard(spinlock)(l: &srso_lock); |
| 1320 | |
| 1321 | atomic_inc(v: &srso_nr_vms); |
| 1322 | } |
| 1323 | #else |
| 1324 | static void svm_srso_vm_init(void) { } |
| 1325 | static void svm_srso_vm_destroy(void) { } |
| 1326 | #endif |
| 1327 | |
| 1328 | static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) |
| 1329 | { |
| 1330 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1331 | struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); |
| 1332 | |
| 1333 | if (sev_es_guest(kvm: vcpu->kvm)) |
| 1334 | sev_es_unmap_ghcb(svm); |
| 1335 | |
| 1336 | if (svm->guest_state_loaded) |
| 1337 | return; |
| 1338 | |
| 1339 | /* |
| 1340 | * Save additional host state that will be restored on VMEXIT (sev-es) |
| 1341 | * or subsequent vmload of host save area. |
| 1342 | */ |
| 1343 | vmsave(pa: sd->save_area_pa); |
| 1344 | if (sev_es_guest(kvm: vcpu->kvm)) |
| 1345 | sev_es_prepare_switch_to_guest(svm, hostsa: sev_es_host_save_area(sd)); |
| 1346 | |
| 1347 | if (tsc_scaling) |
| 1348 | __svm_write_tsc_multiplier(multiplier: vcpu->arch.tsc_scaling_ratio); |
| 1349 | |
| 1350 | /* |
| 1351 | * TSC_AUX is always virtualized (context switched by hardware) for |
| 1352 | * SEV-ES guests when the feature is available. For non-SEV-ES guests, |
| 1353 | * context switch TSC_AUX via the user_return MSR infrastructure (not |
| 1354 | * all CPUs support TSC_AUX virtualization). |
| 1355 | */ |
| 1356 | if (likely(tsc_aux_uret_slot >= 0) && |
| 1357 | (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(kvm: vcpu->kvm))) |
| 1358 | kvm_set_user_return_msr(index: tsc_aux_uret_slot, val: svm->tsc_aux, mask: -1ull); |
| 1359 | |
| 1360 | if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) && |
| 1361 | !sd->bp_spec_reduce_set) { |
| 1362 | sd->bp_spec_reduce_set = true; |
| 1363 | msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); |
| 1364 | } |
| 1365 | svm->guest_state_loaded = true; |
| 1366 | } |
| 1367 | |
| 1368 | static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) |
| 1369 | { |
| 1370 | to_svm(vcpu)->guest_state_loaded = false; |
| 1371 | } |
| 1372 | |
| 1373 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
| 1374 | { |
| 1375 | if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm)) |
| 1376 | shrink_ple_window(vcpu); |
| 1377 | |
| 1378 | if (kvm_vcpu_apicv_active(vcpu)) |
| 1379 | avic_vcpu_load(vcpu, cpu); |
| 1380 | } |
| 1381 | |
| 1382 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) |
| 1383 | { |
| 1384 | if (kvm_vcpu_apicv_active(vcpu)) |
| 1385 | avic_vcpu_put(vcpu); |
| 1386 | |
| 1387 | svm_prepare_host_switch(vcpu); |
| 1388 | |
| 1389 | ++vcpu->stat.host_state_reload; |
| 1390 | } |
| 1391 | |
| 1392 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) |
| 1393 | { |
| 1394 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1395 | unsigned long rflags = svm->vmcb->save.rflags; |
| 1396 | |
| 1397 | if (svm->nmi_singlestep) { |
| 1398 | /* Hide our flags if they were not set by the guest */ |
| 1399 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) |
| 1400 | rflags &= ~X86_EFLAGS_TF; |
| 1401 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) |
| 1402 | rflags &= ~X86_EFLAGS_RF; |
| 1403 | } |
| 1404 | return rflags; |
| 1405 | } |
| 1406 | |
| 1407 | static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
| 1408 | { |
| 1409 | if (to_svm(vcpu)->nmi_singlestep) |
| 1410 | rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
| 1411 | |
| 1412 | /* |
| 1413 | * Any change of EFLAGS.VM is accompanied by a reload of SS |
| 1414 | * (caused by either a task switch or an inter-privilege IRET), |
| 1415 | * so we do not need to update the CPL here. |
| 1416 | */ |
| 1417 | to_svm(vcpu)->vmcb->save.rflags = rflags; |
| 1418 | } |
| 1419 | |
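/*
 * For SEV-ES guests, register state (including RFLAGS) lives in the encrypted
 * VMSA and is not visible to KVM, so the guest's interrupt flag is derived
 * from the SVM_GUEST_INTERRUPT_MASK bit that hardware reports in int_state.
 */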
| 1420 | static bool svm_get_if_flag(struct kvm_vcpu *vcpu) |
| 1421 | { |
| 1422 | struct vmcb *vmcb = to_svm(vcpu)->vmcb; |
| 1423 | |
| 1424 | return sev_es_guest(kvm: vcpu->kvm) |
| 1425 | ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK |
| 1426 | : kvm_get_rflags(vcpu) & X86_EFLAGS_IF; |
| 1427 | } |
| 1428 | |
| 1429 | static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
| 1430 | { |
| 1431 | kvm_register_mark_available(vcpu, reg); |
| 1432 | |
| 1433 | switch (reg) { |
| 1434 | case VCPU_EXREG_PDPTR: |
| 1435 | /* |
| 1436 | * When !npt_enabled, mmu->pdptrs[] is already available since |
| 1437 | * it is always updated per SDM when moving to CRs. |
| 1438 | */ |
| 1439 | if (npt_enabled) |
| 1440 | load_pdptrs(vcpu, cr3: kvm_read_cr3(vcpu)); |
| 1441 | break; |
| 1442 | default: |
| 1443 | KVM_BUG_ON(1, vcpu->kvm); |
| 1444 | } |
| 1445 | } |
| 1446 | |
| 1447 | static void svm_set_vintr(struct vcpu_svm *svm) |
| 1448 | { |
| 1449 | struct vmcb_control_area *control; |
| 1450 | |
| 1451 | /* |
| 1452 | * The following fields are ignored when AVIC is enabled |
| 1453 | */ |
| 1454 | WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu)); |
| 1455 | |
| 1456 | svm_set_intercept(svm, bit: INTERCEPT_VINTR); |
| 1457 | |
| 1458 | /* |
| 1459 | * Recalculating intercepts may have cleared the VINTR intercept. If |
| 1460 | * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF |
| 1461 | * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN. |
| 1462 | * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as |
| 1463 | * interrupts will never be unblocked while L2 is running. |
| 1464 | */ |
| 1465 | if (!svm_is_intercept(svm, bit: INTERCEPT_VINTR)) |
| 1466 | return; |
| 1467 | |
| 1468 | /* |
| 1469 | * This is just a dummy VINTR to actually cause a vmexit to happen. |
| 1470 | * Actual injection of virtual interrupts happens through EVENTINJ. |
| 1471 | */ |
| 1472 | control = &svm->vmcb->control; |
| 1473 | control->int_vector = 0x0; |
| 1474 | control->int_ctl &= ~V_INTR_PRIO_MASK; |
| 1475 | control->int_ctl |= V_IRQ_MASK | |
| 1476 | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); |
| 1477 | vmcb_mark_dirty(vmcb: svm->vmcb, bit: VMCB_INTR); |
| 1478 | } |
| 1479 | |
| 1480 | static void svm_clear_vintr(struct vcpu_svm *svm) |
| 1481 | { |
| 1482 | svm_clr_intercept(svm, bit: INTERCEPT_VINTR); |
| 1483 | |
| 1484 | /* Drop int_ctl fields related to VINTR injection. */ |
| 1485 | svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; |
| 1486 | if (is_guest_mode(&svm->vcpu)) { |
| 1487 | svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; |
| 1488 | |
| 1489 | WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != |
| 1490 | (svm->nested.ctl.int_ctl & V_TPR_MASK)); |
| 1491 | |
| 1492 | svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & |
| 1493 | V_IRQ_INJECTION_BITS_MASK; |
| 1494 | |
| 1495 | svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; |
| 1496 | } |
| 1497 | |
| 1498 | vmcb_mark_dirty(vmcb: svm->vmcb, bit: VMCB_INTR); |
| 1499 | } |
| 1500 | |
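/*
 * FS, GS, TR and LDTR are always taken from vmcb01: they belong to the state
 * swapped by VMLOAD/VMSAVE rather than VMRUN/#VMEXIT, so KVM maintains them
 * only in the L1 VMCB.
 */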
| 1501 | static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) |
| 1502 | { |
| 1503 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
| 1504 | struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; |
| 1505 | |
| 1506 | switch (seg) { |
| 1507 | case VCPU_SREG_CS: return &save->cs; |
| 1508 | case VCPU_SREG_DS: return &save->ds; |
| 1509 | case VCPU_SREG_ES: return &save->es; |
| 1510 | case VCPU_SREG_FS: return &save01->fs; |
| 1511 | case VCPU_SREG_GS: return &save01->gs; |
| 1512 | case VCPU_SREG_SS: return &save->ss; |
| 1513 | case VCPU_SREG_TR: return &save01->tr; |
| 1514 | case VCPU_SREG_LDTR: return &save01->ldtr; |
| 1515 | } |
| 1516 | BUG(); |
| 1517 | return NULL; |
| 1518 | } |
| 1519 | |
| 1520 | static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) |
| 1521 | { |
| 1522 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1523 | |
| 1524 | return s->base; |
| 1525 | } |
| 1526 | |
| 1527 | static void svm_get_segment(struct kvm_vcpu *vcpu, |
| 1528 | struct kvm_segment *var, int seg) |
| 1529 | { |
| 1530 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1531 | |
| 1532 | var->base = s->base; |
| 1533 | var->limit = s->limit; |
| 1534 | var->selector = s->selector; |
| 1535 | var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; |
| 1536 | var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; |
| 1537 | var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; |
| 1538 | var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; |
| 1539 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; |
| 1540 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; |
| 1541 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; |
| 1542 | |
| 1543 | /* |
| 1544 | * AMD CPUs circa 2014 track the G bit for all segments except CS. |
| 1545 | * However, the SVM spec states that the G bit is not observed by the |
| 1546 | * CPU, and some VMware virtual CPUs drop the G bit for all segments. |
| 1547 | * So let's synthesize a legal G bit for all segments; this helps |
| 1548 | * running KVM nested. It also helps cross-vendor migration, because |
| 1549 | * Intel's vmentry has a check on the 'G' bit. |
| 1550 | */ |
| 1551 | var->g = s->limit > 0xfffff; |
| 1552 | |
| 1553 | /* |
| 1554 | * AMD's VMCB does not have an explicit unusable field, so emulate it |
| 1555 | * for cross vendor migration purposes by "not present" |
| 1556 | */ |
| 1557 | var->unusable = !var->present; |
| 1558 | |
| 1559 | switch (seg) { |
| 1560 | case VCPU_SREG_TR: |
| 1561 | /* |
| 1562 | * Work around a bug where the busy flag in the tr selector |
| 1563 | * isn't exposed |
| 1564 | */ |
| 1565 | var->type |= 0x2; |
| 1566 | break; |
| 1567 | case VCPU_SREG_DS: |
| 1568 | case VCPU_SREG_ES: |
| 1569 | case VCPU_SREG_FS: |
| 1570 | case VCPU_SREG_GS: |
| 1571 | /* |
| 1572 | * The accessed bit must always be set in the segment |
| 1573 | * descriptor cache; although it can be cleared in the |
| 1574 | * descriptor, the cached bit always remains 1. Since |
| 1575 | * Intel has a check on this, set it here to support |
| 1576 | * cross-vendor migration. |
| 1577 | */ |
| 1578 | if (!var->unusable) |
| 1579 | var->type |= 0x1; |
| 1580 | break; |
| 1581 | case VCPU_SREG_SS: |
| 1582 | /* |
| 1583 | * On AMD CPUs sometimes the DB bit in the segment |
| 1584 | * descriptor is left as 1, although the whole segment has |
| 1585 | * been made unusable. Clear it here to pass an Intel VMX |
| 1586 | * entry check when cross vendor migrating. |
| 1587 | */ |
| 1588 | if (var->unusable) |
| 1589 | var->db = 0; |
| 1590 | /* This is symmetric with svm_set_segment() */ |
| 1591 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; |
| 1592 | break; |
| 1593 | } |
| 1594 | } |
| 1595 | |
| 1596 | static int svm_get_cpl(struct kvm_vcpu *vcpu) |
| 1597 | { |
| 1598 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
| 1599 | |
| 1600 | return save->cpl; |
| 1601 | } |
| 1602 | |
| 1603 | static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) |
| 1604 | { |
| 1605 | struct kvm_segment cs; |
| 1606 | |
| 1607 | svm_get_segment(vcpu, &cs, VCPU_SREG_CS); |
| 1608 | *db = cs.db; |
| 1609 | *l = cs.l; |
| 1610 | } |
| 1611 | |
| 1612 | static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 1613 | { |
| 1614 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1615 | |
| 1616 | dt->size = svm->vmcb->save.idtr.limit; |
| 1617 | dt->address = svm->vmcb->save.idtr.base; |
| 1618 | } |
| 1619 | |
| 1620 | static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 1621 | { |
| 1622 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1623 | |
| 1624 | svm->vmcb->save.idtr.limit = dt->size; |
| 1625 | svm->vmcb->save.idtr.base = dt->address; |
| 1626 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
| 1627 | } |
| 1628 | |
| 1629 | static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 1630 | { |
| 1631 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1632 | |
| 1633 | dt->size = svm->vmcb->save.gdtr.limit; |
| 1634 | dt->address = svm->vmcb->save.gdtr.base; |
| 1635 | } |
| 1636 | |
| 1637 | static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 1638 | { |
| 1639 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1640 | |
| 1641 | svm->vmcb->save.gdtr.limit = dt->size; |
| 1642 | svm->vmcb->save.gdtr.base = dt->address; |
| 1643 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
| 1644 | } |
| 1645 | |
| 1646 | static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) |
| 1647 | { |
| 1648 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1649 | |
| 1650 | /* |
| 1651 | * For guests that don't set guest_state_protected, the cr3 update is |
| 1652 | * handled via kvm_mmu_load() while entering the guest. For guests |
| 1653 | * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to |
| 1654 | * VMCB save area now, since the save area will become the initial |
| 1655 | * contents of the VMSA, and future VMCB save area updates won't be |
| 1656 | * seen. |
| 1657 | */ |
| 1658 | if (sev_es_guest(vcpu->kvm)) { |
| 1659 | svm->vmcb->save.cr3 = cr3; |
| 1660 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
| 1661 | } |
| 1662 | } |
| 1663 | |
| 1664 | static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
| 1665 | { |
| 1666 | return true; |
| 1667 | } |
| 1668 | |
| 1669 | void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
| 1670 | { |
| 1671 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1672 | u64 hcr0 = cr0; |
| 1673 | bool old_paging = is_paging(vcpu); |
| 1674 | |
| 1675 | #ifdef CONFIG_X86_64 |
| 1676 | if (vcpu->arch.efer & EFER_LME) { |
| 1677 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { |
| 1678 | vcpu->arch.efer |= EFER_LMA; |
| 1679 | if (!vcpu->arch.guest_state_protected) |
| 1680 | svm->vmcb->save.efer |= EFER_LMA | EFER_LME; |
| 1681 | } |
| 1682 | |
| 1683 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { |
| 1684 | vcpu->arch.efer &= ~EFER_LMA; |
| 1685 | if (!vcpu->arch.guest_state_protected) |
| 1686 | svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); |
| 1687 | } |
| 1688 | } |
| 1689 | #endif |
| 1690 | vcpu->arch.cr0 = cr0; |
| 1691 | |
| 1692 | if (!npt_enabled) { |
| 1693 | hcr0 |= X86_CR0_PG | X86_CR0_WP; |
| 1694 | if (old_paging != is_paging(vcpu)) |
| 1695 | svm_set_cr4(vcpu, kvm_read_cr4(vcpu)); |
| 1696 | } |
| 1697 | |
| 1698 | /* |
| 1699 | * re-enable caching here because the QEMU bios |
| 1700 | * does not do it - this results in some delay at |
| 1701 | * reboot |
| 1702 | */ |
| 1703 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) |
| 1704 | hcr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
| 1705 | |
| 1706 | svm->vmcb->save.cr0 = hcr0; |
| 1707 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
| 1708 | |
| 1709 | /* |
| 1710 | * SEV-ES guests must always keep the CR intercepts cleared. CR |
| 1711 | * tracking is done using the CR write traps. |
| 1712 | */ |
| 1713 | if (sev_es_guest(vcpu->kvm)) |
| 1714 | return; |
| 1715 | |
| 1716 | if (hcr0 == cr0) { |
| 1717 | /* Selective CR0 write remains on. */ |
| 1718 | svm_clr_intercept(svm, INTERCEPT_CR0_READ); |
| 1719 | svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1720 | } else { |
| 1721 | svm_set_intercept(svm, INTERCEPT_CR0_READ); |
| 1722 | svm_set_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1723 | } |
| 1724 | } |
| 1725 | |
| 1726 | static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
| 1727 | { |
| 1728 | return true; |
| 1729 | } |
| 1730 | |
| 1731 | void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
| 1732 | { |
| 1733 | unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; |
| 1734 | unsigned long old_cr4 = vcpu->arch.cr4; |
| 1735 | |
| 1736 | vcpu->arch.cr4 = cr4; |
| 1737 | if (!npt_enabled) { |
| 1738 | cr4 |= X86_CR4_PAE; |
| 1739 | |
| 1740 | if (!is_paging(vcpu)) |
| 1741 | cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); |
| 1742 | } |
| 1743 | cr4 |= host_cr4_mce; |
| 1744 | to_svm(vcpu)->vmcb->save.cr4 = cr4; |
| 1745 | vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); |
| 1746 | |
| 1747 | if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) |
| 1748 | vcpu->arch.cpuid_dynamic_bits_dirty = true; |
| 1749 | } |
| 1750 | |
| 1751 | static void svm_set_segment(struct kvm_vcpu *vcpu, |
| 1752 | struct kvm_segment *var, int seg) |
| 1753 | { |
| 1754 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1755 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1756 | |
| 1757 | s->base = var->base; |
| 1758 | s->limit = var->limit; |
| 1759 | s->selector = var->selector; |
| 1760 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
| 1761 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; |
| 1762 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; |
| 1763 | s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; |
| 1764 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; |
| 1765 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; |
| 1766 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; |
| 1767 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; |
| 1768 | |
| 1769 | /* |
| 1770 | * This is always accurate, except if SYSRET returned to a segment |
| 1771 | * with SS.DPL != 3. Intel does not have this quirk, and always |
| 1772 | * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it |
| 1773 | * would entail passing the CPL to userspace and back. |
| 1774 | */ |
| 1775 | if (seg == VCPU_SREG_SS) |
| 1776 | /* This is symmetric with svm_get_segment() */ |
| 1777 | svm->vmcb->save.cpl = (var->dpl & 3); |
| 1778 | |
| 1779 | vmcb_mark_dirty(svm->vmcb, VMCB_SEG); |
| 1780 | } |
| 1781 | |
| 1782 | static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) |
| 1783 | { |
| 1784 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1785 | |
| 1786 | clr_exception_intercept(svm, BP_VECTOR); |
| 1787 | |
| 1788 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { |
| 1789 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
| 1790 | set_exception_intercept(svm, BP_VECTOR); |
| 1791 | } |
| 1792 | } |
| 1793 | |
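| | /* |
| | * Allocate a fresh ASID for this vCPU. When the per-CPU ASID pool is |
| | * exhausted, bump the generation, restart from min_asid and request a |
| | * full ASID flush so stale translations cannot be reused. |
| | */ |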
| 1794 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) |
| 1795 | { |
| 1796 | if (sd->next_asid > sd->max_asid) { |
| 1797 | ++sd->asid_generation; |
| 1798 | sd->next_asid = sd->min_asid; |
| 1799 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
| 1800 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); |
| 1801 | } |
| 1802 | |
| 1803 | svm->current_vmcb->asid_generation = sd->asid_generation; |
| 1804 | svm->asid = sd->next_asid++; |
| 1805 | } |
| 1806 | |
| 1807 | static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) |
| 1808 | { |
| 1809 | struct vmcb *vmcb = to_svm(vcpu)->vmcb; |
| 1810 | |
| 1811 | if (vcpu->arch.guest_state_protected) |
| 1812 | return; |
| 1813 | |
| 1814 | if (unlikely(value != vmcb->save.dr6)) { |
| 1815 | vmcb->save.dr6 = value; |
| 1816 | vmcb_mark_dirty(vmcb, VMCB_DR); |
| 1817 | } |
| 1818 | } |
| 1819 | |
| 1820 | static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) |
| 1821 | { |
| 1822 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1823 | |
| 1824 | if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm))) |
| 1825 | return; |
| 1826 | |
| 1827 | get_debugreg(vcpu->arch.db[0], 0); |
| 1828 | get_debugreg(vcpu->arch.db[1], 1); |
| 1829 | get_debugreg(vcpu->arch.db[2], 2); |
| 1830 | get_debugreg(vcpu->arch.db[3], 3); |
| 1831 | /* |
| 1832 | * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, |
| 1833 | * because db_interception might need it. We can do it before vmentry. |
| 1834 | */ |
| 1835 | vcpu->arch.dr6 = svm->vmcb->save.dr6; |
| 1836 | vcpu->arch.dr7 = svm->vmcb->save.dr7; |
| 1837 | vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; |
| 1838 | set_dr_intercepts(svm); |
| 1839 | } |
| 1840 | |
| 1841 | static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) |
| 1842 | { |
| 1843 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1844 | |
| 1845 | if (vcpu->arch.guest_state_protected) |
| 1846 | return; |
| 1847 | |
| 1848 | svm->vmcb->save.dr7 = value; |
| 1849 | vmcb_mark_dirty(svm->vmcb, VMCB_DR); |
| 1850 | } |
| 1851 | |
| 1852 | static int pf_interception(struct kvm_vcpu *vcpu) |
| 1853 | { |
| 1854 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1855 | |
| 1856 | u64 fault_address = svm->vmcb->control.exit_info_2; |
| 1857 | u64 error_code = svm->vmcb->control.exit_info_1; |
| 1858 | |
| 1859 | return kvm_handle_page_fault(vcpu, error_code, fault_address, |
| 1860 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 1861 | svm->vmcb->control.insn_bytes : NULL, |
| 1862 | svm->vmcb->control.insn_len); |
| 1863 | } |
| 1864 | |
| 1865 | static int npf_interception(struct kvm_vcpu *vcpu) |
| 1866 | { |
| 1867 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1868 | int rc; |
| 1869 | |
| 1870 | u64 fault_address = svm->vmcb->control.exit_info_2; |
| 1871 | u64 error_code = svm->vmcb->control.exit_info_1; |
| 1872 | |
| 1873 | /* |
| 1874 | * WARN if hardware generates a fault with an error code that collides |
| 1875 | * with KVM-defined synthetic flags. Clear the flags and continue on, |
| 1876 | * i.e. don't terminate the VM, as KVM can't possibly be relying on a |
| 1877 | * flag that KVM doesn't know about. |
| 1878 | */ |
| 1879 | if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK)) |
| 1880 | error_code &= ~PFERR_SYNTHETIC_MASK; |
| 1881 | |
| 1882 | if (sev_snp_guest(vcpu->kvm) && (error_code & PFERR_GUEST_ENC_MASK)) |
| 1883 | error_code |= PFERR_PRIVATE_ACCESS; |
| 1884 | |
| 1885 | trace_kvm_page_fault(vcpu, fault_address, error_code); |
| 1886 | rc = kvm_mmu_page_fault(vcpu, fault_address, error_code, |
| 1887 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 1888 | svm->vmcb->control.insn_bytes : NULL, |
| 1889 | svm->vmcb->control.insn_len); |
| 1890 | |
| 1891 | if (rc > 0 && error_code & PFERR_GUEST_RMP_MASK) |
| 1892 | sev_handle_rmp_fault(vcpu, fault_address, error_code); |
| 1893 | |
| 1894 | return rc; |
| 1895 | } |
| 1896 | |
| 1897 | static int db_interception(struct kvm_vcpu *vcpu) |
| 1898 | { |
| 1899 | struct kvm_run *kvm_run = vcpu->run; |
| 1900 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1901 | |
| 1902 | if (!(vcpu->guest_debug & |
| 1903 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
| 1904 | !svm->nmi_singlestep) { |
| 1905 | u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; |
| 1906 | kvm_queue_exception_p(vcpu, DB_VECTOR, payload); |
| 1907 | return 1; |
| 1908 | } |
| 1909 | |
| 1910 | if (svm->nmi_singlestep) { |
| 1911 | disable_nmi_singlestep(svm); |
| 1912 | /* Make sure we check for pending NMIs upon entry */ |
| 1913 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 1914 | } |
| 1915 | |
| 1916 | if (vcpu->guest_debug & |
| 1917 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { |
| 1918 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
| 1919 | kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; |
| 1920 | kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; |
| 1921 | kvm_run->debug.arch.pc = |
| 1922 | svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 1923 | kvm_run->debug.arch.exception = DB_VECTOR; |
| 1924 | return 0; |
| 1925 | } |
| 1926 | |
| 1927 | return 1; |
| 1928 | } |
| 1929 | |
| 1930 | static int bp_interception(struct kvm_vcpu *vcpu) |
| 1931 | { |
| 1932 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1933 | struct kvm_run *kvm_run = vcpu->run; |
| 1934 | |
| 1935 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
| 1936 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 1937 | kvm_run->debug.arch.exception = BP_VECTOR; |
| 1938 | return 0; |
| 1939 | } |
| 1940 | |
| 1941 | static int ud_interception(struct kvm_vcpu *vcpu) |
| 1942 | { |
| 1943 | return handle_ud(vcpu); |
| 1944 | } |
| 1945 | |
| 1946 | static int ac_interception(struct kvm_vcpu *vcpu) |
| 1947 | { |
| 1948 | kvm_queue_exception_e(vcpu, AC_VECTOR, 0); |
| 1949 | return 1; |
| 1950 | } |
| 1951 | |
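| | /* |
| | * Detect the machine-check signature of AMD erratum 383. If it matches, |
| | * clear the MCi_STATUS registers and the relevant MCG_STATUS bit, and |
| | * flush the TLB to evict multi-match entries before reporting true. |
| | */ |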
| 1952 | static bool is_erratum_383(void) |
| 1953 | { |
| 1954 | int i; |
| 1955 | u64 value; |
| 1956 | |
| 1957 | if (!erratum_383_found) |
| 1958 | return false; |
| 1959 | |
| 1960 | if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value)) |
| 1961 | return false; |
| 1962 | |
| 1963 | /* Bit 62 may or may not be set for this mce */ |
| 1964 | value &= ~(1ULL << 62); |
| 1965 | |
| 1966 | if (value != 0xb600000000010015ULL) |
| 1967 | return false; |
| 1968 | |
| 1969 | /* Clear MCi_STATUS registers */ |
| 1970 | for (i = 0; i < 6; ++i) |
| 1971 | native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0); |
| 1972 | |
| 1973 | if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) { |
| 1974 | value &= ~(1ULL << 2); |
| 1975 | native_write_msr_safe(MSR_IA32_MCG_STATUS, value); |
| 1976 | } |
| 1977 | |
| 1978 | /* Flush tlb to evict multi-match entries */ |
| 1979 | __flush_tlb_all(); |
| 1980 | |
| 1981 | return true; |
| 1982 | } |
| 1983 | |
| 1984 | static void svm_handle_mce(struct kvm_vcpu *vcpu) |
| 1985 | { |
| 1986 | if (is_erratum_383()) { |
| 1987 | /* |
| 1988 | * Erratum 383 triggered. Guest state is corrupt so kill the |
| 1989 | * guest. |
| 1990 | */ |
| 1991 | pr_err("Guest triggered AMD Erratum 383\n" ); |
| 1992 | |
| 1993 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
| 1994 | |
| 1995 | return; |
| 1996 | } |
| 1997 | |
| 1998 | /* |
| 1999 | * On an #MC intercept the MCE handler is not called automatically in |
| 2000 | * the host. So do it by hand here. |
| 2001 | */ |
| 2002 | kvm_machine_check(); |
| 2003 | } |
| 2004 | |
| 2005 | static int mc_interception(struct kvm_vcpu *vcpu) |
| 2006 | { |
| 2007 | return 1; |
| 2008 | } |
| 2009 | |
| 2010 | static int shutdown_interception(struct kvm_vcpu *vcpu) |
| 2011 | { |
| 2012 | struct kvm_run *kvm_run = vcpu->run; |
| 2013 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2014 | |
| 2015 | |
| 2016 | /* |
| 2017 | * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put |
| 2018 | * the VMCB in a known good state. Unfortunately, KVM doesn't have |
| 2019 | * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking |
| 2020 | * userspace. From a platform perspective, INIT is acceptable behavior as |
| 2021 | * there exist bare metal platforms that automatically INIT the CPU |
| 2022 | * in response to shutdown. |
| 2023 | * |
| 2024 | * The VM save area for SEV-ES guests has already been encrypted so it |
| 2025 | * cannot be reinitialized, i.e. synthesizing INIT is futile. |
| 2026 | */ |
| 2027 | if (!sev_es_guest(vcpu->kvm)) { |
| 2028 | clear_page(svm->vmcb); |
| 2029 | #ifdef CONFIG_KVM_SMM |
| 2030 | if (is_smm(vcpu)) |
| 2031 | kvm_smm_changed(vcpu, false); |
| 2032 | #endif |
| 2033 | kvm_vcpu_reset(vcpu, true); |
| 2034 | } |
| 2035 | |
| 2036 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; |
| 2037 | return 0; |
| 2038 | } |
| 2039 | |
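| | /* |
| | * Handle an IOIO intercept: decode the direction, size, port and |
| | * string-ness from exit_info_1, emulate string I/O, and use the fast |
| | * PIO path for everything else. |
| | */ |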
| 2040 | static int io_interception(struct kvm_vcpu *vcpu) |
| 2041 | { |
| 2042 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2043 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ |
| 2044 | int size, in, string; |
| 2045 | unsigned port; |
| 2046 | |
| 2047 | ++vcpu->stat.io_exits; |
| 2048 | string = (io_info & SVM_IOIO_STR_MASK) != 0; |
| 2049 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; |
| 2050 | port = io_info >> 16; |
| 2051 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; |
| 2052 | |
| 2053 | if (string) { |
| 2054 | if (sev_es_guest(vcpu->kvm)) |
| 2055 | return sev_es_string_io(svm, size, port, in); |
| 2056 | else |
| 2057 | return kvm_emulate_instruction(vcpu, 0); |
| 2058 | } |
| 2059 | |
| 2060 | svm->next_rip = svm->vmcb->control.exit_info_2; |
| 2061 | |
| 2062 | return kvm_fast_pio(vcpu, size, port, in); |
| 2063 | } |
| 2064 | |
| 2065 | static int nmi_interception(struct kvm_vcpu *vcpu) |
| 2066 | { |
| 2067 | return 1; |
| 2068 | } |
| 2069 | |
| 2070 | static int smi_interception(struct kvm_vcpu *vcpu) |
| 2071 | { |
| 2072 | return 1; |
| 2073 | } |
| 2074 | |
| 2075 | static int intr_interception(struct kvm_vcpu *vcpu) |
| 2076 | { |
| 2077 | ++vcpu->stat.irq_exits; |
| 2078 | return 1; |
| 2079 | } |
| 2080 | |
| 2081 | static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) |
| 2082 | { |
| 2083 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2084 | struct vmcb *vmcb12; |
| 2085 | struct kvm_host_map map; |
| 2086 | int ret; |
| 2087 | |
| 2088 | if (nested_svm_check_permissions(vcpu)) |
| 2089 | return 1; |
| 2090 | |
| 2091 | ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); |
| 2092 | if (ret) { |
| 2093 | if (ret == -EINVAL) |
| 2094 | kvm_inject_gp(vcpu, 0); |
| 2095 | return 1; |
| 2096 | } |
| 2097 | |
| 2098 | vmcb12 = map.hva; |
| 2099 | |
| 2100 | ret = kvm_skip_emulated_instruction(vcpu); |
| 2101 | |
| 2102 | if (vmload) { |
| 2103 | svm_copy_vmloadsave_state(svm->vmcb, vmcb12); |
| 2104 | svm->sysenter_eip_hi = 0; |
| 2105 | svm->sysenter_esp_hi = 0; |
| 2106 | } else { |
| 2107 | svm_copy_vmloadsave_state(vmcb12, svm->vmcb); |
| 2108 | } |
| 2109 | |
| 2110 | kvm_vcpu_unmap(vcpu, &map); |
| 2111 | |
| 2112 | return ret; |
| 2113 | } |
| 2114 | |
| 2115 | static int vmload_interception(struct kvm_vcpu *vcpu) |
| 2116 | { |
| 2117 | return vmload_vmsave_interception(vcpu, true); |
| 2118 | } |
| 2119 | |
| 2120 | static int vmsave_interception(struct kvm_vcpu *vcpu) |
| 2121 | { |
| 2122 | return vmload_vmsave_interception(vcpu, false); |
| 2123 | } |
| 2124 | |
| 2125 | static int vmrun_interception(struct kvm_vcpu *vcpu) |
| 2126 | { |
| 2127 | if (nested_svm_check_permissions(vcpu)) |
| 2128 | return 1; |
| 2129 | |
| 2130 | return nested_svm_vmrun(vcpu); |
| 2131 | } |
| 2132 | |
| 2133 | enum { |
| 2134 | NONE_SVM_INSTR, |
| 2135 | SVM_INSTR_VMRUN, |
| 2136 | SVM_INSTR_VMLOAD, |
| 2137 | SVM_INSTR_VMSAVE, |
| 2138 | }; |
| 2139 | |
| 2140 | /* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */ |
| 2141 | static int svm_instr_opcode(struct kvm_vcpu *vcpu) |
| 2142 | { |
| 2143 | struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; |
| 2144 | |
| 2145 | if (ctxt->b != 0x1 || ctxt->opcode_len != 2) |
| 2146 | return NONE_SVM_INSTR; |
| 2147 | |
| 2148 | switch (ctxt->modrm) { |
| 2149 | case 0xd8: /* VMRUN */ |
| 2150 | return SVM_INSTR_VMRUN; |
| 2151 | case 0xda: /* VMLOAD */ |
| 2152 | return SVM_INSTR_VMLOAD; |
| 2153 | case 0xdb: /* VMSAVE */ |
| 2154 | return SVM_INSTR_VMSAVE; |
| 2155 | default: |
| 2156 | break; |
| 2157 | } |
| 2158 | |
| 2159 | return NONE_SVM_INSTR; |
| 2160 | } |
| 2161 | |
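| | /* |
| | * Emulate VMRUN/VMLOAD/VMSAVE on behalf of the guest. If the vCPU is |
| | * in guest mode, reflect the instruction to L1 as a synthetic #VMEXIT |
| | * instead of handling it in KVM. |
| | */ |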
| 2162 | static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) |
| 2163 | { |
| 2164 | const int guest_mode_exit_codes[] = { |
| 2165 | [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, |
| 2166 | [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, |
| 2167 | [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, |
| 2168 | }; |
| 2169 | int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = { |
| 2170 | [SVM_INSTR_VMRUN] = vmrun_interception, |
| 2171 | [SVM_INSTR_VMLOAD] = vmload_interception, |
| 2172 | [SVM_INSTR_VMSAVE] = vmsave_interception, |
| 2173 | }; |
| 2174 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2175 | int ret; |
| 2176 | |
| 2177 | if (is_guest_mode(vcpu)) { |
| 2178 | /* Returns '1' or -errno on failure, '0' on success. */ |
| 2179 | ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); |
| 2180 | if (ret) |
| 2181 | return ret; |
| 2182 | return 1; |
| 2183 | } |
| 2184 | return svm_instr_handlers[opcode](vcpu); |
| 2185 | } |
| 2186 | |
| 2187 | /* |
| 2188 | * #GP handling code. Note that #GP can be triggered under the following two |
| 2189 | * cases: |
| 2190 | * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on |
| 2191 | * some AMD CPUs when EAX of these instructions are in the reserved memory |
| 2192 | * regions (e.g. SMM memory on host). |
| 2193 | * 2) VMware backdoor |
| 2194 | */ |
| 2195 | static int gp_interception(struct kvm_vcpu *vcpu) |
| 2196 | { |
| 2197 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2198 | u32 error_code = svm->vmcb->control.exit_info_1; |
| 2199 | int opcode; |
| 2200 | |
| 2201 | /* Both #GP cases have zero error_code */ |
| 2202 | if (error_code) |
| 2203 | goto reinject; |
| 2204 | |
| 2205 | /* Decode the instruction for usage later */ |
| 2206 | if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) |
| 2207 | goto reinject; |
| 2208 | |
| 2209 | opcode = svm_instr_opcode(vcpu); |
| 2210 | |
| 2211 | if (opcode == NONE_SVM_INSTR) { |
| 2212 | if (!enable_vmware_backdoor) |
| 2213 | goto reinject; |
| 2214 | |
| 2215 | /* |
| 2216 | * VMware backdoor emulation on #GP interception only handles |
| 2217 | * IN{S}, OUT{S}, and RDPMC. |
| 2218 | */ |
| 2219 | if (!is_guest_mode(vcpu)) |
| 2220 | return kvm_emulate_instruction(vcpu, |
| 2221 | EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); |
| 2222 | } else { |
| 2223 | /* All SVM instructions expect page aligned RAX */ |
| 2224 | if (svm->vmcb->save.rax & ~PAGE_MASK) |
| 2225 | goto reinject; |
| 2226 | |
| 2227 | return emulate_svm_instr(vcpu, opcode); |
| 2228 | } |
| 2229 | |
| 2230 | reinject: |
| 2231 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
| 2232 | return 1; |
| 2233 | } |
| 2234 | |
| 2235 | void svm_set_gif(struct vcpu_svm *svm, bool value) |
| 2236 | { |
| 2237 | if (value) { |
| 2238 | /* |
| 2239 | * If VGIF is enabled, the STGI intercept is only added to |
| 2240 | * detect the opening of the SMI/NMI window; remove it now. |
| 2241 | * Likewise, clear the VINTR intercept, we will set it |
| 2242 | * again while processing KVM_REQ_EVENT if needed. |
| 2243 | */ |
| 2244 | if (vgif) |
| 2245 | svm_clr_intercept(svm, INTERCEPT_STGI); |
| 2246 | if (svm_is_intercept(svm, INTERCEPT_VINTR)) |
| 2247 | svm_clear_vintr(svm); |
| 2248 | |
| 2249 | enable_gif(svm); |
| 2250 | if (svm->vcpu.arch.smi_pending || |
| 2251 | svm->vcpu.arch.nmi_pending || |
| 2252 | kvm_cpu_has_injectable_intr(&svm->vcpu) || |
| 2253 | kvm_apic_has_pending_init_or_sipi(&svm->vcpu)) |
| 2254 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
| 2255 | } else { |
| 2256 | disable_gif(svm); |
| 2257 | |
| 2258 | /* |
| 2259 | * After a CLGI no interrupts should come. But if vGIF is |
| 2260 | * in use, we still rely on the VINTR intercept (rather than |
| 2261 | * STGI) to detect an open interrupt window. |
| 2262 | */ |
| 2263 | if (!vgif) |
| 2264 | svm_clear_vintr(svm); |
| 2265 | } |
| 2266 | } |
| 2267 | |
| 2268 | static int stgi_interception(struct kvm_vcpu *vcpu) |
| 2269 | { |
| 2270 | int ret; |
| 2271 | |
| 2272 | if (nested_svm_check_permissions(vcpu)) |
| 2273 | return 1; |
| 2274 | |
| 2275 | ret = kvm_skip_emulated_instruction(vcpu); |
| 2276 | svm_set_gif(to_svm(vcpu), true); |
| 2277 | return ret; |
| 2278 | } |
| 2279 | |
| 2280 | static int clgi_interception(struct kvm_vcpu *vcpu) |
| 2281 | { |
| 2282 | int ret; |
| 2283 | |
| 2284 | if (nested_svm_check_permissions(vcpu)) |
| 2285 | return 1; |
| 2286 | |
| 2287 | ret = kvm_skip_emulated_instruction(vcpu); |
| 2288 | svm_set_gif(to_svm(vcpu), false); |
| 2289 | return ret; |
| 2290 | } |
| 2291 | |
| 2292 | static int invlpga_interception(struct kvm_vcpu *vcpu) |
| 2293 | { |
| 2294 | gva_t gva = kvm_rax_read(vcpu); |
| 2295 | u32 asid = kvm_rcx_read(vcpu); |
| 2296 | |
| 2297 | /* FIXME: Handle an address size prefix. */ |
| 2298 | if (!is_long_mode(vcpu)) |
| 2299 | gva = (u32)gva; |
| 2300 | |
| 2301 | trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); |
| 2302 | |
| 2303 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ |
| 2304 | kvm_mmu_invlpg(vcpu, gva); |
| 2305 | |
| 2306 | return kvm_skip_emulated_instruction(vcpu); |
| 2307 | } |
| 2308 | |
| 2309 | static int skinit_interception(struct kvm_vcpu *vcpu) |
| 2310 | { |
| 2311 | trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); |
| 2312 | |
| 2313 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 2314 | return 1; |
| 2315 | } |
| 2316 | |
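| | /* |
| | * Reconstruct the task-switch reason and any event that was being |
| | * delivered from the exit information, then defer to the common |
| | * kvm_task_switch() emulation. |
| | */ |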
| 2317 | static int task_switch_interception(struct kvm_vcpu *vcpu) |
| 2318 | { |
| 2319 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2320 | u16 tss_selector; |
| 2321 | int reason; |
| 2322 | int int_type = svm->vmcb->control.exit_int_info & |
| 2323 | SVM_EXITINTINFO_TYPE_MASK; |
| 2324 | int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; |
| 2325 | uint32_t type = |
| 2326 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; |
| 2327 | uint32_t idt_v = |
| 2328 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; |
| 2329 | bool has_error_code = false; |
| 2330 | u32 error_code = 0; |
| 2331 | |
| 2332 | tss_selector = (u16)svm->vmcb->control.exit_info_1; |
| 2333 | |
| 2334 | if (svm->vmcb->control.exit_info_2 & |
| 2335 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) |
| 2336 | reason = TASK_SWITCH_IRET; |
| 2337 | else if (svm->vmcb->control.exit_info_2 & |
| 2338 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) |
| 2339 | reason = TASK_SWITCH_JMP; |
| 2340 | else if (idt_v) |
| 2341 | reason = TASK_SWITCH_GATE; |
| 2342 | else |
| 2343 | reason = TASK_SWITCH_CALL; |
| 2344 | |
| 2345 | if (reason == TASK_SWITCH_GATE) { |
| 2346 | switch (type) { |
| 2347 | case SVM_EXITINTINFO_TYPE_NMI: |
| 2348 | vcpu->arch.nmi_injected = false; |
| 2349 | break; |
| 2350 | case SVM_EXITINTINFO_TYPE_EXEPT: |
| 2351 | if (svm->vmcb->control.exit_info_2 & |
| 2352 | (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { |
| 2353 | has_error_code = true; |
| 2354 | error_code = |
| 2355 | (u32)svm->vmcb->control.exit_info_2; |
| 2356 | } |
| 2357 | kvm_clear_exception_queue(vcpu); |
| 2358 | break; |
| 2359 | case SVM_EXITINTINFO_TYPE_INTR: |
| 2360 | case SVM_EXITINTINFO_TYPE_SOFT: |
| 2361 | kvm_clear_interrupt_queue(vcpu); |
| 2362 | break; |
| 2363 | default: |
| 2364 | break; |
| 2365 | } |
| 2366 | } |
| 2367 | |
| 2368 | if (reason != TASK_SWITCH_GATE || |
| 2369 | int_type == SVM_EXITINTINFO_TYPE_SOFT || |
| 2370 | (int_type == SVM_EXITINTINFO_TYPE_EXEPT && |
| 2371 | (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { |
| 2372 | if (!svm_skip_emulated_instruction(vcpu)) |
| 2373 | return 0; |
| 2374 | } |
| 2375 | |
| 2376 | if (int_type != SVM_EXITINTINFO_TYPE_SOFT) |
| 2377 | int_vec = -1; |
| 2378 | |
| 2379 | return kvm_task_switch(vcpu, tss_selector, int_vec, reason, |
| 2380 | has_error_code, error_code); |
| 2381 | } |
| 2382 | |
| 2383 | static void svm_clr_iret_intercept(struct vcpu_svm *svm) |
| 2384 | { |
| 2385 | if (!sev_es_guest(svm->vcpu.kvm)) |
| 2386 | svm_clr_intercept(svm, INTERCEPT_IRET); |
| 2387 | } |
| 2388 | |
| 2389 | static void svm_set_iret_intercept(struct vcpu_svm *svm) |
| 2390 | { |
| 2391 | if (!sev_es_guest(svm->vcpu.kvm)) |
| 2392 | svm_set_intercept(svm, INTERCEPT_IRET); |
| 2393 | } |
| 2394 | |
| 2395 | static int iret_interception(struct kvm_vcpu *vcpu) |
| 2396 | { |
| 2397 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2398 | |
| 2399 | WARN_ON_ONCE(sev_es_guest(vcpu->kvm)); |
| 2400 | |
| 2401 | ++vcpu->stat.nmi_window_exits; |
| 2402 | svm->awaiting_iret_completion = true; |
| 2403 | |
| 2404 | svm_clr_iret_intercept(svm); |
| 2405 | svm->nmi_iret_rip = kvm_rip_read(vcpu); |
| 2406 | |
| 2407 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 2408 | return 1; |
| 2409 | } |
| 2410 | |
| 2411 | static int invlpg_interception(struct kvm_vcpu *vcpu) |
| 2412 | { |
| 2413 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
| 2414 | return kvm_emulate_instruction(vcpu, 0); |
| 2415 | |
| 2416 | kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); |
| 2417 | return kvm_skip_emulated_instruction(vcpu); |
| 2418 | } |
| 2419 | |
| 2420 | static int emulate_on_interception(struct kvm_vcpu *vcpu) |
| 2421 | { |
| 2422 | return kvm_emulate_instruction(vcpu, 0); |
| 2423 | } |
| 2424 | |
| 2425 | static int rsm_interception(struct kvm_vcpu *vcpu) |
| 2426 | { |
| 2427 | return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2); |
| 2428 | } |
| 2429 | |
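| | /* |
| | * Check whether a CR0 write that reached KVM via the unconditional CR0 |
| | * write intercept must instead be reflected to L1 as a selective CR0 |
| | * write #VMEXIT, i.e. L1 intercepts selective CR0 writes and the write |
| | * changes bits outside of SVM_CR0_SELECTIVE_MASK. |
| | */ |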
| 2430 | static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, |
| 2431 | unsigned long val) |
| 2432 | { |
| 2433 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2434 | unsigned long cr0 = vcpu->arch.cr0; |
| 2435 | bool ret = false; |
| 2436 | |
| 2437 | if (!is_guest_mode(vcpu) || |
| 2438 | (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) |
| 2439 | return false; |
| 2440 | |
| 2441 | cr0 &= ~SVM_CR0_SELECTIVE_MASK; |
| 2442 | val &= ~SVM_CR0_SELECTIVE_MASK; |
| 2443 | |
| 2444 | if (cr0 ^ val) { |
| 2445 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 2446 | svm->vmcb->control.exit_code_hi = 0; |
| 2447 | ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); |
| 2448 | } |
| 2449 | |
| 2450 | return ret; |
| 2451 | } |
| 2452 | |
| 2453 | #define CR_VALID (1ULL << 63) |
| 2454 | |
| 2455 | static int cr_interception(struct kvm_vcpu *vcpu) |
| 2456 | { |
| 2457 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2458 | int reg, cr; |
| 2459 | unsigned long val; |
| 2460 | int err; |
| 2461 | |
| 2462 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
| 2463 | return emulate_on_interception(vcpu); |
| 2464 | |
| 2465 | if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) |
| 2466 | return emulate_on_interception(vcpu); |
| 2467 | |
| 2468 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
| 2469 | if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) |
| 2470 | cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; |
| 2471 | else |
| 2472 | cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; |
| 2473 | |
| 2474 | err = 0; |
| 2475 | if (cr >= 16) { /* mov to cr */ |
| 2476 | cr -= 16; |
| 2477 | val = kvm_register_read(vcpu, reg); |
| 2478 | trace_kvm_cr_write(cr, val); |
| 2479 | switch (cr) { |
| 2480 | case 0: |
| 2481 | if (!check_selective_cr0_intercepted(vcpu, val)) |
| 2482 | err = kvm_set_cr0(vcpu, val); |
| 2483 | else |
| 2484 | return 1; |
| 2485 | |
| 2486 | break; |
| 2487 | case 3: |
| 2488 | err = kvm_set_cr3(vcpu, val); |
| 2489 | break; |
| 2490 | case 4: |
| 2491 | err = kvm_set_cr4(vcpu, val); |
| 2492 | break; |
| 2493 | case 8: |
| 2494 | err = kvm_set_cr8(vcpu, val); |
| 2495 | break; |
| 2496 | default: |
| 2497 | WARN(1, "unhandled write to CR%d" , cr); |
| 2498 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 2499 | return 1; |
| 2500 | } |
| 2501 | } else { /* mov from cr */ |
| 2502 | switch (cr) { |
| 2503 | case 0: |
| 2504 | val = kvm_read_cr0(vcpu); |
| 2505 | break; |
| 2506 | case 2: |
| 2507 | val = vcpu->arch.cr2; |
| 2508 | break; |
| 2509 | case 3: |
| 2510 | val = kvm_read_cr3(vcpu); |
| 2511 | break; |
| 2512 | case 4: |
| 2513 | val = kvm_read_cr4(vcpu); |
| 2514 | break; |
| 2515 | case 8: |
| 2516 | val = kvm_get_cr8(vcpu); |
| 2517 | break; |
| 2518 | default: |
| 2519 | WARN(1, "unhandled read from CR%d" , cr); |
| 2520 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 2521 | return 1; |
| 2522 | } |
| 2523 | kvm_register_write(vcpu, reg, val); |
| 2524 | trace_kvm_cr_read(cr, val); |
| 2525 | } |
| 2526 | return kvm_complete_insn_gp(vcpu, err); |
| 2527 | } |
| 2528 | |
| 2529 | static int cr_trap(struct kvm_vcpu *vcpu) |
| 2530 | { |
| 2531 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2532 | unsigned long old_value, new_value; |
| 2533 | unsigned int cr; |
| 2534 | int ret = 0; |
| 2535 | |
| 2536 | new_value = (unsigned long)svm->vmcb->control.exit_info_1; |
| 2537 | |
| 2538 | cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; |
| 2539 | switch (cr) { |
| 2540 | case 0: |
| 2541 | old_value = kvm_read_cr0(vcpu); |
| 2542 | svm_set_cr0(vcpu, new_value); |
| 2543 | |
| 2544 | kvm_post_set_cr0(vcpu, old_value, new_value); |
| 2545 | break; |
| 2546 | case 4: |
| 2547 | old_value = kvm_read_cr4(vcpu); |
| 2548 | svm_set_cr4(vcpu, new_value); |
| 2549 | |
| 2550 | kvm_post_set_cr4(vcpu, old_value, new_value); |
| 2551 | break; |
| 2552 | case 8: |
| 2553 | ret = kvm_set_cr8(vcpu, new_value); |
| 2554 | break; |
| 2555 | default: |
| 2556 | WARN(1, "unhandled CR%d write trap" , cr); |
| 2557 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 2558 | return 1; |
| 2559 | } |
| 2560 | |
| 2561 | return kvm_complete_insn_gp(vcpu, ret); |
| 2562 | } |
| 2563 | |
| 2564 | static int dr_interception(struct kvm_vcpu *vcpu) |
| 2565 | { |
| 2566 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2567 | int reg, dr; |
| 2568 | int err = 0; |
| 2569 | |
| 2570 | /* |
| 2571 | * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT |
| 2572 | * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early. |
| 2573 | */ |
| 2574 | if (sev_es_guest(vcpu->kvm)) |
| 2575 | return 1; |
| 2576 | |
| 2577 | if (vcpu->guest_debug == 0) { |
| 2578 | /* |
| 2579 | * No more DR vmexits; force a reload of the debug registers |
| 2580 | * and reenter on this instruction. The next vmexit will |
| 2581 | * retrieve the full state of the debug registers. |
| 2582 | */ |
| 2583 | clr_dr_intercepts(svm); |
| 2584 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; |
| 2585 | return 1; |
| 2586 | } |
| 2587 | |
| 2588 | if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) |
| 2589 | return emulate_on_interception(vcpu); |
| 2590 | |
| 2591 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
| 2592 | dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; |
| 2593 | if (dr >= 16) { /* mov to DRn */ |
| 2594 | dr -= 16; |
| 2595 | err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); |
| 2596 | } else { |
| 2597 | kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr)); |
| 2598 | } |
| 2599 | |
| 2600 | return kvm_complete_insn_gp(vcpu, err); |
| 2601 | } |
| 2602 | |
| 2603 | static int cr8_write_interception(struct kvm_vcpu *vcpu) |
| 2604 | { |
| 2605 | int r; |
| 2606 | |
| 2607 | u8 cr8_prev = kvm_get_cr8(vcpu); |
| 2608 | /* instruction emulation calls kvm_set_cr8() */ |
| 2609 | r = cr_interception(vcpu); |
| 2610 | if (lapic_in_kernel(vcpu)) |
| 2611 | return r; |
| 2612 | if (cr8_prev <= kvm_get_cr8(vcpu)) |
| 2613 | return r; |
| 2614 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
| 2615 | return 0; |
| 2616 | } |
| 2617 | |
| 2618 | static int efer_trap(struct kvm_vcpu *vcpu) |
| 2619 | { |
| 2620 | struct msr_data msr_info; |
| 2621 | int ret; |
| 2622 | |
| 2623 | /* |
| 2624 | * Clear the EFER_SVME bit from EFER. The SVM code always sets this |
| 2625 | * bit in svm_set_efer(), but __kvm_valid_efer() checks it against |
| 2626 | * whether the guest has X86_FEATURE_SVM - this avoids a failure if |
| 2627 | * the guest doesn't have X86_FEATURE_SVM. |
| 2628 | */ |
| 2629 | msr_info.host_initiated = false; |
| 2630 | msr_info.index = MSR_EFER; |
| 2631 | msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; |
| 2632 | ret = kvm_set_msr_common(vcpu, &msr_info); |
| 2633 | |
| 2634 | return kvm_complete_insn_gp(vcpu, ret); |
| 2635 | } |
| 2636 | |
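| | /* |
| | * Report the host-supported value of a KVM "feature" MSR; only |
| | * MSR_AMD64_DE_CFG is handled here, everything else is unsupported. |
| | */ |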
| 2637 | static int svm_get_feature_msr(u32 msr, u64 *data) |
| 2638 | { |
| 2639 | *data = 0; |
| 2640 | |
| 2641 | switch (msr) { |
| 2642 | case MSR_AMD64_DE_CFG: |
| 2643 | if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) |
| 2644 | *data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; |
| 2645 | break; |
| 2646 | default: |
| 2647 | return KVM_MSR_RET_UNSUPPORTED; |
| 2648 | } |
| 2649 | |
| 2650 | return 0; |
| 2651 | } |
| 2652 | |
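| | /* |
| | * Return true if the MSR access targets a protected SEV-ES guest and the |
| | * MSR is passed through to the guest, in which case the real value lives |
| | * in the encrypted VMSA; MSR_IA32_XSS is explicitly excepted. |
| | */ |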
| 2653 | static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, |
| 2654 | struct msr_data *msr_info) |
| 2655 | { |
| 2656 | return sev_es_guest(vcpu->kvm) && vcpu->arch.guest_state_protected && |
| 2657 | msr_info->index != MSR_IA32_XSS && |
| 2658 | !msr_write_intercepted(vcpu, msr_info->index); |
| 2659 | } |
| 2660 | |
| 2661 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
| 2662 | { |
| 2663 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2664 | |
| 2665 | if (sev_es_prevent_msr_access(vcpu, msr_info)) { |
| 2666 | msr_info->data = 0; |
| 2667 | return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; |
| 2668 | } |
| 2669 | |
| 2670 | switch (msr_info->index) { |
| 2671 | case MSR_AMD64_TSC_RATIO: |
| 2672 | if (!msr_info->host_initiated && |
| 2673 | !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) |
| 2674 | return 1; |
| 2675 | msr_info->data = svm->tsc_ratio_msr; |
| 2676 | break; |
| 2677 | case MSR_STAR: |
| 2678 | msr_info->data = svm->vmcb01.ptr->save.star; |
| 2679 | break; |
| 2680 | #ifdef CONFIG_X86_64 |
| 2681 | case MSR_LSTAR: |
| 2682 | msr_info->data = svm->vmcb01.ptr->save.lstar; |
| 2683 | break; |
| 2684 | case MSR_CSTAR: |
| 2685 | msr_info->data = svm->vmcb01.ptr->save.cstar; |
| 2686 | break; |
| 2687 | case MSR_GS_BASE: |
| 2688 | msr_info->data = svm->vmcb01.ptr->save.gs.base; |
| 2689 | break; |
| 2690 | case MSR_FS_BASE: |
| 2691 | msr_info->data = svm->vmcb01.ptr->save.fs.base; |
| 2692 | break; |
| 2693 | case MSR_KERNEL_GS_BASE: |
| 2694 | msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; |
| 2695 | break; |
| 2696 | case MSR_SYSCALL_MASK: |
| 2697 | msr_info->data = svm->vmcb01.ptr->save.sfmask; |
| 2698 | break; |
| 2699 | #endif |
| 2700 | case MSR_IA32_SYSENTER_CS: |
| 2701 | msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; |
| 2702 | break; |
| 2703 | case MSR_IA32_SYSENTER_EIP: |
| 2704 | msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; |
| 2705 | if (guest_cpuid_is_intel_compatible(vcpu)) |
| 2706 | msr_info->data |= (u64)svm->sysenter_eip_hi << 32; |
| 2707 | break; |
| 2708 | case MSR_IA32_SYSENTER_ESP: |
| 2709 | msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; |
| 2710 | if (guest_cpuid_is_intel_compatible(vcpu)) |
| 2711 | msr_info->data |= (u64)svm->sysenter_esp_hi << 32; |
| 2712 | break; |
| 2713 | case MSR_IA32_S_CET: |
| 2714 | msr_info->data = svm->vmcb->save.s_cet; |
| 2715 | break; |
| 2716 | case MSR_IA32_INT_SSP_TAB: |
| 2717 | msr_info->data = svm->vmcb->save.isst_addr; |
| 2718 | break; |
| 2719 | case MSR_KVM_INTERNAL_GUEST_SSP: |
| 2720 | msr_info->data = svm->vmcb->save.ssp; |
| 2721 | break; |
| 2722 | case MSR_TSC_AUX: |
| 2723 | msr_info->data = svm->tsc_aux; |
| 2724 | break; |
| 2725 | case MSR_IA32_DEBUGCTLMSR: |
| 2726 | msr_info->data = svm->vmcb->save.dbgctl; |
| 2727 | break; |
| 2728 | case MSR_IA32_LASTBRANCHFROMIP: |
| 2729 | msr_info->data = svm->vmcb->save.br_from; |
| 2730 | break; |
| 2731 | case MSR_IA32_LASTBRANCHTOIP: |
| 2732 | msr_info->data = svm->vmcb->save.br_to; |
| 2733 | break; |
| 2734 | case MSR_IA32_LASTINTFROMIP: |
| 2735 | msr_info->data = svm->vmcb->save.last_excp_from; |
| 2736 | break; |
| 2737 | case MSR_IA32_LASTINTTOIP: |
| 2738 | msr_info->data = svm->vmcb->save.last_excp_to; |
| 2739 | break; |
| 2740 | case MSR_VM_HSAVE_PA: |
| 2741 | msr_info->data = svm->nested.hsave_msr; |
| 2742 | break; |
| 2743 | case MSR_VM_CR: |
| 2744 | msr_info->data = svm->nested.vm_cr_msr; |
| 2745 | break; |
| 2746 | case MSR_IA32_SPEC_CTRL: |
| 2747 | if (!msr_info->host_initiated && |
| 2748 | !guest_has_spec_ctrl_msr(vcpu)) |
| 2749 | return 1; |
| 2750 | |
| 2751 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 2752 | msr_info->data = svm->vmcb->save.spec_ctrl; |
| 2753 | else |
| 2754 | msr_info->data = svm->spec_ctrl; |
| 2755 | break; |
| 2756 | case MSR_AMD64_VIRT_SPEC_CTRL: |
| 2757 | if (!msr_info->host_initiated && |
| 2758 | !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD)) |
| 2759 | return 1; |
| 2760 | |
| 2761 | msr_info->data = svm->virt_spec_ctrl; |
| 2762 | break; |
| 2763 | case MSR_F15H_IC_CFG: { |
| 2764 | |
| 2765 | int family, model; |
| 2766 | |
| 2767 | family = guest_cpuid_family(vcpu); |
| 2768 | model = guest_cpuid_model(vcpu); |
| 2769 | |
| 2770 | if (family < 0 || model < 0) |
| 2771 | return kvm_get_msr_common(vcpu, msr_info); |
| 2772 | |
| 2773 | msr_info->data = 0; |
| 2774 | |
| 2775 | if (family == 0x15 && |
| 2776 | (model >= 0x2 && model < 0x20)) |
| 2777 | msr_info->data = 0x1E; |
| 2778 | } |
| 2779 | break; |
| 2780 | case MSR_AMD64_DE_CFG: |
| 2781 | msr_info->data = svm->msr_decfg; |
| 2782 | break; |
| 2783 | default: |
| 2784 | return kvm_get_msr_common(vcpu, msr_info); |
| 2785 | } |
| 2786 | return 0; |
| 2787 | } |
| 2788 | |
| 2789 | static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) |
| 2790 | { |
| 2791 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2792 | if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) |
| 2793 | return kvm_complete_insn_gp(vcpu, err); |
| 2794 | |
| 2795 | svm_vmgexit_inject_exception(svm, X86_TRAP_GP); |
| 2796 | return 1; |
| 2797 | } |
| 2798 | |
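| | /* |
| | * Emulate writes to MSR_VM_CR: reject reserved bits, keep the SVM lock |
| | * and disable bits frozen once SVM_DIS is set, and refuse to disable SVM |
| | * while the guest still has EFER.SVME enabled. |
| | */ |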
| 2799 | static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) |
| 2800 | { |
| 2801 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2802 | int svm_dis, chg_mask; |
| 2803 | |
| 2804 | if (data & ~SVM_VM_CR_VALID_MASK) |
| 2805 | return 1; |
| 2806 | |
| 2807 | chg_mask = SVM_VM_CR_VALID_MASK; |
| 2808 | |
| 2809 | if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) |
| 2810 | chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); |
| 2811 | |
| 2812 | svm->nested.vm_cr_msr &= ~chg_mask; |
| 2813 | svm->nested.vm_cr_msr |= (data & chg_mask); |
| 2814 | |
| 2815 | svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; |
| 2816 | |
| 2817 | /* check for svm_disable while efer.svme is set */ |
| 2818 | if (svm_dis && (vcpu->arch.efer & EFER_SVME)) |
| 2819 | return 1; |
| 2820 | |
| 2821 | return 0; |
| 2822 | } |
| 2823 | |
| 2824 | static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
| 2825 | { |
| 2826 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2827 | int ret = 0; |
| 2828 | |
| 2829 | u32 ecx = msr->index; |
| 2830 | u64 data = msr->data; |
| 2831 | |
| 2832 | if (sev_es_prevent_msr_access(vcpu, msr)) |
| 2833 | return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; |
| 2834 | |
| 2835 | switch (ecx) { |
| 2836 | case MSR_AMD64_TSC_RATIO: |
| 2837 | |
| 2838 | if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) { |
| 2839 | |
| 2840 | if (!msr->host_initiated) |
| 2841 | return 1; |
| 2842 | /* |
| 2843 | * In case TSC scaling is not enabled, always |
| 2844 | * leave this MSR at the default value. |
| 2845 | * |
| 2846 | * Due to bug in qemu 6.2.0, it would try to set |
| 2847 | * this msr to 0 if tsc scaling is not enabled. |
| 2848 | * Ignore this value as well. |
| 2849 | */ |
| 2850 | if (data != 0 && data != svm->tsc_ratio_msr) |
| 2851 | return 1; |
| 2852 | break; |
| 2853 | } |
| 2854 | |
| 2855 | if (data & SVM_TSC_RATIO_RSVD) |
| 2856 | return 1; |
| 2857 | |
| 2858 | svm->tsc_ratio_msr = data; |
| 2859 | |
| 2860 | if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) && |
| 2861 | is_guest_mode(vcpu)) |
| 2862 | nested_svm_update_tsc_ratio_msr(vcpu); |
| 2863 | |
| 2864 | break; |
| 2865 | case MSR_IA32_CR_PAT: |
| 2866 | ret = kvm_set_msr_common(vcpu, msr); |
| 2867 | if (ret) |
| 2868 | break; |
| 2869 | |
| 2870 | svm->vmcb01.ptr->save.g_pat = data; |
| 2871 | if (is_guest_mode(vcpu)) |
| 2872 | nested_vmcb02_compute_g_pat(svm); |
| 2873 | vmcb_mark_dirty(svm->vmcb, VMCB_NPT); |
| 2874 | break; |
| 2875 | case MSR_IA32_SPEC_CTRL: |
| 2876 | if (!msr->host_initiated && |
| 2877 | !guest_has_spec_ctrl_msr(vcpu)) |
| 2878 | return 1; |
| 2879 | |
| 2880 | if (kvm_spec_ctrl_test_value(data)) |
| 2881 | return 1; |
| 2882 | |
| 2883 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 2884 | svm->vmcb->save.spec_ctrl = data; |
| 2885 | else |
| 2886 | svm->spec_ctrl = data; |
| 2887 | if (!data) |
| 2888 | break; |
| 2889 | |
| 2890 | /* |
| 2891 | * For non-nested: |
| 2892 | * When it's written (to non-zero) for the first time, pass |
| 2893 | * it through. |
| 2894 | * |
| 2895 | * For nested: |
| 2896 | * The handling of the MSR bitmap for L2 guests is done in |
| 2897 | * nested_svm_merge_msrpm(). |
| 2898 | * We update the L1 MSR bit as well since it will end up |
| 2899 | * touching the MSR anyway now. |
| 2900 | */ |
| 2901 | svm_disable_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW); |
| 2902 | break; |
| 2903 | case MSR_AMD64_VIRT_SPEC_CTRL: |
| 2904 | if (!msr->host_initiated && |
| 2905 | !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD)) |
| 2906 | return 1; |
| 2907 | |
| 2908 | if (data & ~SPEC_CTRL_SSBD) |
| 2909 | return 1; |
| 2910 | |
| 2911 | svm->virt_spec_ctrl = data; |
| 2912 | break; |
| 2913 | case MSR_STAR: |
| 2914 | svm->vmcb01.ptr->save.star = data; |
| 2915 | break; |
| 2916 | #ifdef CONFIG_X86_64 |
| 2917 | case MSR_LSTAR: |
| 2918 | svm->vmcb01.ptr->save.lstar = data; |
| 2919 | break; |
| 2920 | case MSR_CSTAR: |
| 2921 | svm->vmcb01.ptr->save.cstar = data; |
| 2922 | break; |
| 2923 | case MSR_GS_BASE: |
| 2924 | svm->vmcb01.ptr->save.gs.base = data; |
| 2925 | break; |
| 2926 | case MSR_FS_BASE: |
| 2927 | svm->vmcb01.ptr->save.fs.base = data; |
| 2928 | break; |
| 2929 | case MSR_KERNEL_GS_BASE: |
| 2930 | svm->vmcb01.ptr->save.kernel_gs_base = data; |
| 2931 | break; |
| 2932 | case MSR_SYSCALL_MASK: |
| 2933 | svm->vmcb01.ptr->save.sfmask = data; |
| 2934 | break; |
| 2935 | #endif |
| 2936 | case MSR_IA32_SYSENTER_CS: |
| 2937 | svm->vmcb01.ptr->save.sysenter_cs = data; |
| 2938 | break; |
| 2939 | case MSR_IA32_SYSENTER_EIP: |
| 2940 | svm->vmcb01.ptr->save.sysenter_eip = (u32)data; |
| 2941 | /* |
| 2942 | * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs |
| 2943 | * when we spoof an Intel vendor ID (for cross vendor migration). |
| 2944 | * In this case we use this intercept to track the high |
| 2945 | * 32 bit part of these msrs to support Intel's |
| 2946 | * implementation of SYSENTER/SYSEXIT. |
| 2947 | */ |
| 2948 | svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; |
| 2949 | break; |
| 2950 | case MSR_IA32_SYSENTER_ESP: |
| 2951 | svm->vmcb01.ptr->save.sysenter_esp = (u32)data; |
| 2952 | svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0; |
| 2953 | break; |
| 2954 | case MSR_IA32_S_CET: |
| 2955 | svm->vmcb->save.s_cet = data; |
| 2956 | vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); |
| 2957 | break; |
| 2958 | case MSR_IA32_INT_SSP_TAB: |
| 2959 | svm->vmcb->save.isst_addr = data; |
| 2960 | vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); |
| 2961 | break; |
| 2962 | case MSR_KVM_INTERNAL_GUEST_SSP: |
| 2963 | svm->vmcb->save.ssp = data; |
| 2964 | vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET); |
| 2965 | break; |
| 2966 | case MSR_TSC_AUX: |
| 2967 | /* |
| 2968 | * TSC_AUX is always virtualized for SEV-ES guests when the |
| 2969 | * feature is available. The user return MSR support is not |
| 2970 | * required in this case because TSC_AUX is restored on #VMEXIT |
| 2971 | * from the host save area. |
| 2972 | */ |
| 2973 | if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm)) |
| 2974 | break; |
| 2975 | |
| 2976 | /* |
| 2977 | * TSC_AUX is usually changed only during boot and never read |
| 2978 | * directly. Intercept TSC_AUX and switch it via user return. |
| 2979 | */ |
| 2980 | preempt_disable(); |
| 2981 | ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); |
| 2982 | preempt_enable(); |
| 2983 | if (ret) |
| 2984 | break; |
| 2985 | |
| 2986 | svm->tsc_aux = data; |
| 2987 | break; |
| 2988 | case MSR_IA32_DEBUGCTLMSR: |
| 2989 | if (!lbrv) { |
| 2990 | kvm_pr_unimpl_wrmsr(vcpu, ecx, data); |
| 2991 | break; |
| 2992 | } |
| 2993 | |
| 2994 | /* |
| 2995 | * Suppress BTF as KVM doesn't virtualize BTF, but there's no |
| 2996 | * way to communicate lack of support to the guest. |
| 2997 | */ |
| 2998 | if (data & DEBUGCTLMSR_BTF) { |
| 2999 | kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data); |
| 3000 | data &= ~DEBUGCTLMSR_BTF; |
| 3001 | } |
| 3002 | |
| 3003 | if (data & DEBUGCTL_RESERVED_BITS) |
| 3004 | return 1; |
| 3005 | |
| 3006 | if (svm->vmcb->save.dbgctl == data) |
| 3007 | break; |
| 3008 | |
| 3009 | svm->vmcb->save.dbgctl = data; |
| 3010 | vmcb_mark_dirty(svm->vmcb, VMCB_LBR); |
| 3011 | svm_update_lbrv(vcpu); |
| 3012 | break; |
| 3013 | case MSR_VM_HSAVE_PA: |
| 3014 | /* |
| 3015 | * Old kernels did not validate the value written to |
| 3016 | * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid |
| 3017 | * value to allow live migrating buggy or malicious guests |
| 3018 | * originating from those kernels. |
| 3019 | */ |
| 3020 | if (!msr->host_initiated && !page_address_valid(vcpu, data)) |
| 3021 | return 1; |
| 3022 | |
| 3023 | svm->nested.hsave_msr = data & PAGE_MASK; |
| 3024 | break; |
| 3025 | case MSR_VM_CR: |
| 3026 | return svm_set_vm_cr(vcpu, data); |
| 3027 | case MSR_VM_IGNNE: |
| 3028 | kvm_pr_unimpl_wrmsr(vcpu, ecx, data); |
| 3029 | break; |
| 3030 | case MSR_AMD64_DE_CFG: { |
| 3031 | u64 supported_de_cfg; |
| 3032 | |
| 3033 | if (svm_get_feature_msr(ecx, &supported_de_cfg)) |
| 3034 | return 1; |
| 3035 | |
| 3036 | if (data & ~supported_de_cfg) |
| 3037 | return 1; |
| 3038 | |
| 3039 | svm->msr_decfg = data; |
| 3040 | break; |
| 3041 | } |
| 3042 | default: |
| 3043 | return kvm_set_msr_common(vcpu, msr); |
| 3044 | } |
| 3045 | return ret; |
| 3046 | } |
| 3047 | |
| 3048 | static int msr_interception(struct kvm_vcpu *vcpu) |
| 3049 | { |
| 3050 | if (to_svm(vcpu)->vmcb->control.exit_info_1) |
| 3051 | return kvm_emulate_wrmsr(vcpu); |
| 3052 | else |
| 3053 | return kvm_emulate_rdmsr(vcpu); |
| 3054 | } |
| 3055 | |
| 3056 | static int interrupt_window_interception(struct kvm_vcpu *vcpu) |
| 3057 | { |
| 3058 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 3059 | svm_clear_vintr(to_svm(vcpu)); |
| 3060 | |
| 3061 | /* |
| 3062 | * If not running nested, for AVIC the only reason to end up here is |
| 3063 | * ExtINTs. In this case AVIC was temporarily disabled for requesting |
| 3064 | * the IRQ window and it has to be re-enabled. |
| 3065 | * |
| 3066 | * If running nested, still remove the VM-wide AVIC inhibit to support |
| 3067 | * the case in which the interrupt window was requested when the vCPU |
| 3068 | * was not running nested. |
| 3069 | * |
| 3070 | * Any vCPU that is still running nested will keep its AVIC inhibited |
| 3071 | * due to the per-vCPU AVIC inhibition. |
| 3072 | */ |
| 3073 | kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); |
| 3074 | |
| 3075 | ++vcpu->stat.irq_window_exits; |
| 3076 | return 1; |
| 3077 | } |
| 3078 | |
| 3079 | static int pause_interception(struct kvm_vcpu *vcpu) |
| 3080 | { |
| 3081 | bool in_kernel; |
| 3082 | /* |
| 3083 | * CPL is not made available for an SEV-ES guest, therefore |
| 3084 | * vcpu->arch.preempted_in_kernel can never be true. Just |
| 3085 | * set in_kernel to false as well. |
| 3086 | */ |
| 3087 | in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; |
| 3088 | |
| 3089 | grow_ple_window(vcpu); |
| 3090 | |
| 3091 | kvm_vcpu_on_spin(vcpu, in_kernel); |
| 3092 | return kvm_skip_emulated_instruction(vcpu); |
| 3093 | } |
| 3094 | |
| 3095 | static int invpcid_interception(struct kvm_vcpu *vcpu) |
| 3096 | { |
| 3097 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3098 | unsigned long type; |
| 3099 | gva_t gva; |
| 3100 | |
| 3101 | if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) { |
| 3102 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 3103 | return 1; |
| 3104 | } |
| 3105 | |
| 3106 | /* |
| 3107 | * For an INVPCID intercept: |
| 3108 | * EXITINFO1 provides the linear address of the memory operand. |
| 3109 | * EXITINFO2 provides the contents of the register operand. |
| 3110 | */ |
| 3111 | type = svm->vmcb->control.exit_info_2; |
| 3112 | gva = svm->vmcb->control.exit_info_1; |
| 3113 | |
| 3114 | /* |
| 3115 | * FIXME: Perform segment checks for 32-bit mode, and inject #SS if the |
| 3116 | * stack segment is used. The intercept takes priority over all |
| 3117 | * #GP checks except CPL>0, but somehow still generates a linear |
| 3118 | * address? The APM is sorely lacking. |
| 3119 | */ |
| 3120 | if (is_noncanonical_address(gva, vcpu, 0)) { |
| 3121 | kvm_queue_exception_e(vcpu, GP_VECTOR, 0); |
| 3122 | return 1; |
| 3123 | } |
| 3124 | |
| 3125 | return kvm_handle_invpcid(vcpu, type, gva); |
| 3126 | } |
| 3127 | |
| 3128 | static inline int complete_userspace_buslock(struct kvm_vcpu *vcpu) |
| 3129 | { |
| 3130 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3131 | |
| 3132 | /* |
| 3133 | * If userspace has NOT changed RIP, then KVM's ABI is to let the guest |
| 3134 | * execute the bus-locking instruction. Set the bus lock counter to '1' |
| 3135 | * to effectively step past the bus lock. |
| 3136 | */ |
| 3137 | if (kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))
| 3138 | svm->vmcb->control.bus_lock_counter = 1; |
| 3139 | |
| 3140 | return 1; |
| 3141 | } |
| 3142 | |
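/*
 * A bus lock exit is forwarded to userspace (KVM_EXIT_X86_BUS_LOCK).  The
 * current linear RIP is snapshotted so that the completion callback above can
 * tell whether userspace changed RIP; if RIP is unchanged, the bus lock
 * counter is set to '1' so the guest can execute the bus-locking instruction
 * without immediately re-exiting.
 */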
| 3143 | static int bus_lock_exit(struct kvm_vcpu *vcpu) |
| 3144 | { |
| 3145 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3146 | |
| 3147 | vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; |
| 3148 | vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; |
| 3149 | |
| 3150 | vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu); |
| 3151 | vcpu->arch.complete_userspace_io = complete_userspace_buslock; |
| 3152 | |
| 3153 | if (is_guest_mode(vcpu)) |
| 3154 | svm->nested.ctl.bus_lock_rip = vcpu->arch.cui_linear_rip; |
| 3155 | |
| 3156 | return 0; |
| 3157 | } |
| 3158 | |
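/*
 * Exit handler dispatch table, indexed by SVM exit code.  Exit codes without
 * an entry (or with a NULL handler) are rejected by svm_check_exit_valid().
 */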
| 3159 | static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { |
| 3160 | [SVM_EXIT_READ_CR0] = cr_interception, |
| 3161 | [SVM_EXIT_READ_CR3] = cr_interception, |
| 3162 | [SVM_EXIT_READ_CR4] = cr_interception, |
| 3163 | [SVM_EXIT_READ_CR8] = cr_interception, |
| 3164 | [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, |
| 3165 | [SVM_EXIT_WRITE_CR0] = cr_interception, |
| 3166 | [SVM_EXIT_WRITE_CR3] = cr_interception, |
| 3167 | [SVM_EXIT_WRITE_CR4] = cr_interception, |
| 3168 | [SVM_EXIT_WRITE_CR8] = cr8_write_interception, |
| 3169 | [SVM_EXIT_READ_DR0] = dr_interception, |
| 3170 | [SVM_EXIT_READ_DR1] = dr_interception, |
| 3171 | [SVM_EXIT_READ_DR2] = dr_interception, |
| 3172 | [SVM_EXIT_READ_DR3] = dr_interception, |
| 3173 | [SVM_EXIT_READ_DR4] = dr_interception, |
| 3174 | [SVM_EXIT_READ_DR5] = dr_interception, |
| 3175 | [SVM_EXIT_READ_DR6] = dr_interception, |
| 3176 | [SVM_EXIT_READ_DR7] = dr_interception, |
| 3177 | [SVM_EXIT_WRITE_DR0] = dr_interception, |
| 3178 | [SVM_EXIT_WRITE_DR1] = dr_interception, |
| 3179 | [SVM_EXIT_WRITE_DR2] = dr_interception, |
| 3180 | [SVM_EXIT_WRITE_DR3] = dr_interception, |
| 3181 | [SVM_EXIT_WRITE_DR4] = dr_interception, |
| 3182 | [SVM_EXIT_WRITE_DR5] = dr_interception, |
| 3183 | [SVM_EXIT_WRITE_DR6] = dr_interception, |
| 3184 | [SVM_EXIT_WRITE_DR7] = dr_interception, |
| 3185 | [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, |
| 3186 | [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, |
| 3187 | [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, |
| 3188 | [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, |
| 3189 | [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception, |
| 3190 | [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception, |
| 3191 | [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, |
| 3192 | [SVM_EXIT_INTR] = intr_interception, |
| 3193 | [SVM_EXIT_NMI] = nmi_interception, |
| 3194 | [SVM_EXIT_SMI] = smi_interception, |
| 3195 | [SVM_EXIT_VINTR] = interrupt_window_interception, |
| 3196 | [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, |
| 3197 | [SVM_EXIT_CPUID] = kvm_emulate_cpuid, |
| 3198 | [SVM_EXIT_IRET] = iret_interception, |
| 3199 | [SVM_EXIT_INVD] = kvm_emulate_invd, |
| 3200 | [SVM_EXIT_PAUSE] = pause_interception, |
| 3201 | [SVM_EXIT_HLT] = kvm_emulate_halt, |
| 3202 | [SVM_EXIT_INVLPG] = invlpg_interception, |
| 3203 | [SVM_EXIT_INVLPGA] = invlpga_interception, |
| 3204 | [SVM_EXIT_IOIO] = io_interception, |
| 3205 | [SVM_EXIT_MSR] = msr_interception, |
| 3206 | [SVM_EXIT_TASK_SWITCH] = task_switch_interception, |
| 3207 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, |
| 3208 | [SVM_EXIT_VMRUN] = vmrun_interception, |
| 3209 | [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall, |
| 3210 | [SVM_EXIT_VMLOAD] = vmload_interception, |
| 3211 | [SVM_EXIT_VMSAVE] = vmsave_interception, |
| 3212 | [SVM_EXIT_STGI] = stgi_interception, |
| 3213 | [SVM_EXIT_CLGI] = clgi_interception, |
| 3214 | [SVM_EXIT_SKINIT] = skinit_interception, |
| 3215 | [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op, |
| 3216 | [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd, |
| 3217 | [SVM_EXIT_MONITOR] = kvm_emulate_monitor, |
| 3218 | [SVM_EXIT_MWAIT] = kvm_emulate_mwait, |
| 3219 | [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv, |
| 3220 | [SVM_EXIT_RDPRU] = kvm_handle_invalid_op, |
| 3221 | [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap, |
| 3222 | [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap, |
| 3223 | [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap, |
| 3224 | [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap, |
| 3225 | [SVM_EXIT_INVPCID] = invpcid_interception, |
| 3226 | [SVM_EXIT_IDLE_HLT] = kvm_emulate_halt, |
| 3227 | [SVM_EXIT_NPF] = npf_interception, |
| 3228 | [SVM_EXIT_BUS_LOCK] = bus_lock_exit, |
| 3229 | [SVM_EXIT_RSM] = rsm_interception, |
| 3230 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, |
| 3231 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, |
| 3232 | #ifdef CONFIG_KVM_AMD_SEV |
| 3233 | [SVM_EXIT_VMGEXIT] = sev_handle_vmgexit, |
| 3234 | #endif |
| 3235 | }; |
| 3236 | |
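/*
 * Dump the VMCB control and save areas for debugging failed VMRUNs.  For
 * SEV-ES and SEV-SNP guests the save area lives in the encrypted VMSA, so it
 * must be decrypted (when possible) before it can be printed.
 */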
| 3237 | static void dump_vmcb(struct kvm_vcpu *vcpu) |
| 3238 | { |
| 3239 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3240 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 3241 | struct vmcb_save_area *save = &svm->vmcb->save; |
| 3242 | struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; |
| 3243 | char *vm_type; |
| 3244 | |
| 3245 | if (!dump_invalid_vmcb) { |
| 3246 | pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n" ); |
| 3247 | return; |
| 3248 | } |
| 3249 | |
| 3250 | guard(mutex)(&vmcb_dump_mutex);
| 3251 | 
| 3252 | vm_type = sev_snp_guest(vcpu->kvm) ? "SEV-SNP" :
| 3253 | sev_es_guest(vcpu->kvm) ? "SEV-ES" :
| 3254 | sev_guest(vcpu->kvm) ? "SEV" : "SVM";
| 3255 | |
| 3256 | pr_err("%s vCPU%u VMCB %p, last attempted VMRUN on CPU %d\n" , |
| 3257 | vm_type, vcpu->vcpu_id, svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); |
| 3258 | pr_err("VMCB Control Area:\n" ); |
| 3259 | pr_err("%-20s%04x\n" , "cr_read:" , control->intercepts[INTERCEPT_CR] & 0xffff); |
| 3260 | pr_err("%-20s%04x\n" , "cr_write:" , control->intercepts[INTERCEPT_CR] >> 16); |
| 3261 | pr_err("%-20s%04x\n" , "dr_read:" , control->intercepts[INTERCEPT_DR] & 0xffff); |
| 3262 | pr_err("%-20s%04x\n" , "dr_write:" , control->intercepts[INTERCEPT_DR] >> 16); |
| 3263 | pr_err("%-20s%08x\n" , "exceptions:" , control->intercepts[INTERCEPT_EXCEPTION]); |
| 3264 | pr_err("%-20s%08x %08x\n" , "intercepts:" , |
| 3265 | control->intercepts[INTERCEPT_WORD3], |
| 3266 | control->intercepts[INTERCEPT_WORD4]); |
| 3267 | pr_err("%-20s%d\n" , "pause filter count:" , control->pause_filter_count); |
| 3268 | pr_err("%-20s%d\n" , "pause filter threshold:" , |
| 3269 | control->pause_filter_thresh); |
| 3270 | pr_err("%-20s%016llx\n" , "iopm_base_pa:" , control->iopm_base_pa); |
| 3271 | pr_err("%-20s%016llx\n" , "msrpm_base_pa:" , control->msrpm_base_pa); |
| 3272 | pr_err("%-20s%016llx\n" , "tsc_offset:" , control->tsc_offset); |
| 3273 | pr_err("%-20s%d\n" , "asid:" , control->asid); |
| 3274 | pr_err("%-20s%d\n" , "tlb_ctl:" , control->tlb_ctl); |
| 3275 | pr_err("%-20s%08x\n" , "int_ctl:" , control->int_ctl); |
| 3276 | pr_err("%-20s%08x\n" , "int_vector:" , control->int_vector); |
| 3277 | pr_err("%-20s%08x\n" , "int_state:" , control->int_state); |
| 3278 | pr_err("%-20s%08x\n" , "exit_code:" , control->exit_code); |
| 3279 | pr_err("%-20s%016llx\n" , "exit_info1:" , control->exit_info_1); |
| 3280 | pr_err("%-20s%016llx\n" , "exit_info2:" , control->exit_info_2); |
| 3281 | pr_err("%-20s%08x\n" , "exit_int_info:" , control->exit_int_info); |
| 3282 | pr_err("%-20s%08x\n" , "exit_int_info_err:" , control->exit_int_info_err); |
| 3283 | pr_err("%-20s%lld\n" , "nested_ctl:" , control->nested_ctl); |
| 3284 | pr_err("%-20s%016llx\n" , "nested_cr3:" , control->nested_cr3); |
| 3285 | pr_err("%-20s%016llx\n" , "avic_vapic_bar:" , control->avic_vapic_bar); |
| 3286 | pr_err("%-20s%016llx\n" , "ghcb:" , control->ghcb_gpa); |
| 3287 | pr_err("%-20s%08x\n" , "event_inj:" , control->event_inj); |
| 3288 | pr_err("%-20s%08x\n" , "event_inj_err:" , control->event_inj_err); |
| 3289 | pr_err("%-20s%lld\n" , "virt_ext:" , control->virt_ext); |
| 3290 | pr_err("%-20s%016llx\n" , "next_rip:" , control->next_rip); |
| 3291 | pr_err("%-20s%016llx\n" , "avic_backing_page:" , control->avic_backing_page); |
| 3292 | pr_err("%-20s%016llx\n" , "avic_logical_id:" , control->avic_logical_id); |
| 3293 | pr_err("%-20s%016llx\n" , "avic_physical_id:" , control->avic_physical_id); |
| 3294 | pr_err("%-20s%016llx\n" , "vmsa_pa:" , control->vmsa_pa); |
| 3295 | pr_err("%-20s%016llx\n" , "allowed_sev_features:" , control->allowed_sev_features); |
| 3296 | pr_err("%-20s%016llx\n" , "guest_sev_features:" , control->guest_sev_features); |
| 3297 | |
| 3298 | if (sev_es_guest(vcpu->kvm)) {
| 3299 | save = sev_decrypt_vmsa(vcpu); |
| 3300 | if (!save) |
| 3301 | goto no_vmsa; |
| 3302 | |
| 3303 | save01 = save; |
| 3304 | } |
| 3305 | |
| 3306 | pr_err("VMCB State Save Area:\n" ); |
| 3307 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3308 | "es:" , |
| 3309 | save->es.selector, save->es.attrib, |
| 3310 | save->es.limit, save->es.base); |
| 3311 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3312 | "cs:" , |
| 3313 | save->cs.selector, save->cs.attrib, |
| 3314 | save->cs.limit, save->cs.base); |
| 3315 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3316 | "ss:" , |
| 3317 | save->ss.selector, save->ss.attrib, |
| 3318 | save->ss.limit, save->ss.base); |
| 3319 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3320 | "ds:" , |
| 3321 | save->ds.selector, save->ds.attrib, |
| 3322 | save->ds.limit, save->ds.base); |
| 3323 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3324 | "fs:" , |
| 3325 | save01->fs.selector, save01->fs.attrib, |
| 3326 | save01->fs.limit, save01->fs.base); |
| 3327 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3328 | "gs:" , |
| 3329 | save01->gs.selector, save01->gs.attrib, |
| 3330 | save01->gs.limit, save01->gs.base); |
| 3331 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3332 | "gdtr:" , |
| 3333 | save->gdtr.selector, save->gdtr.attrib, |
| 3334 | save->gdtr.limit, save->gdtr.base); |
| 3335 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3336 | "ldtr:" , |
| 3337 | save01->ldtr.selector, save01->ldtr.attrib, |
| 3338 | save01->ldtr.limit, save01->ldtr.base); |
| 3339 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3340 | "idtr:" , |
| 3341 | save->idtr.selector, save->idtr.attrib, |
| 3342 | save->idtr.limit, save->idtr.base); |
| 3343 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n" , |
| 3344 | "tr:" , |
| 3345 | save01->tr.selector, save01->tr.attrib, |
| 3346 | save01->tr.limit, save01->tr.base); |
| 3347 | pr_err("vmpl: %d cpl: %d efer: %016llx\n" , |
| 3348 | save->vmpl, save->cpl, save->efer); |
| 3349 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3350 | "cr0:" , save->cr0, "cr2:" , save->cr2); |
| 3351 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3352 | "cr3:" , save->cr3, "cr4:" , save->cr4); |
| 3353 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3354 | "dr6:" , save->dr6, "dr7:" , save->dr7); |
| 3355 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3356 | "rip:" , save->rip, "rflags:" , save->rflags); |
| 3357 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3358 | "rsp:" , save->rsp, "rax:" , save->rax); |
| 3359 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3360 | "s_cet:" , save->s_cet, "ssp:" , save->ssp); |
| 3361 | pr_err("%-15s %016llx\n" , |
| 3362 | "isst_addr:" , save->isst_addr); |
| 3363 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3364 | "star:" , save01->star, "lstar:" , save01->lstar); |
| 3365 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3366 | "cstar:" , save01->cstar, "sfmask:" , save01->sfmask); |
| 3367 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3368 | "kernel_gs_base:" , save01->kernel_gs_base, |
| 3369 | "sysenter_cs:" , save01->sysenter_cs); |
| 3370 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3371 | "sysenter_esp:" , save01->sysenter_esp, |
| 3372 | "sysenter_eip:" , save01->sysenter_eip); |
| 3373 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3374 | "gpat:" , save->g_pat, "dbgctl:" , save->dbgctl); |
| 3375 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3376 | "br_from:" , save->br_from, "br_to:" , save->br_to); |
| 3377 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3378 | "excp_from:" , save->last_excp_from, |
| 3379 | "excp_to:" , save->last_excp_to); |
| 3380 | |
| 3381 | if (sev_es_guest(vcpu->kvm)) {
| 3382 | struct sev_es_save_area *vmsa = (struct sev_es_save_area *)save; |
| 3383 | |
| 3384 | pr_err("%-15s %016llx\n" , |
| 3385 | "sev_features" , vmsa->sev_features); |
| 3386 | |
| 3387 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3388 | "pl0_ssp:" , vmsa->pl0_ssp, "pl1_ssp:" , vmsa->pl1_ssp); |
| 3389 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3390 | "pl2_ssp:" , vmsa->pl2_ssp, "pl3_ssp:" , vmsa->pl3_ssp); |
| 3391 | pr_err("%-15s %016llx\n" , |
| 3392 | "u_cet:" , vmsa->u_cet); |
| 3393 | |
| 3394 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3395 | "rax:" , vmsa->rax, "rbx:" , vmsa->rbx); |
| 3396 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3397 | "rcx:" , vmsa->rcx, "rdx:" , vmsa->rdx); |
| 3398 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3399 | "rsi:" , vmsa->rsi, "rdi:" , vmsa->rdi); |
| 3400 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3401 | "rbp:" , vmsa->rbp, "rsp:" , vmsa->rsp); |
| 3402 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3403 | "r8:" , vmsa->r8, "r9:" , vmsa->r9); |
| 3404 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3405 | "r10:" , vmsa->r10, "r11:" , vmsa->r11); |
| 3406 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3407 | "r12:" , vmsa->r12, "r13:" , vmsa->r13); |
| 3408 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3409 | "r14:" , vmsa->r14, "r15:" , vmsa->r15); |
| 3410 | pr_err("%-15s %016llx %-13s %016llx\n" , |
| 3411 | "xcr0:" , vmsa->xcr0, "xss:" , vmsa->xss); |
| 3412 | } else { |
| 3413 | pr_err("%-15s %016llx %-13s %016lx\n" , |
| 3414 | "rax:" , save->rax, "rbx:" , |
| 3415 | vcpu->arch.regs[VCPU_REGS_RBX]); |
| 3416 | pr_err("%-15s %016lx %-13s %016lx\n" , |
| 3417 | "rcx:" , vcpu->arch.regs[VCPU_REGS_RCX], |
| 3418 | "rdx:" , vcpu->arch.regs[VCPU_REGS_RDX]); |
| 3419 | pr_err("%-15s %016lx %-13s %016lx\n" , |
| 3420 | "rsi:" , vcpu->arch.regs[VCPU_REGS_RSI], |
| 3421 | "rdi:" , vcpu->arch.regs[VCPU_REGS_RDI]); |
| 3422 | pr_err("%-15s %016lx %-13s %016llx\n" , |
| 3423 | "rbp:" , vcpu->arch.regs[VCPU_REGS_RBP], |
| 3424 | "rsp:" , save->rsp); |
| 3425 | #ifdef CONFIG_X86_64 |
| 3426 | pr_err("%-15s %016lx %-13s %016lx\n" , |
| 3427 | "r8:" , vcpu->arch.regs[VCPU_REGS_R8], |
| 3428 | "r9:" , vcpu->arch.regs[VCPU_REGS_R9]); |
| 3429 | pr_err("%-15s %016lx %-13s %016lx\n" , |
| 3430 | "r10:" , vcpu->arch.regs[VCPU_REGS_R10], |
| 3431 | "r11:" , vcpu->arch.regs[VCPU_REGS_R11]); |
| 3432 | pr_err("%-15s %016lx %-13s %016lx\n" , |
| 3433 | "r12:" , vcpu->arch.regs[VCPU_REGS_R12], |
| 3434 | "r13:" , vcpu->arch.regs[VCPU_REGS_R13]); |
| 3435 | pr_err("%-15s %016lx %-13s %016lx\n" , |
| 3436 | "r14:" , vcpu->arch.regs[VCPU_REGS_R14], |
| 3437 | "r15:" , vcpu->arch.regs[VCPU_REGS_R15]); |
| 3438 | #endif |
| 3439 | } |
| 3440 | |
| 3441 | no_vmsa: |
| 3442 | if (sev_es_guest(vcpu->kvm))
| 3443 | sev_free_decrypted_vmsa(vcpu, save);
| 3444 | } |
| 3445 | |
| 3446 | static bool svm_check_exit_valid(u64 exit_code) |
| 3447 | { |
| 3448 | return (exit_code < ARRAY_SIZE(svm_exit_handlers) && |
| 3449 | svm_exit_handlers[exit_code]); |
| 3450 | } |
| 3451 | |
| 3452 | static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code) |
| 3453 | { |
| 3454 | dump_vmcb(vcpu); |
| 3455 | kvm_prepare_unexpected_reason_exit(vcpu, exit_code);
| 3456 | return 0; |
| 3457 | } |
| 3458 | |
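/*
 * When retpolines are in use, the hottest exit reasons are dispatched via
 * direct calls below to avoid the overhead of an indirect branch through the
 * handler table.
 */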
| 3459 | int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code) |
| 3460 | { |
| 3461 | if (!svm_check_exit_valid(exit_code)) |
| 3462 | return svm_handle_invalid_exit(vcpu, exit_code); |
| 3463 | |
| 3464 | #ifdef CONFIG_MITIGATION_RETPOLINE |
| 3465 | if (exit_code == SVM_EXIT_MSR) |
| 3466 | return msr_interception(vcpu); |
| 3467 | else if (exit_code == SVM_EXIT_VINTR) |
| 3468 | return interrupt_window_interception(vcpu); |
| 3469 | else if (exit_code == SVM_EXIT_INTR) |
| 3470 | return intr_interception(vcpu); |
| 3471 | else if (exit_code == SVM_EXIT_HLT || exit_code == SVM_EXIT_IDLE_HLT) |
| 3472 | return kvm_emulate_halt(vcpu); |
| 3473 | else if (exit_code == SVM_EXIT_NPF) |
| 3474 | return npf_interception(vcpu); |
| 3475 | #ifdef CONFIG_KVM_AMD_SEV |
| 3476 | else if (exit_code == SVM_EXIT_VMGEXIT) |
| 3477 | return sev_handle_vmgexit(vcpu); |
| 3478 | #endif |
| 3479 | #endif |
| 3480 | return svm_exit_handlers[exit_code](vcpu); |
| 3481 | } |
| 3482 | |
| 3483 | static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, |
| 3484 | u64 *info1, u64 *info2, |
| 3485 | u32 *intr_info, u32 *error_code) |
| 3486 | { |
| 3487 | struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; |
| 3488 | |
| 3489 | *reason = control->exit_code; |
| 3490 | *info1 = control->exit_info_1; |
| 3491 | *info2 = control->exit_info_2; |
| 3492 | *intr_info = control->exit_int_info; |
| 3493 | if ((*intr_info & SVM_EXITINTINFO_VALID) && |
| 3494 | (*intr_info & SVM_EXITINTINFO_VALID_ERR)) |
| 3495 | *error_code = control->exit_int_info_err; |
| 3496 | else |
| 3497 | *error_code = 0; |
| 3498 | } |
| 3499 | |
| 3500 | static void svm_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, |
| 3501 | u32 *error_code) |
| 3502 | { |
| 3503 | struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; |
| 3504 | |
| 3505 | *intr_info = control->event_inj; |
| 3506 | |
| 3507 | if ((*intr_info & SVM_EXITINTINFO_VALID) && |
| 3508 | (*intr_info & SVM_EXITINTINFO_VALID_ERR)) |
| 3509 | *error_code = control->event_inj_err; |
| 3510 | else |
| 3511 | *error_code = 0; |
| 3512 | |
| 3513 | } |
| 3514 | |
| 3515 | static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) |
| 3516 | { |
| 3517 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3518 | struct kvm_run *kvm_run = vcpu->run; |
| 3519 | u32 exit_code = svm->vmcb->control.exit_code; |
| 3520 | |
| 3521 | /* SEV-ES guests must use the CR write traps to track CR registers. */ |
| 3522 | if (!sev_es_guest(vcpu->kvm)) {
| 3523 | if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
| 3524 | vcpu->arch.cr0 = svm->vmcb->save.cr0; |
| 3525 | if (npt_enabled) |
| 3526 | vcpu->arch.cr3 = svm->vmcb->save.cr3; |
| 3527 | } |
| 3528 | |
| 3529 | if (is_guest_mode(vcpu)) { |
| 3530 | int vmexit; |
| 3531 | |
| 3532 | trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM); |
| 3533 | |
| 3534 | vmexit = nested_svm_exit_special(svm); |
| 3535 | |
| 3536 | if (vmexit == NESTED_EXIT_CONTINUE) |
| 3537 | vmexit = nested_svm_exit_handled(svm); |
| 3538 | |
| 3539 | if (vmexit == NESTED_EXIT_DONE) |
| 3540 | return 1; |
| 3541 | } |
| 3542 | |
| 3543 | if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { |
| 3544 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
| 3545 | kvm_run->fail_entry.hardware_entry_failure_reason |
| 3546 | = svm->vmcb->control.exit_code; |
| 3547 | kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; |
| 3548 | dump_vmcb(vcpu); |
| 3549 | return 0; |
| 3550 | } |
| 3551 | |
| 3552 | if (exit_fastpath != EXIT_FASTPATH_NONE) |
| 3553 | return 1; |
| 3554 | |
| 3555 | return svm_invoke_exit_handler(vcpu, exit_code); |
| 3556 | } |
| 3557 | |
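/*
 * Per-VMRUN preparation.  A non-zero return indicates the vCPU cannot enter
 * the guest; svm_vcpu_run() then reports a failed VM-Entry to userspace.
 */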
| 3558 | static int pre_svm_run(struct kvm_vcpu *vcpu) |
| 3559 | { |
| 3560 | struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); |
| 3561 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3562 | |
| 3563 | /* |
| 3564 | * If the previous vmrun of the vmcb occurred on a different physical |
| 3565 | * cpu, then mark the vmcb dirty and assign a new asid. Hardware's |
| 3566 | * vmcb clean bits are per logical CPU, as are KVM's asid assignments. |
| 3567 | */ |
| 3568 | if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { |
| 3569 | svm->current_vmcb->asid_generation = 0; |
| 3570 | vmcb_mark_all_dirty(svm->vmcb);
| 3571 | svm->current_vmcb->cpu = vcpu->cpu; |
| 3572 | } |
| 3573 | |
| 3574 | if (sev_guest(vcpu->kvm))
| 3575 | return pre_sev_run(svm, vcpu->cpu);
| 3576 | |
| 3577 | /* FIXME: handle wraparound of asid_generation */ |
| 3578 | if (svm->current_vmcb->asid_generation != sd->asid_generation) |
| 3579 | new_asid(svm, sd); |
| 3580 | |
| 3581 | return 0; |
| 3582 | } |
| 3583 | |
| 3584 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) |
| 3585 | { |
| 3586 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3587 | |
| 3588 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; |
| 3589 | |
| 3590 | if (svm->nmi_l1_to_l2) |
| 3591 | return; |
| 3592 | |
| 3593 | /* |
| 3594 | * No need to manually track NMI masking when vNMI is enabled, hardware |
| 3595 | * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the |
| 3596 | * case where software directly injects an NMI. |
| 3597 | */ |
| 3598 | if (!is_vnmi_enabled(svm)) { |
| 3599 | svm->nmi_masked = true; |
| 3600 | svm_set_iret_intercept(svm); |
| 3601 | } |
| 3602 | ++vcpu->stat.nmi_injections; |
| 3603 | } |
| 3604 | |
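/*
 * With vNMI, hardware tracks NMI state in int_ctl: V_NMI_PENDING_MASK marks a
 * pending NMI and V_NMI_BLOCKING_MASK marks NMIs as masked.  The helpers
 * below query and update those bits instead of KVM's software bookkeeping.
 */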
| 3605 | static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu) |
| 3606 | { |
| 3607 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3608 | |
| 3609 | if (!is_vnmi_enabled(svm)) |
| 3610 | return false; |
| 3611 | |
| 3612 | return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); |
| 3613 | } |
| 3614 | |
| 3615 | static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu) |
| 3616 | { |
| 3617 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3618 | |
| 3619 | if (!is_vnmi_enabled(svm)) |
| 3620 | return false; |
| 3621 | |
| 3622 | if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK) |
| 3623 | return false; |
| 3624 | |
| 3625 | svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK; |
| 3626 | vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
| 3627 | |
| 3628 | /* |
| 3629 | * Because the pending NMI is serviced by hardware, KVM can't know when |
| 3630 | * the NMI is "injected", but for all intents and purposes, passing the |
| 3631 | * NMI off to hardware counts as injection. |
| 3632 | */ |
| 3633 | ++vcpu->stat.nmi_injections; |
| 3634 | |
| 3635 | return true; |
| 3636 | } |
| 3637 | |
| 3638 | static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected) |
| 3639 | { |
| 3640 | struct kvm_queued_interrupt *intr = &vcpu->arch.interrupt; |
| 3641 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3642 | u32 type; |
| 3643 | |
| 3644 | if (intr->soft) { |
| 3645 | if (svm_update_soft_interrupt_rip(vcpu, intr->nr))
| 3646 | return; |
| 3647 | |
| 3648 | type = SVM_EVTINJ_TYPE_SOFT; |
| 3649 | } else { |
| 3650 | type = SVM_EVTINJ_TYPE_INTR; |
| 3651 | } |
| 3652 | |
| 3653 | trace_kvm_inj_virq(intr->nr, intr->soft, reinjected); |
| 3654 | ++vcpu->stat.irq_injections; |
| 3655 | |
| 3656 | svm->vmcb->control.event_inj = intr->nr | SVM_EVTINJ_VALID | type; |
| 3657 | } |
| 3658 | |
| 3659 | void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode, |
| 3660 | int trig_mode, int vector) |
| 3661 | { |
| 3662 | /* |
| 3663 | * apic->apicv_active must be read after vcpu->mode. |
| 3664 | * Pairs with smp_store_release in vcpu_enter_guest. |
| 3665 | */ |
| 3666 | bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE); |
| 3667 | |
| 3668 | /* Note, this is called iff the local APIC is in-kernel. */ |
| 3669 | if (!READ_ONCE(vcpu->arch.apic->apicv_active)) { |
| 3670 | /* Process the interrupt via kvm_check_and_inject_events(). */ |
| 3671 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 3672 | kvm_vcpu_kick(vcpu); |
| 3673 | return; |
| 3674 | } |
| 3675 | |
| 3676 | trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector); |
| 3677 | if (in_guest_mode) { |
| 3678 | /* |
| 3679 | * Signal the doorbell to tell hardware to inject the IRQ. If |
| 3680 | * the vCPU exits the guest before the doorbell chimes, hardware |
| 3681 | * will automatically process AVIC interrupts at the next VMRUN. |
| 3682 | */ |
| 3683 | avic_ring_doorbell(vcpu); |
| 3684 | } else { |
| 3685 | /* |
| 3686 | * Wake the vCPU if it was blocking. KVM will then detect the |
| 3687 | * pending IRQ when checking if the vCPU has a wake event. |
| 3688 | */ |
| 3689 | kvm_vcpu_wake_up(vcpu); |
| 3690 | } |
| 3691 | } |
| 3692 | |
| 3693 | static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, |
| 3694 | int trig_mode, int vector) |
| 3695 | { |
| 3696 | kvm_lapic_set_irr(vector, apic); |
| 3697 | |
| 3698 | /* |
| 3699 | * Pairs with the smp_mb_*() after setting vcpu->guest_mode in |
| 3700 | * vcpu_enter_guest() to ensure the write to the vIRR is ordered before |
| 3701 | * the read of guest_mode. This guarantees that either VMRUN will see |
| 3702 | * and process the new vIRR entry, or that svm_complete_interrupt_delivery |
| 3703 | * will signal the doorbell if the CPU has already entered the guest. |
| 3704 | */ |
| 3705 | smp_mb__after_atomic(); |
| 3706 | svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
| 3707 | } |
| 3708 | |
| 3709 | static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
| 3710 | { |
| 3711 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3712 | |
| 3713 | /* |
| 3714 | * SEV-ES guests must always keep the CR intercepts cleared. CR |
| 3715 | * tracking is done using the CR write traps. |
| 3716 | */ |
| 3717 | if (sev_es_guest(vcpu->kvm))
| 3718 | return; |
| 3719 | |
| 3720 | if (nested_svm_virtualize_tpr(vcpu)) |
| 3721 | return; |
| 3722 | |
| 3723 | svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
| 3724 | |
| 3725 | if (irr == -1) |
| 3726 | return; |
| 3727 | |
| 3728 | if (tpr >= irr) |
| 3729 | svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
| 3730 | } |
| 3731 | |
| 3732 | static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) |
| 3733 | { |
| 3734 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3735 | |
| 3736 | if (is_vnmi_enabled(svm)) |
| 3737 | return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK; |
| 3738 | else |
| 3739 | return svm->nmi_masked; |
| 3740 | } |
| 3741 | |
| 3742 | static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) |
| 3743 | { |
| 3744 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3745 | |
| 3746 | if (is_vnmi_enabled(svm)) { |
| 3747 | if (masked) |
| 3748 | svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK; |
| 3749 | else |
| 3750 | svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK; |
| 3751 | |
| 3752 | } else { |
| 3753 | svm->nmi_masked = masked; |
| 3754 | if (masked) |
| 3755 | svm_set_iret_intercept(svm); |
| 3756 | else |
| 3757 | svm_clr_iret_intercept(svm); |
| 3758 | } |
| 3759 | } |
| 3760 | |
| 3761 | bool svm_nmi_blocked(struct kvm_vcpu *vcpu) |
| 3762 | { |
| 3763 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3764 | struct vmcb *vmcb = svm->vmcb; |
| 3765 | |
| 3766 | if (!gif_set(svm)) |
| 3767 | return true; |
| 3768 | |
| 3769 | if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) |
| 3770 | return false; |
| 3771 | |
| 3772 | if (svm_get_nmi_mask(vcpu)) |
| 3773 | return true; |
| 3774 | |
| 3775 | return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK; |
| 3776 | } |
| 3777 | |
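/*
 * Returns -EBUSY if NMI injection must be retried later (e.g. a nested VMRUN
 * is pending), 0 if NMIs are blocked, and 1 if an NMI can be injected.
 */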
| 3778 | static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
| 3779 | { |
| 3780 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3781 | if (svm->nested.nested_run_pending) |
| 3782 | return -EBUSY; |
| 3783 | |
| 3784 | if (svm_nmi_blocked(vcpu)) |
| 3785 | return 0; |
| 3786 | |
| 3787 | /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ |
| 3788 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) |
| 3789 | return -EBUSY; |
| 3790 | return 1; |
| 3791 | } |
| 3792 | |
| 3793 | bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) |
| 3794 | { |
| 3795 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3796 | struct vmcb *vmcb = svm->vmcb; |
| 3797 | |
| 3798 | if (!gif_set(svm)) |
| 3799 | return true; |
| 3800 | |
| 3801 | if (is_guest_mode(vcpu)) { |
| 3802 | /* As long as interrupts are being delivered... */ |
| 3803 | if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) |
| 3804 | ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) |
| 3805 | : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) |
| 3806 | return true; |
| 3807 | |
| 3808 | /* ... vmexits aren't blocked by the interrupt shadow */ |
| 3809 | if (nested_exit_on_intr(svm)) |
| 3810 | return false; |
| 3811 | } else { |
| 3812 | if (!svm_get_if_flag(vcpu)) |
| 3813 | return true; |
| 3814 | } |
| 3815 | |
| 3816 | return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); |
| 3817 | } |
| 3818 | |
| 3819 | static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
| 3820 | { |
| 3821 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3822 | |
| 3823 | if (svm->nested.nested_run_pending) |
| 3824 | return -EBUSY; |
| 3825 | |
| 3826 | if (svm_interrupt_blocked(vcpu)) |
| 3827 | return 0; |
| 3828 | |
| 3829 | /* |
| 3830 | * An IRQ must not be injected into L2 if it's supposed to VM-Exit, |
| 3831 | * e.g. if the IRQ arrived asynchronously after checking nested events. |
| 3832 | */ |
| 3833 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) |
| 3834 | return -EBUSY; |
| 3835 | |
| 3836 | return 1; |
| 3837 | } |
| 3838 | |
| 3839 | static void svm_enable_irq_window(struct kvm_vcpu *vcpu) |
| 3840 | { |
| 3841 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3842 | |
| 3843 | /* |
| 3844 | * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes |
| 3845 | * 1, because that's a separate STGI/VMRUN intercept. The next time we |
| 3846 | * get that intercept, this function will be called again though and |
| 3847 | * we'll get the vintr intercept. However, if the vGIF feature is |
| 3848 | * enabled, the STGI interception will not occur. Enable the irq |
| 3849 | * window under the assumption that the hardware will set the GIF. |
| 3850 | */ |
| 3851 | if (vgif || gif_set(svm)) { |
| 3852 | /* |
| 3853 | * IRQ window is not needed when AVIC is enabled, |
| 3854 | * unless we have pending ExtINT since it cannot be injected |
| 3855 | * via AVIC. In such case, KVM needs to temporarily disable AVIC, |
| 3856 | * and fallback to injecting IRQ via V_IRQ. |
| 3857 | * |
| 3858 | * If running nested, AVIC is already locally inhibited |
| 3859 | * on this vCPU, therefore there is no need to request |
| 3860 | * the VM wide AVIC inhibition. |
| 3861 | */ |
| 3862 | if (!is_guest_mode(vcpu)) |
| 3863 | kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
| 3864 | |
| 3865 | svm_set_vintr(svm); |
| 3866 | } |
| 3867 | } |
| 3868 | |
| 3869 | static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) |
| 3870 | { |
| 3871 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3872 | |
| 3873 | /* |
| 3874 | * If NMIs are outright masked, i.e. the vCPU is already handling an |
| 3875 | * NMI, and KVM has not yet intercepted an IRET, then there is nothing |
| 3876 | * more to do at this time as KVM has already enabled IRET intercepts. |
| 3877 | * If KVM has already intercepted IRET, then single-step over the IRET, |
| 3878 | * as NMIs aren't architecturally unmasked until the IRET completes. |
| 3879 | * |
| 3880 | * If vNMI is enabled, KVM should never request an NMI window if NMIs |
| 3881 | * are masked, as KVM allows at most one to-be-injected NMI and one |
| 3882 | * pending NMI. If two NMIs arrive simultaneously, KVM will inject one |
| 3883 | * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are |
| 3884 | * unmasked. KVM _will_ request an NMI window in some situations, e.g. |
| 3885 | * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately |
| 3886 | * inject the NMI. In those situations, KVM needs to single-step over |
| 3887 | * the STI shadow or intercept STGI. |
| 3888 | */ |
| 3889 | if (svm_get_nmi_mask(vcpu)) { |
| 3890 | WARN_ON_ONCE(is_vnmi_enabled(svm)); |
| 3891 | |
| 3892 | if (!svm->awaiting_iret_completion) |
| 3893 | return; /* IRET will cause a vm exit */ |
| 3894 | } |
| 3895 | |
| 3896 | /* |
| 3897 | * SEV-ES guests are responsible for signaling when a vCPU is ready to |
| 3898 | * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e. |
| 3899 | * KVM can't intercept and single-step IRET to detect when NMIs are |
| 3900 | * unblocked (architecturally speaking). See SVM_VMGEXIT_NMI_COMPLETE. |
| 3901 | * |
| 3902 | * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware |
| 3903 | * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not |
| 3904 | * supported NAEs in the GHCB protocol. |
| 3905 | */ |
| 3906 | if (sev_es_guest(vcpu->kvm))
| 3907 | return; |
| 3908 | |
| 3909 | if (!gif_set(svm)) { |
| 3910 | if (vgif) |
| 3911 | svm_set_intercept(svm, INTERCEPT_STGI);
| 3912 | return; /* STGI will cause a vm exit */ |
| 3913 | } |
| 3914 | |
| 3915 | /*
| 3916 |  * Something is preventing the NMI from being injected.  Single-step over
| 3917 |  * the blocking condition (IRET, exception injection, or interrupt shadow).
| 3918 |  */
| 3919 | svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); |
| 3920 | svm->nmi_singlestep = true; |
| 3921 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
| 3922 | } |
| 3923 | |
| 3924 | static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu) |
| 3925 | { |
| 3926 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3927 | |
| 3928 | /* |
| 3929 | * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries. |
| 3930 | * A TLB flush for the current ASID flushes both "host" and "guest" TLB |
| 3931 | * entries, and thus is a superset of Hyper-V's fine grained flushing. |
| 3932 | */ |
| 3933 | kvm_hv_vcpu_purge_flush_tlb(vcpu); |
| 3934 | |
| 3935 | /* |
| 3936 | * Flush only the current ASID even if the TLB flush was invoked via |
| 3937 | * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all |
| 3938 | * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and |
| 3939 | * unconditionally does a TLB flush on both nested VM-Enter and nested |
| 3940 | * VM-Exit (via kvm_mmu_reset_context()). |
| 3941 | */ |
| 3942 | if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) |
| 3943 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; |
| 3944 | else |
| 3945 | svm->current_vmcb->asid_generation--; |
| 3946 | } |
| 3947 | |
| 3948 | static void svm_flush_tlb_current(struct kvm_vcpu *vcpu) |
| 3949 | { |
| 3950 | hpa_t root_tdp = vcpu->arch.mmu->root.hpa; |
| 3951 | |
| 3952 | /* |
| 3953 | * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly |
| 3954 | * flush the NPT mappings via hypercall as flushing the ASID only |
| 3955 | * affects virtual to physical mappings, it does not invalidate guest |
| 3956 | * physical to host physical mappings. |
| 3957 | */ |
| 3958 | if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp)) |
| 3959 | hyperv_flush_guest_mapping(root_tdp);
| 3960 | |
| 3961 | svm_flush_tlb_asid(vcpu); |
| 3962 | } |
| 3963 | |
| 3964 | static void svm_flush_tlb_all(struct kvm_vcpu *vcpu) |
| 3965 | { |
| 3966 | /* |
| 3967 | * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB |
| 3968 | * flushes should be routed to hv_flush_remote_tlbs() without requesting |
| 3969 | * a "regular" remote flush. Reaching this point means either there's |
| 3970 | * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of |
| 3971 | * which might be fatal to the guest. Yell, but try to recover. |
| 3972 | */ |
| 3973 | if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu))) |
| 3974 | hv_flush_remote_tlbs(vcpu->kvm); |
| 3975 | |
| 3976 | svm_flush_tlb_asid(vcpu); |
| 3977 | } |
| 3978 | |
| 3979 | static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) |
| 3980 | { |
| 3981 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3982 | |
| 3983 | invlpga(gva, svm->vmcb->control.asid);
| 3984 | } |
| 3985 | |
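/*
 * TPR is virtualized via the V_TPR field in int_ctl; the two helpers below
 * propagate the value between the VMCB and the in-kernel local APIC's CR8,
 * after and before VMRUN respectively.
 */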
| 3986 | static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) |
| 3987 | { |
| 3988 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3989 | |
| 3990 | if (nested_svm_virtualize_tpr(vcpu)) |
| 3991 | return; |
| 3992 | |
| 3993 | if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
| 3994 | int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; |
| 3995 | kvm_set_cr8(vcpu, cr8); |
| 3996 | } |
| 3997 | } |
| 3998 | |
| 3999 | static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) |
| 4000 | { |
| 4001 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4002 | u64 cr8; |
| 4003 | |
| 4004 | if (nested_svm_virtualize_tpr(vcpu)) |
| 4005 | return; |
| 4006 | |
| 4007 | cr8 = kvm_get_cr8(vcpu); |
| 4008 | svm->vmcb->control.int_ctl &= ~V_TPR_MASK; |
| 4009 | svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; |
| 4010 | } |
| 4011 | |
| 4012 | static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector, |
| 4013 | int type) |
| 4014 | { |
| 4015 | bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT); |
| 4016 | bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT); |
| 4017 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4018 | |
| 4019 | /* |
| 4020 | * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's |
| 4021 | * associated with the original soft exception/interrupt. next_rip is |
| 4022 | * cleared on all exits that can occur while vectoring an event, so KVM |
| 4023 | * needs to manually set next_rip for re-injection. Unlike the !nrips |
| 4024 | * case below, this needs to be done if and only if KVM is re-injecting |
| 4025 | * the same event, i.e. if the event is a soft exception/interrupt, |
| 4026 | * otherwise next_rip is unused on VMRUN. |
| 4027 | */ |
| 4028 | if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) && |
| 4029 | kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
| 4030 | svm->vmcb->control.next_rip = svm->soft_int_next_rip; |
| 4031 | /* |
| 4032 | * If NRIPS isn't enabled, KVM must manually advance RIP prior to |
| 4033 | * injecting the soft exception/interrupt. That advancement needs to |
| 4034 | * be unwound if vectoring didn't complete. Note, the new event may |
| 4035 | * not be the injected event, e.g. if KVM injected an INTn, the INTn |
| 4036 | * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will |
| 4037 | * be the reported vectored event, but RIP still needs to be unwound. |
| 4038 | */ |
| 4039 | else if (!nrips && (is_soft || is_exception) && |
| 4040 | kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
| 4041 | kvm_rip_write(vcpu, svm->soft_int_old_rip); |
| 4042 | } |
| 4043 | |
| 4044 | static void svm_complete_interrupts(struct kvm_vcpu *vcpu) |
| 4045 | { |
| 4046 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4047 | u8 vector; |
| 4048 | int type; |
| 4049 | u32 exitintinfo = svm->vmcb->control.exit_int_info; |
| 4050 | bool nmi_l1_to_l2 = svm->nmi_l1_to_l2; |
| 4051 | bool soft_int_injected = svm->soft_int_injected; |
| 4052 | |
| 4053 | svm->nmi_l1_to_l2 = false; |
| 4054 | svm->soft_int_injected = false; |
| 4055 | |
| 4056 | /* |
| 4057 | * If we've made progress since setting awaiting_iret_completion, we've |
| 4058 | * executed an IRET and can allow NMI injection. |
| 4059 | */ |
| 4060 | if (svm->awaiting_iret_completion && |
| 4061 | kvm_rip_read(vcpu) != svm->nmi_iret_rip) { |
| 4062 | svm->awaiting_iret_completion = false; |
| 4063 | svm->nmi_masked = false; |
| 4064 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 4065 | } |
| 4066 | |
| 4067 | vcpu->arch.nmi_injected = false; |
| 4068 | kvm_clear_exception_queue(vcpu); |
| 4069 | kvm_clear_interrupt_queue(vcpu); |
| 4070 | |
| 4071 | if (!(exitintinfo & SVM_EXITINTINFO_VALID)) |
| 4072 | return; |
| 4073 | |
| 4074 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 4075 | |
| 4076 | vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; |
| 4077 | type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; |
| 4078 | |
| 4079 | if (soft_int_injected) |
| 4080 | svm_complete_soft_interrupt(vcpu, vector, type); |
| 4081 | |
| 4082 | switch (type) { |
| 4083 | case SVM_EXITINTINFO_TYPE_NMI: |
| 4084 | vcpu->arch.nmi_injected = true; |
| 4085 | svm->nmi_l1_to_l2 = nmi_l1_to_l2; |
| 4086 | break; |
| 4087 | case SVM_EXITINTINFO_TYPE_EXEPT: { |
| 4088 | u32 error_code = 0; |
| 4089 | |
| 4090 | /* |
| 4091 | * Never re-inject a #VC exception. |
| 4092 | */ |
| 4093 | if (vector == X86_TRAP_VC) |
| 4094 | break; |
| 4095 | |
| 4096 | if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) |
| 4097 | error_code = svm->vmcb->control.exit_int_info_err; |
| 4098 | |
| 4099 | kvm_requeue_exception(vcpu, vector,
| 4100 | exitintinfo & SVM_EXITINTINFO_VALID_ERR,
| 4101 | error_code);
| 4102 | break; |
| 4103 | } |
| 4104 | case SVM_EXITINTINFO_TYPE_INTR: |
| 4105 | kvm_queue_interrupt(vcpu, vector, false); |
| 4106 | break; |
| 4107 | case SVM_EXITINTINFO_TYPE_SOFT: |
| 4108 | kvm_queue_interrupt(vcpu, vector, true); |
| 4109 | break; |
| 4110 | default: |
| 4111 | break; |
| 4112 | } |
| 4113 | |
| 4114 | } |
| 4115 | |
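/*
 * If VM-Entry is canceled after an event was queued for injection, move the
 * event from event_inj to exit_int_info so that svm_complete_interrupts() can
 * re-queue it, as if vectoring had been interrupted by a #VMEXIT.
 */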
| 4116 | static void svm_cancel_injection(struct kvm_vcpu *vcpu) |
| 4117 | { |
| 4118 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4119 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 4120 | |
| 4121 | control->exit_int_info = control->event_inj; |
| 4122 | control->exit_int_info_err = control->event_inj_err; |
| 4123 | control->event_inj = 0; |
| 4124 | svm_complete_interrupts(vcpu); |
| 4125 | } |
| 4126 | |
| 4127 | static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) |
| 4128 | { |
| 4129 | if (to_kvm_sev_info(vcpu->kvm)->need_init)
| 4130 | return -EINVAL; |
| 4131 | |
| 4132 | return 1; |
| 4133 | } |
| 4134 | |
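/*
 * Fastpath exit handling runs with IRQs disabled, straight out of the VMRUN
 * path, so it is limited to exits that can be completed without accessing
 * guest memory: currently WRMSR, HLT and INVD.
 */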
| 4135 | static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) |
| 4136 | { |
| 4137 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4138 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 4139 | |
| 4140 | /* |
| 4141 | * Next RIP must be provided as IRQs are disabled, and accessing guest |
| 4142 | * memory to decode the instruction might fault, i.e. might sleep. |
| 4143 | */ |
| 4144 | if (!nrips || !control->next_rip) |
| 4145 | return EXIT_FASTPATH_NONE; |
| 4146 | |
| 4147 | if (is_guest_mode(vcpu)) |
| 4148 | return EXIT_FASTPATH_NONE; |
| 4149 | |
| 4150 | switch (control->exit_code) { |
| 4151 | case SVM_EXIT_MSR: |
| 4152 | if (!control->exit_info_1) |
| 4153 | break; |
| 4154 | return handle_fastpath_wrmsr(vcpu); |
| 4155 | case SVM_EXIT_HLT: |
| 4156 | return handle_fastpath_hlt(vcpu); |
| 4157 | case SVM_EXIT_INVD: |
| 4158 | return handle_fastpath_invd(vcpu); |
| 4159 | default: |
| 4160 | break; |
| 4161 | } |
| 4162 | |
| 4163 | return EXIT_FASTPATH_NONE; |
| 4164 | } |
| 4165 | |
| 4166 | static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted) |
| 4167 | { |
| 4168 | struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); |
| 4169 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4170 | |
| 4171 | guest_state_enter_irqoff(); |
| 4172 | |
| 4173 | /* |
| 4174 | * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of |
| 4175 | * VMRUN controls whether or not physical IRQs are masked (KVM always |
| 4176 | * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the |
| 4177 | * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow |
| 4178 | * into guest state if delivery of an event during VMRUN triggers a |
| 4179 | * #VMEXIT, and the guest_state transitions already tell lockdep that |
| 4180 | * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of |
| 4181 | * this path, so IRQs aren't actually unmasked while running host code. |
| 4182 | */ |
| 4183 | raw_local_irq_enable(); |
| 4184 | |
| 4185 | amd_clear_divider(); |
| 4186 | |
| 4187 | if (sev_es_guest(kvm: vcpu->kvm)) |
| 4188 | __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
| 4189 | sev_es_host_save_area(sd));
| 4190 | else |
| 4191 | __svm_vcpu_run(svm, spec_ctrl_intercepted); |
| 4192 | |
| 4193 | raw_local_irq_disable(); |
| 4194 | |
| 4195 | guest_state_exit_irqoff(); |
| 4196 | } |
| 4197 | |
| 4198 | static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) |
| 4199 | { |
| 4200 | bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT; |
| 4201 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4202 | bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL); |
| 4203 | |
| 4204 | trace_kvm_entry(vcpu, force_immediate_exit); |
| 4205 | |
| 4206 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 4207 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
| 4208 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
| 4209 | |
| 4210 | /* |
| 4211 | * Disable singlestep if we're injecting an interrupt/exception. |
| 4212 | * We don't want our modified rflags to be pushed on the stack where |
| 4213 | * we might not be able to easily reset them if we disabled NMI |
| 4214 | * singlestep later. |
| 4215 | */ |
| 4216 | if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { |
| 4217 | /* |
| 4218 | * Event injection happens before external interrupts cause a |
| 4219 | * vmexit and interrupts are disabled here, so smp_send_reschedule |
| 4220 | * is enough to force an immediate vmexit. |
| 4221 | */ |
| 4222 | disable_nmi_singlestep(svm); |
| 4223 | force_immediate_exit = true; |
| 4224 | } |
| 4225 | |
| 4226 | if (force_immediate_exit) |
| 4227 | smp_send_reschedule(vcpu->cpu); |
| 4228 | |
| 4229 | if (pre_svm_run(vcpu)) { |
| 4230 | vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
| 4231 | vcpu->run->fail_entry.hardware_entry_failure_reason = SVM_EXIT_ERR; |
| 4232 | vcpu->run->fail_entry.cpu = vcpu->cpu; |
| 4233 | return EXIT_FASTPATH_EXIT_USERSPACE; |
| 4234 | } |
| 4235 | |
| 4236 | sync_lapic_to_cr8(vcpu); |
| 4237 | |
| 4238 | if (unlikely(svm->asid != svm->vmcb->control.asid)) { |
| 4239 | svm->vmcb->control.asid = svm->asid; |
| 4240 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
| 4241 | } |
| 4242 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
| 4243 | |
| 4244 | svm_hv_update_vp_id(svm->vmcb, vcpu);
| 4245 | |
| 4246 | /* |
| 4247 | * Run with all-zero DR6 unless the guest can write DR6 freely, so that |
| 4248 | * KVM can get the exact cause of a #DB. Note, loading guest DR6 from |
| 4249 | * KVM's snapshot is only necessary when DR accesses won't exit. |
| 4250 | */ |
| 4251 | if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6)) |
| 4252 | svm_set_dr6(vcpu, vcpu->arch.dr6);
| 4253 | else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) |
| 4254 | svm_set_dr6(vcpu, DR6_ACTIVE_LOW); |
| 4255 | |
| 4256 | clgi(); |
| 4257 | |
| 4258 | /* |
| 4259 | * Hardware only context switches DEBUGCTL if LBR virtualization is |
| 4260 | * enabled. Manually load DEBUGCTL if necessary (and restore it after |
| 4261 | * VM-Exit), as running with the host's DEBUGCTL can negatively affect |
| 4262 | * guest state and can even be fatal, e.g. due to Bus Lock Detect. |
| 4263 | */ |
| 4264 | if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && |
| 4265 | vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) |
| 4266 | update_debugctlmsr(svm->vmcb->save.dbgctl);
| 4267 | |
| 4268 | kvm_wait_lapic_expire(vcpu); |
| 4269 | |
| 4270 | /* |
| 4271 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if |
| 4272 | * it's non-zero. Since vmentry is serialising on affected CPUs, there |
| 4273 | * is no need to worry about the conditional branch over the wrmsr |
| 4274 | * being speculatively taken. |
| 4275 | */ |
| 4276 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 4277 | x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);
| 4278 | |
| 4279 | svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted); |
| 4280 | |
| 4281 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 4282 | x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
| 4283 | |
| 4284 | if (!sev_es_guest(vcpu->kvm)) {
| 4285 | vcpu->arch.cr2 = svm->vmcb->save.cr2; |
| 4286 | vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; |
| 4287 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; |
| 4288 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
| 4289 | } |
| 4290 | vcpu->arch.regs_dirty = 0; |
| 4291 | |
| 4292 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
| 4293 | kvm_before_interrupt(vcpu, KVM_HANDLING_NMI); |
| 4294 | |
| 4295 | if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && |
| 4296 | vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) |
| 4297 | update_debugctlmsr(vcpu->arch.host_debugctl);
| 4298 | |
| 4299 | stgi(); |
| 4300 | |
| 4301 | /* Any pending NMI will happen here */ |
| 4302 | |
| 4303 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
| 4304 | kvm_after_interrupt(vcpu); |
| 4305 | |
| 4306 | sync_cr8_to_lapic(vcpu); |
| 4307 | |
| 4308 | svm->next_rip = 0; |
| 4309 | if (is_guest_mode(vcpu)) { |
| 4310 | nested_sync_control_from_vmcb02(svm); |
| 4311 | |
| 4312 | /* Track VMRUNs that have made past consistency checking */ |
| 4313 | if (svm->nested.nested_run_pending && |
| 4314 | svm->vmcb->control.exit_code != SVM_EXIT_ERR) |
| 4315 | ++vcpu->stat.nested_run; |
| 4316 | |
| 4317 | svm->nested.nested_run_pending = 0; |
| 4318 | } |
| 4319 | |
| 4320 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
| 4321 | vmcb_mark_all_clean(svm->vmcb);
| 4322 | |
| 4323 | /* if exit due to PF check for async PF */ |
| 4324 | if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) |
| 4325 | vcpu->arch.apf.host_apf_flags = |
| 4326 | kvm_read_and_reset_apf_flags(); |
| 4327 | |
| 4328 | vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; |
| 4329 | |
| 4330 | trace_kvm_exit(vcpu, KVM_ISA_SVM); |
| 4331 | |
| 4332 | svm_complete_interrupts(vcpu); |
| 4333 | |
| 4334 | return svm_exit_handlers_fastpath(vcpu); |
| 4335 | } |
| 4336 | |
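/*
 * Load the new MMU root into the VMCB: with NPT the root goes into nested_cr3
 * and guest CR3 is taken from KVM's snapshot; without NPT the root (plus the
 * active PCID, if any) becomes the shadow CR3 programmed into save.cr3.
 */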
| 4337 | static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, |
| 4338 | int root_level) |
| 4339 | { |
| 4340 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4341 | unsigned long cr3; |
| 4342 | |
| 4343 | if (npt_enabled) { |
| 4344 | svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); |
| 4345 | vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
| 4346 | |
| 4347 | hv_track_root_tdp(vcpu, root_hpa); |
| 4348 | |
| 4349 | cr3 = vcpu->arch.cr3; |
| 4350 | } else if (root_level >= PT64_ROOT_4LEVEL) { |
| 4351 | cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); |
| 4352 | } else { |
| 4353 | /* PCID in the guest should be impossible with a 32-bit MMU. */ |
| 4354 | WARN_ON_ONCE(kvm_get_active_pcid(vcpu)); |
| 4355 | cr3 = root_hpa; |
| 4356 | } |
| 4357 | |
| 4358 | svm->vmcb->save.cr3 = cr3; |
| 4359 | vmcb_mark_dirty(svm->vmcb, VMCB_CR);
| 4360 | } |
| 4361 | |
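/* 0F 01 D9 is the opcode of VMMCALL, AMD's hypercall instruction. */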
| 4362 | static void |
| 4363 | svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) |
| 4364 | { |
| 4365 | /* |
| 4366 | * Patch in the VMMCALL instruction: |
| 4367 | */ |
| 4368 | hypercall[0] = 0x0f; |
| 4369 | hypercall[1] = 0x01; |
| 4370 | hypercall[2] = 0xd9; |
| 4371 | } |
| 4372 | |
| 4373 | /* |
| 4374 | * The kvm parameter can be NULL (module initialization, or invocation before |
| 4375 | * VM creation). Be sure to check the kvm parameter before using it. |
| 4376 | */ |
| 4377 | static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) |
| 4378 | { |
| 4379 | switch (index) { |
| 4380 | case MSR_IA32_MCG_EXT_CTL: |
| 4381 | case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: |
| 4382 | return false; |
| 4383 | case MSR_IA32_SMBASE: |
| 4384 | if (!IS_ENABLED(CONFIG_KVM_SMM)) |
| 4385 | return false; |
| 4386 | /* SEV-ES guests do not support SMM, so report false */ |
| 4387 | if (kvm && sev_es_guest(kvm)) |
| 4388 | return false; |
| 4389 | break; |
| 4390 | default: |
| 4391 | break; |
| 4392 | } |
| 4393 | |
| 4394 | return true; |
| 4395 | } |
| 4396 | |
| 4397 | static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) |
| 4398 | { |
| 4399 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4400 | |
| 4401 | /* |
| 4402 | * SVM doesn't provide a way to disable just XSAVES in the guest, KVM |
| 4403 | * can only disable all variants of XSAVE by disallowing CR4.OSXSAVE from
| 4404 | * being set. As a result, if the host has XSAVE and XSAVES, and the |
| 4405 | * guest has XSAVE enabled, the guest can execute XSAVES without |
| 4406 | * faulting. Treat XSAVES as enabled in this case regardless of |
| 4407 | * whether it's advertised to the guest so that KVM context switches |
| 4408 | * XSS on VM-Enter/VM-Exit. Failure to do so would effectively give |
| 4409 | * the guest read/write access to the host's XSS. |
| 4410 | */ |
| 4411 | guest_cpu_cap_change(vcpu, X86_FEATURE_XSAVES, |
| 4412 | boot_cpu_has(X86_FEATURE_XSAVES) && |
| 4413 | guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE)); |
| 4414 | |
| 4415 | /* |
| 4416 | * Intercept VMLOAD if the vCPU model is Intel in order to emulate that |
| 4417 | * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing |
| 4418 | * SVM on Intel is bonkers and extremely unlikely to work). |
| 4419 | */ |
| 4420 | if (guest_cpuid_is_intel_compatible(vcpu)) |
| 4421 | guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); |
| 4422 | |
| 4423 | if (sev_guest(vcpu->kvm))
| 4424 | sev_vcpu_after_set_cpuid(svm); |
| 4425 | } |
| 4426 | |
| 4427 | static bool svm_has_wbinvd_exit(void) |
| 4428 | { |
| 4429 | return true; |
| 4430 | } |
| 4431 | |
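/*
 * Map the emulator's x86_intercept enum onto SVM exit codes, along with the
 * stage (pre-exception, post-exception, post-memory-access) at which each
 * check applies.  svm_check_intercept() consults this table when determining
 * whether an instruction emulated on behalf of L2 should trigger a nested
 * VM-Exit.
 */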
| 4432 | #define PRE_EX(exit) { .exit_code = (exit), \ |
| 4433 | .stage = X86_ICPT_PRE_EXCEPT, } |
| 4434 | #define POST_EX(exit) { .exit_code = (exit), \ |
| 4435 | .stage = X86_ICPT_POST_EXCEPT, } |
| 4436 | #define POST_MEM(exit) { .exit_code = (exit), \ |
| 4437 | .stage = X86_ICPT_POST_MEMACCESS, } |
| 4438 | |
| 4439 | static const struct __x86_intercept { |
| 4440 | u32 exit_code; |
| 4441 | enum x86_intercept_stage stage; |
| 4442 | } x86_intercept_map[] = { |
| 4443 | [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), |
| 4444 | [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 4445 | [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 4446 | [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 4447 | [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), |
| 4448 | [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), |
| 4449 | [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), |
| 4450 | [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), |
| 4451 | [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), |
| 4452 | [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), |
| 4453 | [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), |
| 4454 | [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), |
| 4455 | [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), |
| 4456 | [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), |
| 4457 | [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), |
| 4458 | [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), |
| 4459 | [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), |
| 4460 | [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), |
| 4461 | [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), |
| 4462 | [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), |
| 4463 | [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), |
| 4464 | [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), |
| 4465 | [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), |
| 4466 | [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), |
| 4467 | [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), |
| 4468 | [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), |
| 4469 | [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), |
| 4470 | [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), |
| 4471 | [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), |
| 4472 | [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), |
| 4473 | [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), |
| 4474 | [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), |
| 4475 | [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), |
| 4476 | [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), |
| 4477 | [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), |
| 4478 | [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), |
| 4479 | [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), |
| 4480 | [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), |
| 4481 | [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), |
| 4482 | [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), |
| 4483 | [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), |
| 4484 | [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), |
| 4485 | [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), |
| 4486 | [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), |
| 4487 | [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), |
| 4488 | [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), |
| 4489 | [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), |
| 4490 | }; |
| 4491 | |
| 4492 | #undef PRE_EX |
| 4493 | #undef POST_EX |
| 4494 | #undef POST_MEM |
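
/*
 * For illustration only: an entry in the table above such as
 *
 *	[x86_intercept_vmrun]	= POST_EX(SVM_EXIT_VMRUN),
 *
 * expands to
 *
 *	[x86_intercept_vmrun]	= { .exit_code = SVM_EXIT_VMRUN,
 *				    .stage     = X86_ICPT_POST_EXCEPT, },
 *
 * i.e. the table maps each emulator intercept ID to the SVM exit code that
 * would be reported to L1 and the emulation stage at which the nested
 * intercept check runs.
 */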
| 4495 | |
| 4496 | static int svm_check_intercept(struct kvm_vcpu *vcpu, |
| 4497 | struct x86_instruction_info *info, |
| 4498 | enum x86_intercept_stage stage, |
| 4499 | struct x86_exception *exception) |
| 4500 | { |
| 4501 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4502 | int vmexit, ret = X86EMUL_CONTINUE; |
| 4503 | struct __x86_intercept icpt_info; |
| 4504 | struct vmcb *vmcb = svm->vmcb; |
| 4505 | |
| 4506 | if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) |
| 4507 | goto out; |
| 4508 | |
| 4509 | icpt_info = x86_intercept_map[info->intercept]; |
| 4510 | |
| 4511 | if (stage != icpt_info.stage) |
| 4512 | goto out; |
| 4513 | |
| 4514 | switch (icpt_info.exit_code) { |
| 4515 | case SVM_EXIT_READ_CR0: |
| 4516 | if (info->intercept == x86_intercept_cr_read) |
| 4517 | icpt_info.exit_code += info->modrm_reg; |
| 4518 | break; |
| 4519 | case SVM_EXIT_WRITE_CR0: { |
| 4520 | unsigned long cr0, val; |
| 4521 | |
| 4522 | /* |
| 4523 | * Adjust the exit code accordingly if a CR other than CR0 is |
| 4524 | * being written, and skip straight to the common handling as |
| 4525 | * only CR0 has an additional selective intercept. |
| 4526 | */ |
| 4527 | if (info->intercept == x86_intercept_cr_write && info->modrm_reg) { |
| 4528 | icpt_info.exit_code += info->modrm_reg; |
| 4529 | break; |
| 4530 | } |
| 4531 | |
| 4532 | /* |
| 4533 | * Convert the exit_code to SVM_EXIT_CR0_SEL_WRITE if a |
| 4534 | * selective CR0 intercept is triggered (the common logic will |
| 4535 | * treat the selective intercept as being enabled). Note, the |
| 4536 | * unconditional intercept has higher priority, i.e. this is |
| 4537 | * only relevant if *only* the selective intercept is enabled. |
| 4538 | */ |
| 4539 | if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_CR0_WRITE) ||
| 4540 | !(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))
| 4541 | break; |
| 4542 | |
| 4543 | /* CLTS never triggers INTERCEPT_SELECTIVE_CR0 */ |
| 4544 | if (info->intercept == x86_intercept_clts) |
| 4545 | break; |
| 4546 | |
| 4547 | /* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */ |
| 4548 | if (info->intercept == x86_intercept_lmsw) { |
| 4549 | icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 4550 | break; |
| 4551 | } |
| 4552 | |
| 4553 | /* |
| 4554 | * MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit |
| 4555 | * other than SVM_CR0_SELECTIVE_MASK is changed. |
| 4556 | */ |
| 4557 | cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; |
| 4558 | val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; |
| 4559 | if (cr0 ^ val) |
| 4560 | icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 4561 | break; |
| 4562 | } |
| 4563 | case SVM_EXIT_READ_DR0: |
| 4564 | case SVM_EXIT_WRITE_DR0: |
| 4565 | icpt_info.exit_code += info->modrm_reg; |
| 4566 | break; |
| 4567 | case SVM_EXIT_MSR: |
| 4568 | if (info->intercept == x86_intercept_wrmsr) |
| 4569 | vmcb->control.exit_info_1 = 1; |
| 4570 | else |
| 4571 | vmcb->control.exit_info_1 = 0; |
| 4572 | break; |
| 4573 | case SVM_EXIT_PAUSE: |
| 4574 | /*
| 4575 | * This intercept is reported for NOP only, but PAUSE is
| 4576 | * REP NOP, so check for the REP prefix here.
| 4577 | */
| 4578 | if (info->rep_prefix != REPE_PREFIX) |
| 4579 | goto out; |
| 4580 | break; |
| 4581 | case SVM_EXIT_IOIO: { |
| 4582 | u64 exit_info; |
| 4583 | u32 bytes; |
| 4584 | |
| 4585 | if (info->intercept == x86_intercept_in || |
| 4586 | info->intercept == x86_intercept_ins) { |
| 4587 | exit_info = ((info->src_val & 0xffff) << 16) | |
| 4588 | SVM_IOIO_TYPE_MASK; |
| 4589 | bytes = info->dst_bytes; |
| 4590 | } else { |
| 4591 | exit_info = (info->dst_val & 0xffff) << 16; |
| 4592 | bytes = info->src_bytes; |
| 4593 | } |
| 4594 | |
| 4595 | if (info->intercept == x86_intercept_outs || |
| 4596 | info->intercept == x86_intercept_ins) |
| 4597 | exit_info |= SVM_IOIO_STR_MASK; |
| 4598 | |
| 4599 | if (info->rep_prefix) |
| 4600 | exit_info |= SVM_IOIO_REP_MASK; |
| 4601 | |
| 4602 | bytes = min(bytes, 4u); |
| 4603 | |
| 4604 | exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; |
| 4605 | |
| 4606 | exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); |
| 4607 | |
| 4608 | vmcb->control.exit_info_1 = exit_info; |
| 4609 | vmcb->control.exit_info_2 = info->next_rip; |
| 4610 | |
| 4611 | break; |
| 4612 | } |
| 4613 | default: |
| 4614 | break; |
| 4615 | } |
| 4616 | |
| 4617 | /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ |
| 4618 | if (static_cpu_has(X86_FEATURE_NRIPS)) |
| 4619 | vmcb->control.next_rip = info->next_rip; |
| 4620 | vmcb->control.exit_code = icpt_info.exit_code; |
| 4621 | vmcb->control.exit_code_hi = 0; |
| 4622 | vmexit = nested_svm_exit_handled(svm); |
| 4623 | |
| 4624 | ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED |
| 4625 | : X86EMUL_CONTINUE; |
| 4626 | |
| 4627 | out: |
| 4628 | return ret; |
| 4629 | } |
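
/*
 * Illustrative sketch only (not kernel code): a user-space model of the
 * MOV-to-CR0 decision in svm_check_intercept() above.  The selective CR0
 * intercept is reported only when a bit outside SVM_CR0_SELECTIVE_MASK
 * changes; the mask value used here (CR0.TS | CR0.MP) is an assumption made
 * for the example and is not taken from this file.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_CR0_MP		(1UL << 1)	/* assumed CR0.MP position */
#define EX_CR0_TS		(1UL << 3)	/* assumed CR0.TS position */
#define EX_CR0_SELECTIVE_MASK	(EX_CR0_TS | EX_CR0_MP)

/*
 * Returns true if a MOV-to-CR0 from old_cr0 to new_cr0 would be treated as
 * a selective CR0 write (SVM_EXIT_CR0_SEL_WRITE) rather than a plain
 * SVM_EXIT_WRITE_CR0, mirroring the "cr0 ^ val" check above.
 */
static bool is_selective_cr0_write(unsigned long old_cr0, unsigned long new_cr0)
{
	old_cr0 &= ~EX_CR0_SELECTIVE_MASK;
	new_cr0 &= ~EX_CR0_SELECTIVE_MASK;
	return (old_cr0 ^ new_cr0) != 0;
}

int main(void)
{
	/* Toggling only CR0.TS does not trigger the selective intercept. */
	printf("toggle TS only: %d\n",
	       is_selective_cr0_write(0x80050033UL, 0x8005003bUL));
	/* Clearing CR0.PG (bit 31) changes a non-masked bit and does. */
	printf("clear PG:       %d\n",
	       is_selective_cr0_write(0x80050033UL, 0x00050033UL));
	return 0;
}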
| 4630 | |
| 4631 | static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) |
| 4632 | { |
| 4633 | switch (to_svm(vcpu)->vmcb->control.exit_code) { |
| 4634 | case SVM_EXIT_EXCP_BASE + MC_VECTOR: |
| 4635 | svm_handle_mce(vcpu); |
| 4636 | break; |
| 4637 | case SVM_EXIT_INTR: |
| 4638 | vcpu->arch.at_instruction_boundary = true; |
| 4639 | break; |
| 4640 | default: |
| 4641 | break; |
| 4642 | } |
| 4643 | } |
| 4644 | |
| 4645 | static void svm_setup_mce(struct kvm_vcpu *vcpu) |
| 4646 | { |
| 4647 | /* [63:9] are reserved. */ |
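/* Bits [7:0] are the bank count and bit 8 is MCG_CTL_P; 0x1ff keeps both. */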
| 4648 | vcpu->arch.mcg_cap &= 0x1ff; |
| 4649 | } |
| 4650 | |
| 4651 | #ifdef CONFIG_KVM_SMM |
| 4652 | bool svm_smi_blocked(struct kvm_vcpu *vcpu) |
| 4653 | { |
| 4654 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4655 | |
| 4656 | /* Per APM Vol.2 15.22.2 "Response to SMI" */ |
| 4657 | if (!gif_set(svm)) |
| 4658 | return true; |
| 4659 | |
| 4660 | return is_smm(vcpu); |
| 4661 | } |
| 4662 | |
| 4663 | static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
| 4664 | { |
| 4665 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4666 | if (svm->nested.nested_run_pending) |
| 4667 | return -EBUSY; |
| 4668 | |
| 4669 | if (svm_smi_blocked(vcpu)) |
| 4670 | return 0; |
| 4671 | |
| 4672 | /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */ |
| 4673 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) |
| 4674 | return -EBUSY; |
| 4675 | |
| 4676 | return 1; |
| 4677 | } |
| 4678 | |
| 4679 | static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) |
| 4680 | { |
| 4681 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4682 | struct kvm_host_map map_save; |
| 4683 | int ret; |
| 4684 | |
| 4685 | if (!is_guest_mode(vcpu)) |
| 4686 | return 0; |
| 4687 | |
| 4688 | /* |
| 4689 | * 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is |
| 4690 | * responsible for ensuring nested SVM and SMIs are mutually exclusive. |
| 4691 | */ |
| 4692 | |
| 4693 | if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) |
| 4694 | return 1; |
| 4695 | |
| 4696 | smram->smram64.svm_guest_flag = 1; |
| 4697 | smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; |
| 4698 | |
| 4699 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 4700 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
| 4701 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
| 4702 | |
| 4703 | ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW); |
| 4704 | if (ret) |
| 4705 | return ret; |
| 4706 | |
| 4707 | /* |
| 4708 | * KVM uses VMCB01 to store L1 host state while L2 runs but |
| 4709 | * VMCB01 is going to be used during SMM and thus the state will |
| 4710 | * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
| 4711 | * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
| 4712 | * format of the area is identical to the guest save area offset
| 4713 | * by 0x400 (matches the offset of 'struct vmcb_save_area'
| 4714 | * within 'struct vmcb'). Note: HSAVE area may also be used by |
| 4715 | * L1 hypervisor to save additional host context (e.g. KVM does |
| 4716 | * that, see svm_prepare_switch_to_guest()) which must be |
| 4717 | * preserved. |
| 4718 | */ |
| 4719 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
| 4720 | return 1; |
| 4721 | |
| 4722 | BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); |
| 4723 | |
| 4724 | svm_copy_vmrun_state(map_save.hva + 0x400,
| 4725 | &svm->vmcb01.ptr->save);
| 4726 | |
| 4727 | kvm_vcpu_unmap(vcpu, &map_save);
| 4728 | return 0; |
| 4729 | } |
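
/*
 * For illustration only, the host save area layout relied on above and in
 * svm_leave_smm() below (the BUILD_BUG_ON pins the save-area offset within
 * 'struct vmcb'):
 *
 *	HSAVE page (MSR_VM_HSAVE_PA)
 *	0x000 +-------------------------------+
 *	      | not touched by this code      |
 *	0x400 +-------------------------------+
 *	      | vmcb_save_area-formatted      |  <- svm_copy_vmrun_state()
 *	      | host state                    |     copies to/from here
 *	      +-------------------------------+
 */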
| 4730 | |
| 4731 | static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) |
| 4732 | { |
| 4733 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4734 | struct kvm_host_map map, map_save; |
| 4735 | struct vmcb *vmcb12; |
| 4736 | int ret; |
| 4737 | |
| 4738 | const struct kvm_smram_state_64 *smram64 = &smram->smram64; |
| 4739 | |
| 4740 | if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) |
| 4741 | return 0; |
| 4742 | |
| 4743 | /* Non-zero if SMI arrived while vCPU was in guest mode. */ |
| 4744 | if (!smram64->svm_guest_flag) |
| 4745 | return 0; |
| 4746 | |
| 4747 | if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SVM)) |
| 4748 | return 1; |
| 4749 | |
| 4750 | if (!(smram64->efer & EFER_SVME)) |
| 4751 | return 1; |
| 4752 | |
| 4753 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
| 4754 | return 1; |
| 4755 | |
| 4756 | ret = 1; |
| 4757 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
| 4758 | goto unmap_map; |
| 4759 | |
| 4760 | if (svm_allocate_nested(svm)) |
| 4761 | goto unmap_save; |
| 4762 | |
| 4763 | /* |
| 4764 | * Restore L1 host state from L1 HSAVE area as VMCB01 was |
| 4765 | * used during SMM (see svm_enter_smm()) |
| 4766 | */ |
| 4767 | |
| 4768 | svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
| 4769 | |
| 4770 | /* |
| 4771 | * Enter the nested guest now |
| 4772 | */ |
| 4773 | |
| 4774 | vmcb_mark_all_dirty(svm->vmcb01.ptr);
| 4775 | |
| 4776 | vmcb12 = map.hva; |
| 4777 | nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
| 4778 | nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
| 4779 | ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);
| 4780 | |
| 4781 | if (ret) |
| 4782 | goto unmap_save; |
| 4783 | |
| 4784 | svm->nested.nested_run_pending = 1; |
| 4785 | |
| 4786 | unmap_save: |
| 4787 | kvm_vcpu_unmap(vcpu, &map_save);
| 4788 | unmap_map:
| 4789 | kvm_vcpu_unmap(vcpu, &map);
| 4790 | return ret; |
| 4791 | } |
| 4792 | |
| 4793 | static void svm_enable_smi_window(struct kvm_vcpu *vcpu) |
| 4794 | { |
| 4795 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4796 | |
| 4797 | if (!gif_set(svm)) { |
| 4798 | if (vgif) |
| 4799 | svm_set_intercept(svm, INTERCEPT_STGI);
| 4800 | /* STGI will cause a vm exit */ |
| 4801 | } else { |
| 4802 | /* We must be in SMM; RSM will cause a vmexit anyway. */ |
| 4803 | } |
| 4804 | } |
| 4805 | #endif |
| 4806 | |
| 4807 | static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, |
| 4808 | void *insn, int insn_len) |
| 4809 | { |
| 4810 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4811 | bool smep, smap, is_user; |
| 4812 | u64 error_code; |
| 4813 | |
| 4814 | /* Check that emulation is possible during event vectoring */ |
| 4815 | if ((svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK) && |
| 4816 | !kvm_can_emulate_event_vectoring(emul_type)) |
| 4817 | return X86EMUL_UNHANDLEABLE_VECTORING; |
| 4818 | |
| 4819 | /* Emulation is always possible when KVM has access to all guest state. */ |
| 4820 | if (!sev_guest(vcpu->kvm)) |
| 4821 | return X86EMUL_CONTINUE; |
| 4822 | |
| 4823 | /* #UD and #GP should never be intercepted for SEV guests. */ |
| 4824 | WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD | |
| 4825 | EMULTYPE_TRAP_UD_FORCED | |
| 4826 | EMULTYPE_VMWARE_GP)); |
| 4827 | |
| 4828 | /* |
| 4829 | * Emulation is impossible for SEV-ES guests as KVM doesn't have access |
| 4830 | * to guest register state. |
| 4831 | */ |
| 4832 | if (sev_es_guest(vcpu->kvm)) |
| 4833 | return X86EMUL_RETRY_INSTR; |
| 4834 | |
| 4835 | /* |
| 4836 | * Emulation is possible if the instruction is already decoded, e.g. |
| 4837 | * when completing I/O after returning from userspace. |
| 4838 | */ |
| 4839 | if (emul_type & EMULTYPE_NO_DECODE) |
| 4840 | return X86EMUL_CONTINUE; |
| 4841 | |
| 4842 | /* |
| 4843 | * Emulation is possible for SEV guests if and only if a prefilled |
| 4844 | * buffer containing the bytes of the intercepted instruction is |
| 4845 | * available. SEV guest memory is encrypted with a guest specific key |
| 4846 | * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and |
| 4847 | * decode garbage. |
| 4848 | * |
| 4849 | * If KVM is NOT trying to simply skip an instruction, inject #UD if |
| 4850 | * KVM reached this point without an instruction buffer. In practice, |
| 4851 | * this path should never be hit by a well-behaved guest, e.g. KVM |
| 4852 | * doesn't intercept #UD or #GP for SEV guests, but this path is still |
| 4853 | * theoretically reachable, e.g. via unaccelerated fault-like AVIC |
| 4854 | * access, and needs to be handled by KVM to avoid putting the guest |
| 4855 | * into an infinite loop. Injecting #UD is somewhat arbitrary, but |
| 4856 | * it's the least awful option given the lack of insight into the guest.
| 4857 | * |
| 4858 | * If KVM is trying to skip an instruction, simply resume the guest. |
| 4859 | * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM |
| 4860 | * will attempt to re-inject the INT3/INTO and skip the instruction. |
| 4861 | * In that scenario, retrying the INT3/INTO and hoping the guest will |
| 4862 | * make forward progress is the only option that has a chance of |
| 4863 | * success (and in practice it will work the vast majority of the time). |
| 4864 | */ |
| 4865 | if (unlikely(!insn)) { |
| 4866 | if (emul_type & EMULTYPE_SKIP) |
| 4867 | return X86EMUL_UNHANDLEABLE; |
| 4868 | |
| 4869 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 4870 | return X86EMUL_PROPAGATE_FAULT; |
| 4871 | } |
| 4872 | |
| 4873 | /* |
| 4874 | * Emulate for SEV guests if the insn buffer is not empty. The buffer |
| 4875 | * will be empty if the DecodeAssist microcode cannot fetch bytes for |
| 4876 | * the faulting instruction because the code fetch itself faulted, e.g. |
| 4877 | * the guest attempted to fetch from emulated MMIO or a guest page |
| 4878 | * table used to translate CS:RIP resides in emulated MMIO. |
| 4879 | */ |
| 4880 | if (likely(insn_len)) |
| 4881 | return X86EMUL_CONTINUE; |
| 4882 | |
| 4883 | /* |
| 4884 | * Detect and work around Erratum 1096 Fam_17h_00_0Fh.
| 4885 | * |
| 4886 | * Errata: |
| 4887 | * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is |
| 4888 | * possible that CPU microcode implementing DecodeAssist will fail to |
| 4889 | * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly |
| 4890 | * be '0'. This happens because microcode reads CS:RIP using a _data_ |
| 4891 | * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode
| 4892 | * gives up and does not fill the instruction bytes buffer. |
| 4893 | * |
| 4894 | * As above, KVM reaches this point iff the VM is an SEV guest, the CPU |
| 4895 | * supports DecodeAssist, a #NPF was raised, KVM's page fault handler |
| 4896 | * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the |
| 4897 | * GuestIntrBytes field of the VMCB. |
| 4898 | * |
| 4899 | * This does _not_ mean that the erratum has been encountered, as the |
| 4900 | * DecodeAssist will also fail if the load for CS:RIP hits a legitimate |
| 4901 | * #PF, e.g. if the guest attempted to execute from emulated MMIO and
| 4902 | * encountered a reserved/not-present #PF. |
| 4903 | * |
| 4904 | * To hit the erratum, the following conditions must be true: |
| 4905 | * 1. CR4.SMAP=1 (obviously). |
| 4906 | * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot |
| 4907 | * have been hit as the guest would have encountered a SMEP |
| 4908 | * violation #PF, not a #NPF. |
| 4909 | * 3. The #NPF is not due to a code fetch, in which case failure to |
| 4910 | * retrieve the instruction bytes is legitimate (see above).
| 4911 | * |
| 4912 | * In addition, don't apply the erratum workaround if the #NPF occurred |
| 4913 | * while translating guest page tables (see below). |
| 4914 | */ |
| 4915 | error_code = svm->vmcb->control.exit_info_1; |
| 4916 | if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK)) |
| 4917 | goto resume_guest; |
| 4918 | |
| 4919 | smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP); |
| 4920 | smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP); |
| 4921 | is_user = svm_get_cpl(vcpu) == 3; |
| 4922 | if (smap && (!smep || is_user)) { |
| 4923 | pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
| 4924 | |
| 4925 | /* |
| 4926 | * If the fault occurred in userspace, arbitrarily inject #GP |
| 4927 | * to avoid killing the guest and to hopefully avoid confusing |
| 4928 | * the guest kernel too much, e.g. injecting #PF would not be |
| 4929 | * coherent with respect to the guest's page tables. Request |
| 4930 | * triple fault if the fault occurred in the kernel as there's |
| 4931 | * no fault that KVM can inject without confusing the guest. |
| 4932 | * In practice, the triple fault is moot as no sane SEV kernel |
| 4933 | * will execute from user memory while also running with SMAP=1. |
| 4934 | */ |
| 4935 | if (is_user) |
| 4936 | kvm_inject_gp(vcpu, 0);
| 4937 | else |
| 4938 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
| 4939 | return X86EMUL_PROPAGATE_FAULT; |
| 4940 | } |
| 4941 | |
| 4942 | resume_guest: |
| 4943 | /* |
| 4944 | * If the erratum was not hit, simply resume the guest and let it fault |
| 4945 | * again. While awful, e.g. the vCPU may get stuck in an infinite loop |
| 4946 | * if the fault is at CPL=0, it's the lesser of all evils. Exiting to |
| 4947 | * userspace will kill the guest, and letting the emulator read garbage |
| 4948 | * will yield random behavior and potentially corrupt the guest. |
| 4949 | * |
| 4950 | * Simply resuming the guest is technically not a violation of the SEV |
| 4951 | * architecture. AMD's APM states that all code fetches and page table |
| 4952 | * accesses for SEV guests are encrypted, regardless of the C-Bit. The
| 4953 | * APM also states that encrypted accesses to MMIO are "ignored", but |
| 4954 | * doesn't explicitly define "ignored", i.e. doing nothing and letting |
| 4955 | * the guest spin is technically "ignoring" the access. |
| 4956 | */ |
| 4957 | return X86EMUL_RETRY_INSTR; |
| 4958 | } |
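
/*
 * Illustrative sketch only (not kernel code): a user-space model of the
 * erratum-1096 applicability test in svm_check_emulate_instruction() above.
 * The workaround is considered only when CR4.SMAP is set and either
 * CR4.SMEP is clear or the fault happened at CPL 3, matching the
 * "smap && (!smep || is_user)" check.
 */
#include <stdbool.h>
#include <stdio.h>

static bool erratum_1096_possible(bool smep, bool smap, bool is_user)
{
	return smap && (!smep || is_user);
}

int main(void)
{
	/* SMAP=1, SMEP=1, CPL0 fault: a SMEP #PF would have fired first. */
	printf("smep=1 smap=1 cpl0: %d\n", erratum_1096_possible(true, true, false));
	/* SMAP=1, SMEP=0, CPL0 fault: the erratum may have been hit. */
	printf("smep=0 smap=1 cpl0: %d\n", erratum_1096_possible(false, true, false));
	/* SMAP=1, SMEP=1, CPL3 fault: still possible for a user-mode fetch. */
	printf("smep=1 smap=1 cpl3: %d\n", erratum_1096_possible(true, true, true));
	return 0;
}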
| 4959 | |
| 4960 | static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu) |
| 4961 | { |
| 4962 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4963 | |
| 4964 | return !gif_set(svm); |
| 4965 | } |
| 4966 | |
| 4967 | static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) |
| 4968 | { |
| 4969 | if (!sev_es_guest(vcpu->kvm))
| 4970 | return kvm_vcpu_deliver_sipi_vector(vcpu, vector); |
| 4971 | |
| 4972 | sev_vcpu_deliver_sipi_vector(vcpu, vector); |
| 4973 | } |
| 4974 | |
| 4975 | static void svm_vm_destroy(struct kvm *kvm) |
| 4976 | { |
| 4977 | avic_vm_destroy(kvm); |
| 4978 | sev_vm_destroy(kvm); |
| 4979 | |
| 4980 | svm_srso_vm_destroy(); |
| 4981 | } |
| 4982 | |
| 4983 | static int svm_vm_init(struct kvm *kvm) |
| 4984 | { |
| 4985 | int type = kvm->arch.vm_type; |
| 4986 | |
| 4987 | if (type != KVM_X86_DEFAULT_VM && |
| 4988 | type != KVM_X86_SW_PROTECTED_VM) { |
| 4989 | kvm->arch.has_protected_state = |
| 4990 | (type == KVM_X86_SEV_ES_VM || type == KVM_X86_SNP_VM); |
| 4991 | to_kvm_sev_info(kvm)->need_init = true; |
| 4992 | |
| 4993 | kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM); |
| 4994 | kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem; |
| 4995 | } |
| 4996 | |
| 4997 | if (!pause_filter_count || !pause_filter_thresh) |
| 4998 | kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE); |
| 4999 | |
| 5000 | if (enable_apicv) { |
| 5001 | int ret = avic_vm_init(kvm); |
| 5002 | if (ret) |
| 5003 | return ret; |
| 5004 | } |
| 5005 | |
| 5006 | svm_srso_vm_init(); |
| 5007 | return 0; |
| 5008 | } |
| 5009 | |
| 5010 | static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu) |
| 5011 | { |
| 5012 | struct page *page = snp_safe_alloc_page(); |
| 5013 | |
| 5014 | if (!page) |
| 5015 | return NULL; |
| 5016 | |
| 5017 | return page_address(page); |
| 5018 | } |
| 5019 | |
| 5020 | struct kvm_x86_ops svm_x86_ops __initdata = { |
| 5021 | .name = KBUILD_MODNAME, |
| 5022 | |
| 5023 | .check_processor_compatibility = svm_check_processor_compat, |
| 5024 | |
| 5025 | .hardware_unsetup = svm_hardware_unsetup, |
| 5026 | .enable_virtualization_cpu = svm_enable_virtualization_cpu, |
| 5027 | .disable_virtualization_cpu = svm_disable_virtualization_cpu, |
| 5028 | .emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu, |
| 5029 | .has_emulated_msr = svm_has_emulated_msr, |
| 5030 | |
| 5031 | .vcpu_precreate = svm_vcpu_precreate, |
| 5032 | .vcpu_create = svm_vcpu_create, |
| 5033 | .vcpu_free = svm_vcpu_free, |
| 5034 | .vcpu_reset = svm_vcpu_reset, |
| 5035 | |
| 5036 | .vm_size = sizeof(struct kvm_svm), |
| 5037 | .vm_init = svm_vm_init, |
| 5038 | .vm_destroy = svm_vm_destroy, |
| 5039 | |
| 5040 | .prepare_switch_to_guest = svm_prepare_switch_to_guest, |
| 5041 | .vcpu_load = svm_vcpu_load, |
| 5042 | .vcpu_put = svm_vcpu_put, |
| 5043 | .vcpu_blocking = avic_vcpu_blocking, |
| 5044 | .vcpu_unblocking = avic_vcpu_unblocking, |
| 5045 | |
| 5046 | .update_exception_bitmap = svm_update_exception_bitmap, |
| 5047 | .get_feature_msr = svm_get_feature_msr, |
| 5048 | .get_msr = svm_get_msr, |
| 5049 | .set_msr = svm_set_msr, |
| 5050 | .get_segment_base = svm_get_segment_base, |
| 5051 | .get_segment = svm_get_segment, |
| 5052 | .set_segment = svm_set_segment, |
| 5053 | .get_cpl = svm_get_cpl, |
| 5054 | .get_cpl_no_cache = svm_get_cpl, |
| 5055 | .get_cs_db_l_bits = svm_get_cs_db_l_bits, |
| 5056 | .is_valid_cr0 = svm_is_valid_cr0, |
| 5057 | .set_cr0 = svm_set_cr0, |
| 5058 | .post_set_cr3 = sev_post_set_cr3, |
| 5059 | .is_valid_cr4 = svm_is_valid_cr4, |
| 5060 | .set_cr4 = svm_set_cr4, |
| 5061 | .set_efer = svm_set_efer, |
| 5062 | .get_idt = svm_get_idt, |
| 5063 | .set_idt = svm_set_idt, |
| 5064 | .get_gdt = svm_get_gdt, |
| 5065 | .set_gdt = svm_set_gdt, |
| 5066 | .set_dr7 = svm_set_dr7, |
| 5067 | .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, |
| 5068 | .cache_reg = svm_cache_reg, |
| 5069 | .get_rflags = svm_get_rflags, |
| 5070 | .set_rflags = svm_set_rflags, |
| 5071 | .get_if_flag = svm_get_if_flag, |
| 5072 | |
| 5073 | .flush_tlb_all = svm_flush_tlb_all, |
| 5074 | .flush_tlb_current = svm_flush_tlb_current, |
| 5075 | .flush_tlb_gva = svm_flush_tlb_gva, |
| 5076 | .flush_tlb_guest = svm_flush_tlb_asid, |
| 5077 | |
| 5078 | .vcpu_pre_run = svm_vcpu_pre_run, |
| 5079 | .vcpu_run = svm_vcpu_run, |
| 5080 | .handle_exit = svm_handle_exit, |
| 5081 | .skip_emulated_instruction = svm_skip_emulated_instruction, |
| 5082 | .update_emulated_instruction = NULL, |
| 5083 | .set_interrupt_shadow = svm_set_interrupt_shadow, |
| 5084 | .get_interrupt_shadow = svm_get_interrupt_shadow, |
| 5085 | .patch_hypercall = svm_patch_hypercall, |
| 5086 | .inject_irq = svm_inject_irq, |
| 5087 | .inject_nmi = svm_inject_nmi, |
| 5088 | .is_vnmi_pending = svm_is_vnmi_pending, |
| 5089 | .set_vnmi_pending = svm_set_vnmi_pending, |
| 5090 | .inject_exception = svm_inject_exception, |
| 5091 | .cancel_injection = svm_cancel_injection, |
| 5092 | .interrupt_allowed = svm_interrupt_allowed, |
| 5093 | .nmi_allowed = svm_nmi_allowed, |
| 5094 | .get_nmi_mask = svm_get_nmi_mask, |
| 5095 | .set_nmi_mask = svm_set_nmi_mask, |
| 5096 | .enable_nmi_window = svm_enable_nmi_window, |
| 5097 | .enable_irq_window = svm_enable_irq_window, |
| 5098 | .update_cr8_intercept = svm_update_cr8_intercept, |
| 5099 | |
| 5100 | .x2apic_icr_is_split = true, |
| 5101 | .set_virtual_apic_mode = avic_refresh_virtual_apic_mode, |
| 5102 | .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl, |
| 5103 | .apicv_post_state_restore = avic_apicv_post_state_restore, |
| 5104 | .required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS, |
| 5105 | |
| 5106 | .get_exit_info = svm_get_exit_info, |
| 5107 | .get_entry_info = svm_get_entry_info, |
| 5108 | |
| 5109 | .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid, |
| 5110 | |
| 5111 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
| 5112 | |
| 5113 | .get_l2_tsc_offset = svm_get_l2_tsc_offset, |
| 5114 | .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier, |
| 5115 | .write_tsc_offset = svm_write_tsc_offset, |
| 5116 | .write_tsc_multiplier = svm_write_tsc_multiplier, |
| 5117 | |
| 5118 | .load_mmu_pgd = svm_load_mmu_pgd, |
| 5119 | |
| 5120 | .check_intercept = svm_check_intercept, |
| 5121 | .handle_exit_irqoff = svm_handle_exit_irqoff, |
| 5122 | |
| 5123 | .nested_ops = &svm_nested_ops, |
| 5124 | |
| 5125 | .deliver_interrupt = svm_deliver_interrupt, |
| 5126 | .pi_update_irte = avic_pi_update_irte, |
| 5127 | .setup_mce = svm_setup_mce, |
| 5128 | |
| 5129 | #ifdef CONFIG_KVM_SMM |
| 5130 | .smi_allowed = svm_smi_allowed, |
| 5131 | .enter_smm = svm_enter_smm, |
| 5132 | .leave_smm = svm_leave_smm, |
| 5133 | .enable_smi_window = svm_enable_smi_window, |
| 5134 | #endif |
| 5135 | |
| 5136 | #ifdef CONFIG_KVM_AMD_SEV |
| 5137 | .dev_get_attr = sev_dev_get_attr, |
| 5138 | .mem_enc_ioctl = sev_mem_enc_ioctl, |
| 5139 | .mem_enc_register_region = sev_mem_enc_register_region, |
| 5140 | .mem_enc_unregister_region = sev_mem_enc_unregister_region, |
| 5141 | .guest_memory_reclaimed = sev_guest_memory_reclaimed, |
| 5142 | |
| 5143 | .vm_copy_enc_context_from = sev_vm_copy_enc_context_from, |
| 5144 | .vm_move_enc_context_from = sev_vm_move_enc_context_from, |
| 5145 | #endif |
| 5146 | .check_emulate_instruction = svm_check_emulate_instruction, |
| 5147 | |
| 5148 | .apic_init_signal_blocked = svm_apic_init_signal_blocked, |
| 5149 | |
| 5150 | .recalc_intercepts = svm_recalc_intercepts, |
| 5151 | .complete_emulated_msr = svm_complete_emulated_msr, |
| 5152 | |
| 5153 | .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, |
| 5154 | .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, |
| 5155 | .alloc_apic_backing_page = svm_alloc_apic_backing_page, |
| 5156 | |
| 5157 | .gmem_prepare = sev_gmem_prepare, |
| 5158 | .gmem_invalidate = sev_gmem_invalidate, |
| 5159 | .gmem_max_mapping_level = sev_gmem_max_mapping_level, |
| 5160 | }; |
| 5161 | |
| 5162 | /* |
| 5163 | * The default MMIO mask is a single bit (excluding the present bit), |
| 5164 | * which could conflict with the memory encryption bit. Check for |
| 5165 | * memory encryption support and override the default MMIO mask if |
| 5166 | * memory encryption is enabled. |
| 5167 | */ |
| 5168 | static __init void svm_adjust_mmio_mask(void) |
| 5169 | { |
| 5170 | unsigned int enc_bit, mask_bit; |
| 5171 | u64 msr, mask; |
| 5172 | |
| 5173 | /* If there is no memory encryption support, use existing mask */ |
| 5174 | if (cpuid_eax(0x80000000) < 0x8000001f)
| 5175 | return; |
| 5176 | |
| 5177 | /* If memory encryption is not enabled, use existing mask */ |
| 5178 | rdmsrq(MSR_AMD64_SYSCFG, msr); |
| 5179 | if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) |
| 5180 | return; |
| 5181 | |
| 5182 | enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
| 5183 | mask_bit = boot_cpu_data.x86_phys_bits; |
| 5184 | |
| 5185 | /* Increment the mask bit if it is the same as the encryption bit */ |
| 5186 | if (enc_bit == mask_bit) |
| 5187 | mask_bit++; |
| 5188 | |
| 5189 | /* |
| 5190 | * If the mask bit location is below 52, then some bits above the |
| 5191 | * physical addressing limit will always be reserved, so use the |
| 5192 | * rsvd_bits() function to generate the mask. This mask, along with |
| 5193 | * the present bit, will be used to generate a page fault with |
| 5194 | * PFER.RSV = 1. |
| 5195 | * |
| 5196 | * If the mask bit location is 52 (or above), then clear the mask. |
| 5197 | */ |
| 5198 | mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; |
| 5199 | |
| 5200 | kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); |
| 5201 | } |
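
/*
 * Illustrative sketch only (not kernel code): how the MMIO SPTE mask built
 * above comes out for a typical SEV-capable part.  rsvd_bits() is
 * re-implemented locally for the example, and the C-bit position (47) and
 * physical address width (48) are sample values, not read from real CPUID.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PT_PRESENT_MASK	(1ULL << 0)

/* Bits s..e set, mirroring the core of KVM's rsvd_bits() helper. */
static uint64_t ex_rsvd_bits(int s, int e)
{
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	unsigned int enc_bit = 47;	/* sample C-bit position */
	unsigned int mask_bit = 48;	/* sample boot_cpu_data.x86_phys_bits */
	uint64_t mask;

	/* Bump the mask bit if it would collide with the encryption bit. */
	if (enc_bit == mask_bit)
		mask_bit++;

	mask = (mask_bit < 52) ? ex_rsvd_bits(mask_bit, 51) | EX_PT_PRESENT_MASK : 0;

	/* Expected output for the sample values: 0x000f000000000001 */
	printf("MMIO mask: 0x%016llx\n", (unsigned long long)mask);
	return 0;
}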
| 5202 | |
| 5203 | static __init void svm_set_cpu_caps(void) |
| 5204 | { |
| 5205 | kvm_set_cpu_caps(); |
| 5206 | |
| 5207 | kvm_caps.supported_perf_cap = 0; |
| 5208 | |
| 5209 | kvm_cpu_cap_clear(X86_FEATURE_IBT); |
| 5210 | |
| 5211 | /* CPUID 0x80000001 and 0x8000000A (SVM features) */ |
| 5212 | if (nested) { |
| 5213 | kvm_cpu_cap_set(X86_FEATURE_SVM); |
| 5214 | kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN); |
| 5215 | |
| 5216 | /* |
| 5217 | * KVM currently flushes TLBs on *every* nested SVM transition, |
| 5218 | * and so for all intents and purposes KVM supports flushing by |
| 5219 | * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush. |
| 5220 | */ |
| 5221 | kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID); |
| 5222 | |
| 5223 | if (nrips) |
| 5224 | kvm_cpu_cap_set(X86_FEATURE_NRIPS); |
| 5225 | |
| 5226 | if (npt_enabled) |
| 5227 | kvm_cpu_cap_set(X86_FEATURE_NPT); |
| 5228 | |
| 5229 | if (tsc_scaling) |
| 5230 | kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR); |
| 5231 | |
| 5232 | if (vls) |
| 5233 | kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD); |
| 5234 | if (lbrv) |
| 5235 | kvm_cpu_cap_set(X86_FEATURE_LBRV); |
| 5236 | |
| 5237 | if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) |
| 5238 | kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER); |
| 5239 | |
| 5240 | if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) |
| 5241 | kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD); |
| 5242 | |
| 5243 | if (vgif) |
| 5244 | kvm_cpu_cap_set(X86_FEATURE_VGIF); |
| 5245 | |
| 5246 | if (vnmi) |
| 5247 | kvm_cpu_cap_set(X86_FEATURE_VNMI); |
| 5248 | |
| 5249 | /* Nested VM can receive #VMEXIT instead of triggering #GP */ |
| 5250 | kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); |
| 5251 | } |
| 5252 | |
| 5253 | if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD)) |
| 5254 | kvm_caps.has_bus_lock_exit = true; |
| 5255 | |
| 5256 | /* CPUID 0x80000008 */ |
| 5257 | if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || |
| 5258 | boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
| 5259 | kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); |
| 5260 | |
| 5261 | if (enable_pmu) { |
| 5262 | /* |
| 5263 | * Enumerate support for PERFCTR_CORE if and only if KVM has |
| 5264 | * access to enough counters to virtualize "core" support, |
| 5265 | * otherwise limit vPMU support to the legacy number of counters. |
| 5266 | */ |
| 5267 | if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE) |
| 5268 | kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS, |
| 5269 | kvm_pmu_cap.num_counters_gp); |
| 5270 | else |
| 5271 | kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE); |
| 5272 | |
| 5273 | if (kvm_pmu_cap.version != 2 || |
| 5274 | !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE)) |
| 5275 | kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2); |
| 5276 | } |
| 5277 | |
| 5278 | /* CPUID 0x8000001F (SME/SEV features) */ |
| 5279 | sev_set_cpu_caps(); |
| 5280 | |
| 5281 | /* |
| 5282 | * Clear capabilities that are automatically configured by common code, |
| 5283 | * but that require explicit SVM support (that isn't yet implemented). |
| 5284 | */ |
| 5285 | kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT); |
| 5286 | kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM); |
| 5287 | |
| 5288 | kvm_setup_xss_caps(); |
| 5289 | } |
| 5290 | |
| 5291 | static __init int svm_hardware_setup(void) |
| 5292 | { |
| 5293 | void *iopm_va; |
| 5294 | int cpu, r; |
| 5295 | |
| 5296 | /* |
| 5297 | * NX is required for shadow paging and for NPT if the NX huge pages |
| 5298 | * mitigation is enabled. |
| 5299 | */ |
| 5300 | if (!boot_cpu_has(X86_FEATURE_NX)) { |
| 5301 | pr_err_ratelimited("NX (Execute Disable) not supported\n");
| 5302 | return -EOPNOTSUPP; |
| 5303 | } |
| 5304 | kvm_enable_efer_bits(EFER_NX); |
| 5305 | |
| 5306 | kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | |
| 5307 | XFEATURE_MASK_BNDCSR); |
| 5308 | |
| 5309 | if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) |
| 5310 | kvm_enable_efer_bits(EFER_FFXSR); |
| 5311 | |
| 5312 | if (tsc_scaling) { |
| 5313 | if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
| 5314 | tsc_scaling = false; |
| 5315 | } else { |
| 5316 | pr_info("TSC scaling supported\n" ); |
| 5317 | kvm_caps.has_tsc_control = true; |
| 5318 | } |
| 5319 | } |
| 5320 | kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX; |
| 5321 | kvm_caps.tsc_scaling_ratio_frac_bits = 32; |
| 5322 | |
| 5323 | tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); |
| 5324 | |
| 5325 | if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) |
| 5326 | kvm_enable_efer_bits(EFER_AUTOIBRS); |
| 5327 | |
| 5328 | /* Check for pause filtering support */ |
| 5329 | if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { |
| 5330 | pause_filter_count = 0; |
| 5331 | pause_filter_thresh = 0; |
| 5332 | } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) { |
| 5333 | pause_filter_thresh = 0; |
| 5334 | } |
| 5335 | |
| 5336 | if (nested) { |
| 5337 | pr_info("Nested Virtualization enabled\n" ); |
| 5338 | kvm_enable_efer_bits(EFER_SVME); |
| 5339 | if (!boot_cpu_has(X86_FEATURE_EFER_LMSLE_MBZ)) |
| 5340 | kvm_enable_efer_bits(EFER_LMSLE); |
| 5341 | |
| 5342 | r = nested_svm_init_msrpm_merge_offsets(); |
| 5343 | if (r) |
| 5344 | return r; |
| 5345 | } |
| 5346 | |
| 5347 | /* |
| 5348 | * KVM's MMU doesn't support using 2-level paging for itself, and thus |
| 5349 | * NPT isn't supported if the host is using 2-level paging since host |
| 5350 | * CR4 is unchanged on VMRUN. |
| 5351 | */ |
| 5352 | if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE)) |
| 5353 | npt_enabled = false; |
| 5354 | |
| 5355 | if (!boot_cpu_has(X86_FEATURE_NPT)) |
| 5356 | npt_enabled = false; |
| 5357 | |
| 5358 | /* Force VM NPT level equal to the host's paging level */ |
| 5359 | kvm_configure_mmu(npt_enabled, get_npt_level(),
| 5360 | get_npt_level(), PG_LEVEL_1G);
| 5361 | pr_info("Nested Paging %s\n", str_enabled_disabled(npt_enabled));
| 5362 | |
| 5363 | /*
| 5364 | * On AMD processors the PTE accessed bit appears to be set by
| 5365 | * the CPU hardware before the NPF vmexit.
| 5366 | * This is not the expected behaviour, and our tests fail
| 5367 | * because of it.
| 5368 | * The workaround here is to disable support for
| 5369 | * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
| 5370 | * In this case userspace can query the KVM_CAP_SMALLER_MAXPHYADDR
| 5371 | * extension to learn whether the support is present and decide
| 5372 | * how to handle its absence.
| 5373 | * If future AMD CPU models change the behaviour described above,
| 5374 | * this variable can be changed accordingly.
| 5375 | */
| 5376 | allow_smaller_maxphyaddr = !npt_enabled; |
| 5377 | |
| 5378 | /* Setup shadow_me_value and shadow_me_mask */ |
| 5379 | kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask); |
| 5380 | |
| 5381 | svm_adjust_mmio_mask(); |
| 5382 | |
| 5383 | nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS); |
| 5384 | |
| 5385 | if (lbrv) { |
| 5386 | if (!boot_cpu_has(X86_FEATURE_LBRV)) |
| 5387 | lbrv = false; |
| 5388 | else |
| 5389 | pr_info("LBR virtualization supported\n" ); |
| 5390 | } |
| 5391 | |
| 5392 | iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL); |
| 5393 | if (!iopm_va) |
| 5394 | return -ENOMEM; |
| 5395 | |
| 5396 | iopm_base = __sme_set(__pa(iopm_va)); |
| 5397 | |
| 5398 | /* |
| 5399 | * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which |
| 5400 | * may be modified by svm_adjust_mmio_mask()), as well as nrips. |
| 5401 | */ |
| 5402 | sev_hardware_setup(); |
| 5403 | |
| 5404 | svm_hv_hardware_setup(); |
| 5405 | |
| 5406 | enable_apicv = avic_hardware_setup(); |
| 5407 | if (!enable_apicv) { |
| 5408 | enable_ipiv = false; |
| 5409 | svm_x86_ops.vcpu_blocking = NULL; |
| 5410 | svm_x86_ops.vcpu_unblocking = NULL; |
| 5411 | svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL; |
| 5412 | } |
| 5413 | |
| 5414 | if (vls) { |
| 5415 | if (!npt_enabled || |
| 5416 | !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) || |
| 5417 | !IS_ENABLED(CONFIG_X86_64)) { |
| 5418 | vls = false; |
| 5419 | } else { |
| 5420 | pr_info("Virtual VMLOAD VMSAVE supported\n" ); |
| 5421 | } |
| 5422 | } |
| 5423 | |
| 5424 | if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) |
| 5425 | svm_gp_erratum_intercept = false; |
| 5426 | |
| 5427 | if (vgif) { |
| 5428 | if (!boot_cpu_has(X86_FEATURE_VGIF)) |
| 5429 | vgif = false; |
| 5430 | else |
| 5431 | pr_info("Virtual GIF supported\n" ); |
| 5432 | } |
| 5433 | |
| 5434 | vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI); |
| 5435 | if (vnmi) |
| 5436 | pr_info("Virtual NMI enabled\n" ); |
| 5437 | |
| 5438 | if (!vnmi) { |
| 5439 | svm_x86_ops.is_vnmi_pending = NULL; |
| 5440 | svm_x86_ops.set_vnmi_pending = NULL; |
| 5441 | } |
| 5442 | |
| 5443 | if (!enable_pmu) |
| 5444 | pr_info("PMU virtualization is disabled\n" ); |
| 5445 | |
| 5446 | svm_set_cpu_caps(); |
| 5447 | |
| 5448 | kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED; |
| 5449 | |
| 5450 | for_each_possible_cpu(cpu) { |
| 5451 | r = svm_cpu_init(cpu); |
| 5452 | if (r) |
| 5453 | goto err; |
| 5454 | } |
| 5455 | |
| 5456 | return 0; |
| 5457 | |
| 5458 | err: |
| 5459 | svm_hardware_unsetup(); |
| 5460 | return r; |
| 5461 | } |
| 5462 | |
| 5463 | |
| 5464 | static struct kvm_x86_init_ops svm_init_ops __initdata = { |
| 5465 | .hardware_setup = svm_hardware_setup, |
| 5466 | |
| 5467 | .runtime_ops = &svm_x86_ops, |
| 5468 | .pmu_ops = &amd_pmu_ops, |
| 5469 | }; |
| 5470 | |
| 5471 | static void __svm_exit(void) |
| 5472 | { |
| 5473 | kvm_x86_vendor_exit(); |
| 5474 | } |
| 5475 | |
| 5476 | static int __init svm_init(void) |
| 5477 | { |
| 5478 | int r; |
| 5479 | |
| 5480 | KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_svm); |
| 5481 | |
| 5482 | __unused_size_checks(); |
| 5483 | |
| 5484 | if (!kvm_is_svm_supported()) |
| 5485 | return -EOPNOTSUPP; |
| 5486 | |
| 5487 | r = kvm_x86_vendor_init(&svm_init_ops);
| 5488 | if (r) |
| 5489 | return r; |
| 5490 | |
| 5491 | /* |
| 5492 | * Common KVM initialization _must_ come last, after this, /dev/kvm is |
| 5493 | * exposed to userspace! |
| 5494 | */ |
| 5495 | r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
| 5496 | THIS_MODULE);
| 5497 | if (r) |
| 5498 | goto err_kvm_init; |
| 5499 | |
| 5500 | return 0; |
| 5501 | |
| 5502 | err_kvm_init: |
| 5503 | __svm_exit(); |
| 5504 | return r; |
| 5505 | } |
| 5506 | |
| 5507 | static void __exit svm_exit(void) |
| 5508 | { |
| 5509 | kvm_exit(); |
| 5510 | __svm_exit(); |
| 5511 | } |
| 5512 | |
| 5513 | module_init(svm_init) |
| 5514 | module_exit(svm_exit) |
| 5515 | |