Direct HLT instruction execution causes #VEs for TDX VMs, which are
routed to the hypervisor via TDCALL. If HLT is executed in an STI
shadow, the resulting #VE handler will enable interrupts before the
TDCALL is routed to the hypervisor, leading to missed wakeup events,
as the current TDX spec doesn't expose interruptibility-state
information that would allow the #VE handler to selectively enable
interrupts.

Commit bfe6ed0c67 ("x86/tdx: Add HLT support for TDX guests")
prevented the idle routines from executing the HLT instruction in an
STI shadow. But it missed the paravirt routine, which can be reached
via this path as an example:

  kvm_wait() =>
    safe_halt() =>
      raw_safe_halt() =>
        arch_safe_halt() =>
          irq.safe_halt() =>
            pv_native_safe_halt()

To reliably handle arch_safe_halt() for TDX VMs, introduce an explicit
dependency on CONFIG_PARAVIRT and override the paravirt halt() and
safe_halt() routines with TDX-safe versions that execute a direct
TDCALL and the needed interrupt flag updates. Executing a direct
TDCALL brings the additional benefit of avoiding HLT-related #VEs
altogether.

As tested by Ryan Afranji:

  "Tested with the specjbb2015 benchmark. It has heavy lock contention
   which leads to many halt calls. TDX VMs suffered a poor score before
   this patchset. Verified the major performance improvement with this
   patchset applied."

Fixes: bfe6ed0c67 ("x86/tdx: Add HLT support for TDX guests")
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Ryan Afranji <afranji@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20250228014416.3925664-3-vannapurve@google.com
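
A minimal sketch of the approach, assuming the tdx_halt() helper
declared in the header below issues the HLT TDVMCALL directly (so no
#VE is ever taken for HLT); the tdx_safe_halt() wrapper and the exact
pv_ops wiring shown here are illustrative, not the verbatim patch:

	/* TDX-safe safe_halt(): wait via direct TDCALL, then enable IRQs. */
	static void __cpuidle tdx_safe_halt(void)
	{
		tdx_halt();
		/* safe_halt() must return with interrupts enabled. */
		raw_local_irq_enable();
	}

	/*
	 * During TDX guest init, under CONFIG_PARAVIRT: route the hooks
	 * reached via arch_safe_halt() to the TDX-safe versions.
	 */
	pv_ops.irq.safe_halt = tdx_safe_halt;
	pv_ops.irq.halt = tdx_halt;

With these overrides in place, kvm_wait() => ... => arch_safe_halt()
lands in tdx_safe_halt() instead of pv_native_safe_halt(), so HLT is
never executed in an STI shadow.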
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021-2022 Intel Corporation */
#ifndef _ASM_X86_TDX_H
#define _ASM_X86_TDX_H

#include <linux/init.h>
#include <linux/bits.h>

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/trapnr.h>
#include <asm/shared/tdx.h>

/*
 * SW-defined error codes.
 *
 * Bits 47:40 == 0xFF indicate the Reserved status code class that is
 * never used by the TDX module.
 */
#define TDX_ERROR			_BITUL(63)
#define TDX_SW_ERROR			(TDX_ERROR | GENMASK_ULL(47, 40))
#define TDX_SEAMCALL_VMFAILINVALID	(TDX_SW_ERROR | _UL(0xFFFF0000))

#define TDX_SEAMCALL_GP			(TDX_SW_ERROR | X86_TRAP_GP)
#define TDX_SEAMCALL_UD			(TDX_SW_ERROR | X86_TRAP_UD)

/*
 * TDX module SEAMCALL leaf function error codes
 */
#define TDX_SUCCESS		0ULL
#define TDX_RND_NO_ENTROPY	0x8000020300000000ULL

#ifndef __ASSEMBLER__

#include <uapi/asm/mce.h>

/*
 * Used by the #VE exception handler to gather the #VE exception
 * info from the TDX module. This is a software only structure
 * and not part of the TDX module/VMM ABI.
 */
struct ve_info {
	u64 exit_reason;
	u64 exit_qual;
	/* Guest Linear (virtual) Address */
	u64 gla;
	/* Guest Physical Address */
	u64 gpa;
	u32 instr_len;
	u32 instr_info;
};

#ifdef CONFIG_INTEL_TDX_GUEST

void __init tdx_early_init(void);

void tdx_get_ve_info(struct ve_info *ve);

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);

void tdx_halt(void);

bool tdx_early_handle_ve(struct pt_regs *regs);

int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);

u64 tdx_hcall_get_quote(u8 *buf, size_t size);

void __init tdx_dump_attributes(u64 td_attr);
void __init tdx_dump_td_ctls(u64 td_ctls);

#else

static inline void tdx_early_init(void) { };
static inline void tdx_halt(void) { };

static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }

#endif /* CONFIG_INTEL_TDX_GUEST */

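/*
 * KVM hypercalls are issued via TDVMCALL when running as a TDX guest
 * on top of KVM.
 */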
#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_INTEL_TDX_GUEST)
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4);
#else
static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
				     unsigned long p2, unsigned long p3,
				     unsigned long p4)
{
	return -ENODEV;
}
#endif /* CONFIG_INTEL_TDX_GUEST && CONFIG_KVM_GUEST */

#ifdef CONFIG_INTEL_TDX_HOST
u64 __seamcall(u64 fn, struct tdx_module_args *args);
u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
void tdx_init(void);

#include <asm/archrandom.h>

typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);

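/*
 * The TDX module can transiently fail a SEAMCALL with
 * TDX_RND_NO_ENTROPY when it runs out of hardware entropy. Retry such
 * calls a bounded number of times (reusing the RDRAND retry budget)
 * before reporting the error to the caller.
 */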
static inline u64 sc_retry(sc_func_t func, u64 fn,
			   struct tdx_module_args *args)
{
	int retry = RDRAND_RETRY_LOOPS;
	u64 ret;

	do {
		ret = func(fn, args);
	} while (ret == TDX_RND_NO_ENTROPY && --retry);

	return ret;
}

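/* Entropy-retrying wrappers around the raw __seamcall*() variants. */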
#define seamcall(_fn, _args)		sc_retry(__seamcall, (_fn), (_args))
#define seamcall_ret(_fn, _args)	sc_retry(__seamcall_ret, (_fn), (_args))
#define seamcall_saved_ret(_fn, _args)	sc_retry(__seamcall_saved_ret, (_fn), (_args))
int tdx_cpu_enable(void);
int tdx_enable(void);
const char *tdx_dump_mce_info(struct mce *m);
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
static inline int tdx_enable(void) { return -ENODEV; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
#endif /* CONFIG_INTEL_TDX_HOST */

#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_TDX_H */