4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Mark Rutland <mark.rutland@xxxxxxx>

From: Marc Zyngier <marc.zyngier@xxxxxxx>

commit f72af90c3783d924337624659b43e2d36f1b36b4 upstream.

We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
So let's intercept it as early as we can by testing for the
function call number as soon as we've identified a HVC call
coming from the guest.

Tested-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Reviewed-by: Christoffer Dall <christoffer.dall@xxxxxxxxxx>
Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx> [v4.9 backport]
Tested-by: Greg Hackmann <ghackmann@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/arm64/kvm/hyp/hyp-entry.S |   20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -79,10 +80,11 @@ alternative_endif
 	lsr	x0, x1, #ESR_ELx_EC_SHIFT
 
 	cmp	x0, #ESR_ELx_EC_HVC64
+	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
 
-	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
-	cbnz	x1, el1_trap		// called HVC
+	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
+	cbnz	x1, el1_hvc_guest	// called HVC
 
 	/* Here, we're pretty sure the host called HVC. */
 	ldp	x0, x1, [sp], #16
@@ -101,6 +103,20 @@ alternative_endif
 
 2:	eret
 
+el1_hvc_guest:
+	/*
+	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+	 * The workaround has already been applied on the host,
+	 * so let's quickly get back to the guest. We don't bother
+	 * restoring x1, as it can be clobbered anyway.
+	 */
+	ldr	x1, [sp]				// Guest's x0
+	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+	cbnz	w1, el1_trap
+	mov	x0, x1
+	add	sp, sp, #16
+	eret
+
el1_trap:
	/*
	 * x0: ESR_EC
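
For context, a minimal sketch of the guest-side call that this fast path is
optimising, assuming an SMCCC 1.1 capable guest; arm_smccc_1_1_hvc() is the
helper from <linux/arm-smccc.h> in later kernels and the related mitigation
backports, so it is not necessarily present in every v4.9 tree, and the
function name below is purely illustrative (the in-tree equivalent lives in
arch/arm64/kernel/cpu_errata.c):

#include <linux/arm-smccc.h>

/* Illustrative only, not part of this patch. */
static void example_bp_hardening_call(void)
{
	/*
	 * w0 = ARM_SMCCC_ARCH_WORKAROUND_1 (0x80008000), then HVC #0.
	 * The el1_hvc_guest fast path above matches that function ID
	 * against the guest's saved x0 and ERETs straight back with
	 * x0 = 0 (SMCCC success), without leaving hyp-entry.S.
	 */
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

Reading the new hunk: the eor/cbnz pair is simply an equality test on the
guest's w0 ((w0 ^ ARM_SMCCC_ARCH_WORKAROUND_1) == 0), and the following
"mov x0, x1" picks up the zero left in w1 by that test, so the guest sees
a zero (success) return value.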