linux-next: manual merge of the kvm-arm tree with the arm64 tree

Hi all,

Today's linux-next merge of the kvm-arm tree got a conflict in:

  arch/arm64/kernel/cpu_errata.c

between commit:

  c0cda3b8ee6b ("arm64: capabilities: Update prototype for enable call back")
  followed by a series of patches cleaning up capabilities

from the arm64 tree and commits:

  4b472ffd1513 ("arm64: Enable ARM64_HARDEN_EL2_VECTORS on Cortex-A57 and A72")
  f9f5dc19509b ("arm64: KVM: Use SMCCC_ARCH_WORKAROUND_1 for Falkor BP hardening")

from the kvm-arm tree.
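
The underlying API change is the capabilities rework from commit
c0cda3b8ee6b above, which changed the callback in struct
arm64_cpu_capabilities from an int-returning .enable taking an opaque
pointer to a void-returning .cpu_enable taking the entry itself,
roughly (a sketch of just the two callback prototypes, not the full
structure definition):

  /* before c0cda3b8ee6b */
  int (*enable)(void *data);

  /* after c0cda3b8ee6b */
  void (*cpu_enable)(const struct arm64_cpu_capabilities *entry);

The kvm-arm commits add and modify users of the old prototype, so the
resolution below carries their changes over to the new one.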

I fixed it up (maybe; please check the result below) and can
carry the fix as necessary. This is now fixed as far as linux-next is
concerned, but any non-trivial conflicts should be mentioned to your
upstream maintainer when your tree is submitted for merging.  You may
also want to consider cooperating with the maintainer of the conflicting
tree to minimise any particularly complex conflicts.
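
If you want to reproduce the conflict locally, something like the
following should do it (the remote and branch names here are
illustrative assumptions, not my actual linux-next set-up):

  $ git fetch arm64
  $ git fetch kvm-arm
  $ git checkout -b test arm64/for-next/core
  $ git merge kvm-arm/next
  Auto-merging arch/arm64/kernel/cpu_errata.c
  CONFLICT (content): Merge conflict in arch/arm64/kernel/cpu_errata.c

While the merge is in progress, plain "git diff" shows a combined
("diff --cc") view of the conflicted file against both parents; after
editing the file to resolve the conflict, the same command produces the
kind of resolution diff shown below.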

-- 
Cheers,
Stephen Rothwell

diff --cc arch/arm64/kernel/cpu_errata.c
index 2df792771053,caa73af7d26e..000000000000
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@@ -76,8 -57,11 +76,10 @@@ cpu_enable_trap_ctr_access(const struc
  {
  	/* Clear SCTLR_EL1.UCT */
  	config_sctlr_el1(SCTLR_EL1_UCT, 0);
 -	return 0;
  }
  
+ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+ 
  #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
  #include <asm/mmu_context.h>
  #include <asm/cacheflush.h>
@@@ -179,18 -156,31 +174,31 @@@ static void call_hvc_arch_workaround_1(
  	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
  }
  
+ static void qcom_link_stack_sanitization(void)
+ {
+ 	u64 tmp;
+ 
+ 	asm volatile("mov	%0, x30		\n"
+ 		     ".rept	16		\n"
+ 		     "bl	. + 4		\n"
+ 		     ".endr			\n"
+ 		     "mov	x30, %0		\n"
+ 		     : "=&r" (tmp));
+ }
+ 
 -static int enable_smccc_arch_workaround_1(void *data)
 +static void
 +enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
  {
 -	const struct arm64_cpu_capabilities *entry = data;
  	bp_hardening_cb_t cb;
  	void *smccc_start, *smccc_end;
  	struct arm_smccc_res res;
+ 	u32 midr = read_cpuid_id();
  
  	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
 -		return 0;
 +		return;
  
  	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
 -		return 0;
 +		return;
  
  	switch (psci_ops.conduit) {
  	case PSCI_CONDUIT_HVC:
@@@ -214,139 -204,33 +222,124 @@@
  		break;
  
  	default:
 -		return 0;
 +		return;
  	}
  
+ 	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+ 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+ 		cb = qcom_link_stack_sanitization;
+ 
  	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
  
 -	return 0;
 +	return;
  }
  
- static void qcom_link_stack_sanitization(void)
- {
- 	u64 tmp;
- 
- 	asm volatile("mov	%0, x30		\n"
- 		     ".rept	16		\n"
- 		     "bl	. + 4		\n"
- 		     ".endr			\n"
- 		     "mov	x30, %0		\n"
- 		     : "=&r" (tmp));
- }
- 
- static void
- qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
- {
- 	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
- 				__qcom_hyp_sanitize_link_stack_start,
- 				__qcom_hyp_sanitize_link_stack_end);
- }
  #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
  
 -#define MIDR_RANGE(model, min, max) \
 -	.def_scope = SCOPE_LOCAL_CPU, \
 -	.matches = is_affected_midr_range, \
 -	.midr_model = model, \
 -	.midr_range_min = min, \
 -	.midr_range_max = max
 +#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
 +	.matches = is_affected_midr_range,			\
 +	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 +
 +#define CAP_MIDR_ALL_VERSIONS(model)					\
 +	.matches = is_affected_midr_range,				\
 +	.midr_range = MIDR_ALL_VERSIONS(model)
 +
 +#define MIDR_FIXED(rev, revidr_mask) \
 +	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
 +
 +#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
 +	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
 +	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 +
 +#define CAP_MIDR_RANGE_LIST(list)				\
 +	.matches = is_affected_midr_range_list,			\
 +	.midr_range_list = list
 +
 +/* Errata affecting a range of revisions of a given model variant */
 +#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
 +	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
 +
 +/* Errata affecting a single variant/revision of a model */
 +#define ERRATA_MIDR_REV(model, var, rev)	\
 +	ERRATA_MIDR_RANGE(model, var, rev, var, rev)
 +
 +/* Errata affecting all variants/revisions of a given model */
 +#define ERRATA_MIDR_ALL_VERSIONS(model)				\
 +	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 +	CAP_MIDR_ALL_VERSIONS(model)
 +
 +/* Errata affecting a list of midr ranges, with the same workaround */
 +#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
 +	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 +	CAP_MIDR_RANGE_LIST(midr_list)
 +
 +/*
 + * Generic helper for handling capabilities with multiple (match,enable) pairs
 + * of callbacks, sharing the same capability bit.
 + * Iterate over each entry to see if at least one matches.
 + */
 +static bool __maybe_unused
 +multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
 +{
 +	const struct arm64_cpu_capabilities *caps;
 +
 +	for (caps = entry->match_list; caps->matches; caps++)
 +		if (caps->matches(caps, scope))
 +			return true;
 +
 +	return false;
 +}
 +
 +/*
 + * Take appropriate action for all matching entries in the shared capability
 + * entry.
 + */
 +static void __maybe_unused
 +multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
 +{
 +	const struct arm64_cpu_capabilities *caps;
 +
 +	for (caps = entry->match_list; caps->matches; caps++)
 +		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
 +		    caps->cpu_enable)
 +			caps->cpu_enable(caps);
 +}
 +
 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 +
 +/*
 + * List of CPUs where we need to issue a psci call to
 + * harden the branch predictor.
 + */
 +static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
 +	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 +	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 +	{},
 +};
 +
 +static const struct midr_range qcom_bp_harden_cpus[] = {
 +	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 +	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 +	{},
 +};
 +
 +static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
 +	{
 +		CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
 +		.cpu_enable = enable_smccc_arch_workaround_1,
 +	},
 +	{
 +		CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
 +		.cpu_enable = qcom_enable_link_stack_sanitization,
 +	},
 +	{},
 +};
  
 -#define MIDR_ALL_VERSIONS(model) \
 -	.def_scope = SCOPE_LOCAL_CPU, \
 -	.matches = is_affected_midr_range, \
 -	.midr_model = model, \
 -	.midr_range_min = 0, \
 -	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
 +#endif
  
  const struct arm64_cpu_capabilities arm64_errata[] = {
  #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
@@@ -491,15 -369,56 +484,27 @@@
  #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
  	{
  		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 -		.enable = enable_smccc_arch_workaround_1,
 +		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 +		.matches = multi_entry_cap_matches,
 +		.cpu_enable = multi_entry_cap_cpu_enable,
 +		.match_list = arm64_bp_harden_list,
  	},
  	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 -		.enable = enable_smccc_arch_workaround_1,
 +		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
 +		ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
  	},
+ #endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+ 	{
+ 		.desc = "Cortex-A57 EL2 vector hardening",
+ 		.capability = ARM64_HARDEN_EL2_VECTORS,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ 	},
+ 	{
+ 		.desc = "Cortex-A72 EL2 vector hardening",
+ 		.capability = ARM64_HARDEN_EL2_VECTORS,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ 	},
  #endif
  	{
  	}
