On 3/24/2020 1:06 AM, Thomas Gleixner wrote:
Xiaoyao Li <xiaoyao.li@xxxxxxxxx> writes:
+/*
+ * Soft copy of MSR_TEST_CTRL initialized when we first read the
+ * MSR. Used at runtime to avoid using rdmsr again just to collect
+ * the reserved bits in the MSR. We assume reserved bits are the
+ * same on all CPUs.
+ */
+static u64 test_ctrl_val;
+
/*
* Locking is not required at the moment because only bit 29 of this
* MSR is implemented and locking would not prevent that the operation
@@ -1027,16 +1035,14 @@ static void __init split_lock_setup(void)
*/
static void __sld_msr_set(bool on)
{
- u64 test_ctrl_val;
-
- rdmsrl(MSR_TEST_CTRL, test_ctrl_val);
+ u64 val = test_ctrl_val;
if (on)
- test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+ val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
else
- test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+ val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
- wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
+ wrmsrl(MSR_TEST_CTRL, val);
}
/*
@@ -1048,11 +1054,13 @@ static void __sld_msr_set(bool on)
*/
static void split_lock_init(struct cpuinfo_x86 *c)
{
- u64 test_ctrl_val;
+ u64 val;
- if (rdmsrl_safe(MSR_TEST_CTRL, &test_ctrl_val))
+ if (rdmsrl_safe(MSR_TEST_CTRL, &val))
goto msr_broken;
+ test_ctrl_val = val;
+
switch (sld_state) {
case sld_off:
if (wrmsrl_safe(MSR_TEST_CTRL, test_ctrl_val & ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT))
That's just broken. Simply because
case sld_warn:
case sld_fatal:
set the split lock detect bit, but the cache variable has it cleared
unless it was set at boot time already.
The test_ctrl_val is not meant to cache the value of MSR_TEST_CTRL, but to
cache the reserved/unused bits other than the MSR_TEST_CTRL_SPLIT_LOCK_DETECT bit.
Thanks,
tglx