On x86-64 __VIRTUAL_MASK_SHIFT depends on paging mode now.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 arch/x86/entry/entry_64.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 607d72c4a485..edec30584eb8 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -266,7 +266,8 @@ return_from_SYSCALL_64:
 	 * If width of "canonical tail" ever becomes variable, this will need
 	 * to be updated to remain correct on both old and new CPUs.
 	 *
-	 * Change top 16 bits to be the sign-extension of 47th bit
+	 * Change top bits to match most significant bit (47th or 56th bit
+	 * depending on paging mode) in the address.
 	 */
 	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
--
2.11.0
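
For context, here is a minimal C sketch (not part of the patch) of what the shl/sar pair above computes: it sign-extends the return address from the most significant implemented virtual-address bit, i.e. bit 47 with 4-level paging (__VIRTUAL_MASK_SHIFT == 47) or bit 56 with 5-level paging (__VIRTUAL_MASK_SHIFT == 56). The helper name and test values are made up for illustration, and the sketch assumes arithmetic right shift of signed values, as GCC implements it.

/*
 * Illustration only -- not part of the patch. It mirrors what the
 * shl/sar pair in entry_64.S does: replicate the most significant
 * implemented virtual-address bit into all higher bits, yielding a
 * canonical address.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t canonicalize(uint64_t addr, unsigned int virtual_mask_shift)
{
	unsigned int shift = 64 - (virtual_mask_shift + 1);

	/*
	 * Arithmetic right shift copies the top implemented bit downwards
	 * (implementation-defined in C for negative values; arithmetic on GCC).
	 */
	return (uint64_t)(((int64_t)(addr << shift)) >> shift);
}

int main(void)
{
	/* 4-level paging: bit 47 is set, so the top 16 bits become ones. */
	printf("%#llx\n", (unsigned long long)canonicalize(0x0000800000000000ULL, 47));
	/* 5-level paging: the same address is already canonical for bit 56. */
	printf("%#llx\n", (unsigned long long)canonicalize(0x0000800000000000ULL, 56));
	return 0;
}

The point of the two-instruction shift pair is that the same code stays correct for either paging mode; only the immediate derived from __VIRTUAL_MASK_SHIFT changes at build time.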