The minimum binutils version for building the kernel is currently 2.23, which
does not support the GFNI instructions. As a result, aria-avx2 fails to build
when such an old binutils is used. The GFNI code is an optional part of
aria-avx2, so disable that part when the assembler does not support GFNI.

Fixes: 37d8d3ae7a58 ("crypto: x86/aria - implement aria-avx2")
Reported-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Taehee Yoo <ap420073@xxxxxxxxx>
---
 arch/x86/crypto/aria-aesni-avx2-asm_64.S | 10 +++++++++-
 arch/x86/crypto/aria_aesni_avx2_glue.c   |  4 +++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/arch/x86/crypto/aria-aesni-avx2-asm_64.S b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
index b6cac9a40f2c..82a14b4ad920 100644
--- a/arch/x86/crypto/aria-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
@@ -302,6 +302,7 @@
 	vpbroadcastb ((round * 16) + idx + 4)(rk), t0;	\
 	vpxor t0, x7, x7;
 
+#ifdef CONFIG_AS_GFNI
 #define aria_sbox_8way_gfni(x0, x1, x2, x3,	\
 			    x4, x5, x6, x7,	\
 			    t0, t1, t2, t3,	\
@@ -324,6 +325,7 @@
 	vgf2p8affineinvqb $0, t2, x3, x3;	\
 	vgf2p8affineinvqb $0, t2, x7, x7
 
+#endif /* CONFIG_AS_GFNI */
 #define aria_sbox_8way(x0, x1, x2, x3,		\
 		       x4, x5, x6, x7,		\
 		       t0, t1, t2, t3,		\
@@ -596,7 +598,7 @@
 	aria_load_state_8way(y0, y1, y2, y3,	\
 			     y4, y5, y6, y7,	\
 			     mem_tmp, 8);
-
+#ifdef CONFIG_AS_GFNI
 #define aria_fe_gfni(x0, x1, x2, x3,		\
 		     x4, x5, x6, x7,		\
 		     y0, y1, y2, y3,		\
@@ -750,6 +752,7 @@
 	aria_load_state_8way(y0, y1, y2, y3,	\
 			     y4, y5, y6, y7,	\
 			     mem_tmp, 8);
+#endif /* CONFIG_AS_GFNI */
 
 .section	.rodata.cst32.shufb_16x16b, "aM", @progbits, 32
 .align 32
@@ -803,6 +806,7 @@
 .Ltf_hi__x2__and__fwd_aff:
 	.octa 0x3F893781E95FE1576CDA64D2BA0CB204
 
+#ifdef CONFIG_AS_GFNI
 .section	.rodata.cst8, "aM", @progbits, 8
 .align 8
 /* AES affine: */
@@ -864,6 +868,8 @@
 	    BV8(0, 0, 0, 0, 0, 0, 1, 0),
 	    BV8(0, 0, 0, 0, 0, 0, 0, 1))
 
+#endif /* CONFIG_AS_GFNI */
+
 /* 4-bit mask */
 .section	.rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
 .align 4
@@ -1213,6 +1219,7 @@ SYM_TYPED_FUNC_START(aria_aesni_avx2_ctr_crypt_32way)
 	RET;
 SYM_FUNC_END(aria_aesni_avx2_ctr_crypt_32way)
 
+#ifdef CONFIG_AS_GFNI
 SYM_FUNC_START_LOCAL(__aria_aesni_avx2_gfni_crypt_32way)
 	/* input:
 	 *      %r9: rk
@@ -1431,3 +1438,4 @@ SYM_TYPED_FUNC_START(aria_aesni_avx2_gfni_ctr_crypt_32way)
 	FRAME_END
 	RET;
 SYM_FUNC_END(aria_aesni_avx2_gfni_ctr_crypt_32way)
+#endif /* CONFIG_AS_GFNI */
diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c
index 95fccc6dc420..87a11804fc77 100644
--- a/arch/x86/crypto/aria_aesni_avx2_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx2_glue.c
@@ -26,6 +26,7 @@ asmlinkage void aria_aesni_avx2_ctr_crypt_32way(const void *ctx, u8 *dst,
 						const u8 *src,
 						u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_ctr_crypt_32way);
+#ifdef CONFIG_AS_GFNI
 asmlinkage void aria_aesni_avx2_gfni_encrypt_32way(const void *ctx, u8 *dst,
 						   const u8 *src);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_encrypt_32way);
@@ -36,6 +37,7 @@ asmlinkage void aria_aesni_avx2_gfni_ctr_crypt_32way(const void *ctx, u8 *dst,
 						     const u8 *src,
 						     u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_ctr_crypt_32way);
+#endif /* CONFIG_AS_GFNI */
 
 static struct aria_avx_ops aria_ops;
 
@@ -215,7 +217,7 @@ static int __init aria_avx2_init(void)
 		return -ENODEV;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_GFNI)) {
+	if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) {
 		aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
 		aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
 		aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
-- 
2.34.1
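
The #ifdef/IS_ENABLED checks above rely on a CONFIG_AS_GFNI symbol that
reflects whether the assembler can encode GFNI instructions. For reference,
a minimal sketch of how such an assembler-capability symbol is typically
defined in arch/x86/Kconfig.assembler; the exact probe instruction and the
binutils version noted below are assumptions, not part of this diff:

# Sketch only: gate GFNI code on assembler support, probed with as-instr.
# The probe instruction chosen here (vgf2p8mulb) is an assumption.
config AS_GFNI
	def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2)
	help
	  Supported by binutils >= 2.30 and LLVM integrated assembler

With such a symbol in place, the GFNI paths are compiled out entirely on old
toolchains: #ifdef CONFIG_AS_GFNI in the .S file and the
IS_ENABLED(CONFIG_AS_GFNI) check in aria_avx2_init() keep the assembly and
the ops wiring consistent.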