Use array_size to protect against multiplication overflows.

The changes were done using the following Coccinelle semantic patch:

// <smpl>
@@
expression E1, E2;
constant C1, C2;
identifier alloc = {vmalloc,vzalloc};
@@

(
  alloc(C1 * C2,...)
|
  alloc(
-	(E1) * (E2)
+	array_size(E1, E2)
  ,...)
)
// </smpl>

Signed-off-by: Julia Lawall <Julia.Lawall@xxxxxxxx>

---
 arch/x86/kernel/cpu/sgx/main.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 166692f2d501..3a234942c586 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -628,7 +628,8 @@ static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
 	if (!section->virt_addr)
 		return false;
 
-	section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
+	section->pages = vmalloc(array_size(nr_pages,
+					    sizeof(struct sgx_epc_page)));
 	if (!section->pages) {
 		memunmap(section->virt_addr);
 		return false;
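
Note: for readers unfamiliar with array_size(), the following is a minimal
user-space sketch of the idea behind the conversion, assuming GCC/Clang's
__builtin_mul_overflow (which mirrors the kernel's check_mul_overflow()).
The helper name my_array_size() is hypothetical; this is not the actual
include/linux/overflow.h implementation, only an illustration of why a
checked multiplication that saturates to SIZE_MAX makes the allocation
fail cleanly instead of returning an undersized buffer.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's array_size(): returns n * elem,
 * or SIZE_MAX if the multiplication would overflow. */
static size_t my_array_size(size_t n, size_t elem)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, elem, &bytes))
		return SIZE_MAX;	/* saturate: any allocator will reject this */
	return bytes;
}

int main(void)
{
	size_t n = SIZE_MAX / 2 + 2;	/* large element count */

	/* The open-coded multiplication silently wraps to a tiny size... */
	printf("naive:      %zu\n", n * 2);
	/* ...while the checked version saturates to SIZE_MAX. */
	printf("array_size: %zu\n", my_array_size(n, 2));
	return 0;
}

With a wrapped size, vmalloc() would succeed but hand back a buffer far
smaller than the nr_pages entries the caller expects; with the saturated
size, the allocation simply fails and the error path runs.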