Hello David Rientjes,

The patch b86bc2858b38: "KVM: SVM: prevent DBG_DECRYPT and
DBG_ENCRYPT overflow" from Mar 25, 2019, leads to the following static
checker warning:

        arch/x86/kvm/svm/sev.c:960 sev_dbg_crypt()
        error: uninitialized symbol 'ret'.

arch/x86/kvm/svm/sev.c
   879  static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
   880  {
   881          unsigned long vaddr, vaddr_end, next_vaddr;
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   882          unsigned long dst_vaddr;
                ^^^^^^^^^^^^^^^^^^^^^^^
These are unsigned long (so 32 bits on a 32 bit kernel).

   883          struct page **src_p, **dst_p;
   884          struct kvm_sev_dbg debug;
   885          unsigned long n;
   886          unsigned int size;
   887          int ret;
   888
   889          if (!sev_guest(kvm))
   890                  return -ENOTTY;
   891
   892          if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
   893                  return -EFAULT;
   894
   895          if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
                                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
But these are u64, so this check can pass and the unsigned long math
below can still overflow on 32 bit.  Do we care?

   896                  return -EINVAL;
   897          if (!debug.dst_uaddr)
   898                  return -EINVAL;
   899
   900          vaddr = debug.src_uaddr;
   901          size = debug.len;
   902          vaddr_end = vaddr + size;
   903          dst_vaddr = debug.dst_uaddr;
   904
   905          for (; vaddr < vaddr_end; vaddr = next_vaddr) {
   906                  int len, s_off, d_off;
   907
   908                  /* lock userspace source and destination page */
   909                  src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
   910                  if (IS_ERR(src_p))
   911                          return PTR_ERR(src_p);
   912
   913                  dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
   914                  if (IS_ERR(dst_p)) {
   915                          sev_unpin_memory(kvm, src_p, n);
   916                          return PTR_ERR(dst_p);
   917                  }
   918
   919                  /*
   920                   * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
   921                   * the pages; flush the destination too so that future accesses do not
   922                   * see stale data.
   923                   */
   924                  sev_clflush_pages(src_p, 1);
   925                  sev_clflush_pages(dst_p, 1);
   926
   927                  /*
   928                   * Since user buffer may not be page aligned, calculate the
   929                   * offset within the page.
   930                   */
   931                  s_off = vaddr & ~PAGE_MASK;
   932                  d_off = dst_vaddr & ~PAGE_MASK;
   933                  len = min_t(size_t, (PAGE_SIZE - s_off), size);
   934
   935                  if (dec)
   936                          ret = __sev_dbg_decrypt_user(kvm,
   937                                                       __sme_page_pa(src_p[0]) + s_off,
   938                                                       dst_vaddr,
   939                                                       __sme_page_pa(dst_p[0]) + d_off,
   940                                                       len, &argp->error);
   941                  else
   942                          ret = __sev_dbg_encrypt_user(kvm,
   943                                                       __sme_page_pa(src_p[0]) + s_off,
   944                                                       vaddr,
   945                                                       __sme_page_pa(dst_p[0]) + d_off,
   946                                                       dst_vaddr,
   947                                                       len, &argp->error);
   948
   949                  sev_unpin_memory(kvm, src_p, n);
   950                  sev_unpin_memory(kvm, dst_p, n);
   951
   952                  if (ret)
   953                          goto err;
   954
   955                  next_vaddr = vaddr + len;
   956                  dst_vaddr = dst_vaddr + len;
   957                  size -= len;
   958          }
   959  err:
   960          return ret;
   961  }

The warning is real: debug.len is checked for zero as a u64, but line
901 assigns it to "size" which is an unsigned int.  If debug.len is a
multiple of 4GB (0x100000000, say), then "size" becomes zero, the loop
on line 905 never runs, and line 960 returns "ret" without it ever
being initialized.

regards,
dan carpenter
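
P.S.  One idea for the direction of a fix, just an untested sketch:
refuse lengths that don't survive the assignment to "size" and give
"ret" a defined value in case the loop never runs.  The UINT_MAX cap
is my own invention here, not something the SEV API asks for:

        int ret = -EINVAL;      /* defined even if the loop never runs */
        ...
        /* sketch: reject lengths that "unsigned int size" would truncate */
        if (!debug.len || debug.len > UINT_MAX ||
            debug.src_uaddr + debug.len < debug.src_uaddr)
                return -EINVAL;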
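
P.P.S.  To make the 32 bit question concrete: src_uaddr == 0xfffff000
and len == 0x2000 give a u64 sum of 0x100001000, so the line 895 check
passes, but on a 32 bit kernel vaddr_end wraps to 0x1000, the loop is
skipped, and you hit the same uninitialized "ret".  A sketch (again
untested, and sitting on top of the existing !debug.len check) that
does the checks in the types the loop actually uses:

        vaddr = debug.src_uaddr;
        size = debug.len;
        /* sketch: catch u64 -> 32 bit truncation and unsigned long wrap */
        if (vaddr != debug.src_uaddr || size != debug.len ||
            vaddr + size < vaddr)
                return -EINVAL;
        vaddr_end = vaddr + size;

dst_vaddr would presumably want the same treatment.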