Re: [PATCH 04/10] s390/mm: force swiotlb for protected virtualization


 



Subject: [PATCH 04/10] s390/mm: force swiotlb for protected virtualization
Date: Fri, 26 Apr 2019 20:32:39 +0200
From: Halil Pasic <pasic@xxxxxxxxxxxxx>
To: kvm@xxxxxxxxxxxxxxx, linux-s390@xxxxxxxxxxxxxxx, Cornelia Huck <cohuck@xxxxxxxxxx>, Martin Schwidefsky <schwidefsky@xxxxxxxxxx>, Sebastian Ott <sebott@xxxxxxxxxxxxx>
CC: Halil Pasic <pasic@xxxxxxxxxxxxx>, virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx, Michael S. Tsirkin <mst@xxxxxxxxxx>, Christoph Hellwig <hch@xxxxxxxxxxxxx>, Thomas Huth <thuth@xxxxxxxxxx>, Christian Borntraeger <borntraeger@xxxxxxxxxx>, Viktor Mihajlovski <mihajlov@xxxxxxxxxxxxx>, Vasily Gorbik <gor@xxxxxxxxxxxxx>, Janosch Frank <frankja@xxxxxxxxxxxxx>, Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>, Farhan Ali <alifm@xxxxxxxxxxxxx>, Eric Farman <farman@xxxxxxxxxxxxx>

On s390, protected virtualization guests have to use bounced I/O
buffers.  That requires some plumbing.

Let us make sure that any device that correctly uses the DMA API with
direct ops is spared the problems that a hypervisor attempting I/O to
a non-shared page would bring.

Signed-off-by: Halil Pasic <pasic@xxxxxxxxxxxxx>
---
  arch/s390/Kconfig                   |  4 +++
  arch/s390/include/asm/mem_encrypt.h | 18 +++++++++++++
  arch/s390/mm/init.c                 | 50 +++++++++++++++++++++++++++++++++++++
  3 files changed, 72 insertions(+)
  create mode 100644 arch/s390/include/asm/mem_encrypt.h

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1c3fcf19c3af..5500d05d4d53 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,4 +1,7 @@
  # SPDX-License-Identifier: GPL-2.0
+config ARCH_HAS_MEM_ENCRYPT
+        def_bool y
+
  config MMU
      def_bool y

@@ -191,6 +194,7 @@ config S390
      select ARCH_HAS_SCALED_CPUTIME
      select VIRT_TO_BUS
      select HAVE_NMI
+    select SWIOTLB

  config SCHED_OMIT_FRAME_POINTER
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
new file mode 100644
index 000000000000..0898c09a888c
--- /dev/null
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask    0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+

I noticed this patch always returns false for sme_active(). Is it safe to assume that whatever fixups are required on x86 to deal with SME do not apply to s390?
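For context, a rough user-space model of the split I am asking about (illustrative only; apart from sme_active()/sev_active(), the names and return values below are stand-ins, not kernel code). The question relevant to bounce buffering is the sev_active()-style one, which this patch wires up to is_prot_virt_guest():

/* Illustrative model, not kernel code: sme_active() covers host-side
 * transparent encryption (x86 SME), sev_active() covers the guest-side
 * case where DMA buffers must be made shared/decrypted. On s390 only
 * the guest-side case exists, hence the constant false for sme_active().
 */
#include <stdbool.h>
#include <stdio.h>

static bool sme_active(void) { return false; } /* s390: no SME equivalent */
static bool sev_active(void) { return true;  } /* stand-in for is_prot_virt_guest() */

int main(void)
{
        if (sev_active())
                printf("guest-side encryption: force swiotlb and share bounce pages\n");
        else if (sme_active())
                printf("host-side encryption only: no guest bouncing needed\n");
        else
                printf("no memory encryption active\n");
        return 0;
}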

+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif    /* __ASSEMBLY__ */
+
+#endif    /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3e82f66d5c61..7e3cbd15dcfa 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@
  #include <linux/mman.h>
  #include <linux/mm.h>
  #include <linux/swap.h>
+#include <linux/swiotlb.h>
  #include <linux/smp.h>
  #include <linux/init.h>
  #include <linux/pagemap.h>
@@ -29,6 +30,7 @@
  #include <linux/export.h>
  #include <linux/cma.h>
  #include <linux/gfp.h>
+#include <linux/dma-mapping.h>
  #include <asm/processor.h>
  #include <linux/uaccess.h>
  #include <asm/pgtable.h>
@@ -42,6 +44,8 @@
  #include <asm/sclp.h>
  #include <asm/set_memory.h>
  #include <asm/kasan.h>
+#include <asm/dma-mapping.h>
+#include <asm/uv.h>

  pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

@@ -126,6 +130,50 @@ void mark_rodata_ro(void)
      pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
  }

+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+    int i;
+
+    /* make all pages shared, (swiotlb, dma_free) */

Should this comment read "make all pages unshared"?

+    for (i = 0; i < numpages; ++i) {
+        uv_remove_shared(addr);
+        addr += PAGE_SIZE;
+    }
+    return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_encrypted);
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+    int i;
+    /* make all pages shared (swiotlb, dma_alloc) */
+    for (i = 0; i < numpages; ++i) {
+        uv_set_shared(addr);
+        addr += PAGE_SIZE;
+    }
+    return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_decrypted);

The addr arguments to the above functions appear to refer to virtual addresses. Would vaddr be a better name?
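To illustrate the calling convention, a stand-alone sketch (not kernel code; the allocation below merely stands in for a swiotlb-style bounce pool): the caller passes a virtual address cast to unsigned long plus a count of PAGE_SIZE pages, which is why vaddr seems like the clearer name.

/* Stand-alone sketch of the calling convention; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Same signature as the patch introduces; the body is a stub. */
static int set_memory_decrypted(unsigned long vaddr, int numpages)
{
        int i;

        for (i = 0; i < numpages; ++i) {
                /* real code: uv_set_shared(vaddr); */
                printf("share page at virtual address %#lx\n", vaddr);
                vaddr += PAGE_SIZE;
        }
        return 0;
}

int main(void)
{
        size_t bytes = 4 * PAGE_SIZE;           /* a bounce-buffer-like region */
        void *buf = aligned_alloc(PAGE_SIZE, bytes);

        if (!buf)
                return 1;
        /* The argument is the region's virtual address, cast to unsigned long. */
        set_memory_decrypted((unsigned long)buf, bytes / PAGE_SIZE);
        free(buf);
        return 0;
}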

+
+/* are we a protected virtualization guest? */
+bool sev_active(void)
+{
+    return is_prot_virt_guest();
+}
+EXPORT_SYMBOL_GPL(sev_active);
+
+/* protected virtualization */
+static void pv_init(void)
+{
+    if (!sev_active())
+        return;
+
+    /* make sure bounce buffers are shared */
+    swiotlb_init(1);
+    swiotlb_update_mem_attributes();
+    swiotlb_force = SWIOTLB_FORCE;
+}
+
  void __init mem_init(void)
  {
      cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -134,6 +182,8 @@ void __init mem_init(void)
      set_max_mapnr(max_low_pfn);
      high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

+    pv_init();
+
      /* Setup guest page hinting */
      cmma_init();
--
2.16.4
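As a closing note on the commit message above, a rough model of why forcing swiotlb suffices for devices going through the DMA API with direct ops (illustrative user-space code, not the kernel's DMA path; the function and buffer names are made up): once swiotlb is forced, every streaming mapping is redirected through the bounce pool, which pv_init() has already made shared via set_memory_decrypted().

/* Illustrative model only -- not the kernel's DMA direct path. */
#include <stdio.h>
#include <string.h>

enum force_mode { MODEL_SWIOTLB_NORMAL, MODEL_SWIOTLB_FORCE };
static enum force_mode swiotlb_force_model = MODEL_SWIOTLB_FORCE;

static char bounce_pool[4096];  /* stands in for the shared swiotlb pages */

/* Model of a streaming DMA map: bounce whenever forcing is enabled. */
static void *dma_map_model(void *cpu_addr, size_t len)
{
        if (swiotlb_force_model == MODEL_SWIOTLB_FORCE) {
                memcpy(bounce_pool, cpu_addr, len);  /* copy into shared pages */
                return bounce_pool;                  /* device only sees shared memory */
        }
        return cpu_addr;                             /* direct mapping of private memory */
}

int main(void)
{
        char guest_data[64] = "private guest page the hypervisor must not access";
        void *dev_addr = dma_map_model(guest_data, sizeof(guest_data));

        printf("device I/O hits %s\n",
               dev_addr == (void *)bounce_pool ? "the shared bounce pool"
                                               : "private guest memory");
        return 0;
}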



--
Jason J. Herne (jjherne@xxxxxxxxxxxxx)



