Zhang, Xiantao wrote:
Avi
This is the new patch for the icache flush after DMA emulation for ia64, and it should address Hollis's comments.
Xiantao
From 60a27e2ea9758c97e974aa5bb1925ad4ed045c5f Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
Date: Mon, 11 May 2009 18:04:15 +0800
Subject: [PATCH] qemu-kvm: Flush icache after dma operations for ia64
ia64 systems depend on the platform issuing snoop cycles to flush the
icache for memory touched by DMA write operations, but virtual DMA
operations are emulated by memcpy, so use explicit instructions to flush
the related icache; otherwise, the guest may use a stale icache.
+#elif defined(__ia64__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ while (start < stop) {
+ asm volatile ("fc %0" :: "r"(start));
+ start += 32;
+ }
+ asm volatile (";;sync.i;;srlz.i;;");
+}
+#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
#else
#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
#endif
diff --git a/cutils.c b/cutils.c
index a1652ab..6b7d506 100644
--- a/cutils.c
+++ b/cutils.c
@@ -25,6 +25,10 @@
#include "host-utils.h"
#include <assert.h>
+#ifdef __ia64__
+#include "cache-utils.h"
+#endif
#includes should be unconditional.
+
void pstrcpy(char *buf, int buf_size, const char *str)
{
int c;
@@ -176,6 +180,16 @@ void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
if (copy > qiov->iov[i].iov_len)
copy = qiov->iov[i].iov_len;
memcpy(qiov->iov[i].iov_base, p, copy);
+
+ /* ia64 systems depend on the platform issuing snoop cycles to flush
+ * the icache for memory touched by DMA write operations, but virtual DMA
+ * operations are emulated by memcpy, so use explicit instructions to flush
+ * the related icache; otherwise, the guest may use a stale icache. */
+#ifdef __ia64__
+ flush_icache_range((unsigned long)qiov->iov[i].iov_base,
+ (unsigned long)(qiov->iov[i].iov_base + copy));
+#endif
Instead of the #ifdef, please add a flush_icache_range_after_dma()
function which does flush_icache_range() for ia64 and nothing for other
architectures. This avoids the #ifdef everywhere, and you will only
need one copy of the comment.
I don't think you need to flush here. Instead, put the flush in
cpu_physical_memory_unmap(). Every dma access should invoke that. If
you find a path which doesn't, it should be fixed.
+#ifdef __ia64__
+#include "cache-utils.h"
+#endif
Unconditional #include.
+
static AIOPool dma_aio_pool;
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
@@ -149,6 +153,23 @@ static BlockDriverAIOCB *dma_bdrv_io(
dbs->bh = NULL;
qemu_iovec_init(&dbs->iov, sg->nsg);
dma_bdrv_cb(dbs, 0);
+
+ /* ia64 systems depend on the platform issuing snoop cycles to flush
+ * the icache for memory touched by DMA write operations, but virtual DMA
+ * operations are emulated by memcpy, so use explicit instructions to flush
+ * the related icache; otherwise, the guest may use a stale icache. */
+#ifdef __ia64__
+ int i;
+ QEMUIOVector *qiov;
+ if (!is_write) {
+ qiov = &dbs->iov;
+ for (i = 0; i < qiov->niov; ++i) {
+ flush_icache_range((unsigned long)qiov->iov[i].iov_base,
+ (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
+ }
+ }
+#endif
If you move the flush to cpu_physical_memory_unmap(), this can go away.
+
if (!dbs->acb) {
qemu_aio_release(dbs);
return NULL;
diff --git a/exec.c b/exec.c
index 29c91fb..170ede1 100644
--- a/exec.c
+++ b/exec.c
@@ -35,6 +35,7 @@
#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
+#include "cache-utils.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
@@ -3402,8 +3403,12 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
}
addr1 += l;
access_len -= l;
- }
- }
+ }
+#ifdef TARGET_IA64
+ flush_icache_range((unsigned long)buffer,
+ (unsigned long)buffer + access_len);
+#endif
+ }
Whitespace damage.
access_len here is zero, so this does nothing.
--
error compiling committee.c: too many arguments to function
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html