ARGH! I think the attachment was lost :-( Trying again. Jes >>>>> "Jes" == Jes Sorensen <jes@xxxxxxx> writes: Jes> Hi, Looking at Avi's comments, I came up with a slightly modified Jes> version of this patch, which avoids the #ifdef __ia64__ problem Jes> by introducing dma_flush_range(). Jes> I have made it a noop on non ia64, but it may need to be changed Jes> for PPC? Hollis would you check if you guys need this too? Jes> It also fixes the problem of trying to flush a zero sized block Jes> in cpu_physical_memory_unmap(). Jes> Xiantao, are you ok with this version? Jes> Cheers, Jes The ia64 platform depends on the chipset issuing snoop cycles to flush the icache for memory touched by DMA write operations, but virtual DMA operations are emulated by memcpy, so use explicit instructions to flush the related icache lines; otherwise, the guest may execute stale icache contents. Slightly modified version of Xiantao's patch, which avoids the #ifdef's for ia64 by introducing a dma_flush_range() function defined as a noop on architectures which do not need it. Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx> Signed-off-by: Jes Sorensen <jes@xxxxxxx> --- cache-utils.h | 19 +++++++++++++++++++ cutils.c | 5 +++++ dma-helpers.c | 12 ++++++++++++ exec.c | 7 ++++++- target-ia64/cpu.h | 1 - target-ia64/fake-exec.c | 9 --------- 6 files changed, 42 insertions(+), 11 deletions(-) Index: qemu-kvm/cache-utils.h =================================================================== --- qemu-kvm.orig/cache-utils.h +++ qemu-kvm/cache-utils.h @@ -34,7 +34,26 @@ asm volatile ("isync" : : : "memory"); } +/* + * Is this correct for PPC? 
+ */ +#define dma_flush_range(start, end) \ + do { (void) (start); (void) (end); } while (0) + +#elif defined(__ia64__) +static inline void flush_icache_range(unsigned long start, unsigned long stop) +{ + while (start < stop) { + asm volatile ("fc %0" :: "r"(start)); + start += 32; + } + asm volatile (";;sync.i;;srlz.i;;"); +} +#define dma_flush_range(start, end) flush_icache_range(start, end) +#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0) #else +#define dma_flush_range(start, end) \ + do { (void) (start); (void) (end); } while (0) #define qemu_cache_utils_init(envp) do { (void) (envp); } while (0) #endif Index: qemu-kvm/cutils.c =================================================================== --- qemu-kvm.orig/cutils.c +++ qemu-kvm/cutils.c @@ -24,6 +24,7 @@ #include "qemu-common.h" #include "host-utils.h" #include <assert.h> +#include "cache-utils.h" void pstrcpy(char *buf, int buf_size, const char *str) { @@ -176,6 +177,10 @@ if (copy > qiov->iov[i].iov_len) copy = qiov->iov[i].iov_len; memcpy(qiov->iov[i].iov_base, p, copy); + + dma_flush_range((unsigned long)qiov->iov[i].iov_base, + (unsigned long)qiov->iov[i].iov_base + copy); + p += copy; count -= copy; } Index: qemu-kvm/dma-helpers.c =================================================================== --- qemu-kvm.orig/dma-helpers.c +++ qemu-kvm/dma-helpers.c @@ -9,6 +9,7 @@ #include "dma.h" #include "block_int.h" +#include "cache-utils.h" static AIOPool dma_aio_pool; @@ -149,6 +150,17 @@ dbs->bh = NULL; qemu_iovec_init(&dbs->iov, sg->nsg); dma_bdrv_cb(dbs, 0); + + if (!is_write) { + int i; + QEMUIOVector *qiov; + qiov = &dbs->iov; + for (i = 0; i < qiov->niov; ++i) { + dma_flush_range((unsigned long)qiov->iov[i].iov_base, + (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len)); + } + } + if (!dbs->acb) { qemu_aio_release(dbs); return NULL; Index: qemu-kvm/exec.c =================================================================== --- qemu-kvm.orig/exec.c +++ qemu-kvm/exec.c 
@@ -35,6 +35,7 @@ #include "cpu.h" #include "exec-all.h" #include "qemu-common.h" +#include "cache-utils.h" #if !defined(TARGET_IA64) #include "tcg.h" @@ -3385,6 +3386,8 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, int is_write, target_phys_addr_t access_len) { + unsigned long flush_len = (unsigned long)access_len; + if (buffer != bounce.buffer) { if (is_write) { ram_addr_t addr1 = qemu_ram_addr_from_host(buffer); @@ -3402,7 +3405,9 @@ } addr1 += l; access_len -= l; - } + } + dma_flush_range((unsigned long)buffer, + (unsigned long)buffer + flush_len); } return; } Index: qemu-kvm/target-ia64/cpu.h =================================================================== --- qemu-kvm.orig/target-ia64/cpu.h +++ qemu-kvm/target-ia64/cpu.h @@ -73,7 +73,6 @@ * These ones really should go to the appropriate tcg header file, if/when * tcg support is added for ia64. */ -void flush_icache_range(unsigned long start, unsigned long stop); void tcg_dump_info(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)); Index: qemu-kvm/target-ia64/fake-exec.c =================================================================== --- qemu-kvm.orig/target-ia64/fake-exec.c +++ qemu-kvm/target-ia64/fake-exec.c @@ -41,15 +41,6 @@ return; } -void flush_icache_range(unsigned long start, unsigned long stop) -{ - while (start < stop) { - asm volatile ("fc %0" :: "r"(start)); - start += 32; - } - asm volatile (";;sync.i;;srlz.i;;"); -} - int cpu_restore_state(TranslationBlock *tb, CPUState *env, unsigned long searched_pc, void *puc) -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html