On 20/02/20 14:05, Philippe Mathieu-Daudé wrote:
> Signed-off-by: Philippe Mathieu-Daudé <philmd@xxxxxxxxxx>
> ---
>  include/exec/cpu-common.h     | 6 +++---
>  include/sysemu/xen-mapcache.h | 4 ++--
>  exec.c                        | 8 ++++----
>  hw/i386/xen/xen-mapcache.c    | 2 +-
>  4 files changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
> index 81753bbb34..05ac1a5d69 100644
> --- a/include/exec/cpu-common.h
> +++ b/include/exec/cpu-common.h
> @@ -48,11 +48,11 @@ typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
>  
>  void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
>  /* This should not be used by devices. */
> -ram_addr_t qemu_ram_addr_from_host(void *ptr);
> +ram_addr_t qemu_ram_addr_from_host(const void *ptr);

This is a bit ugly, because the pointer _can_ be modified via
qemu_map_ram_ptr.  Is this needed for the rest of the series to apply?

Paolo

>  RAMBlock *qemu_ram_block_by_name(const char *name);
> -RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
> +RAMBlock *qemu_ram_block_from_host(const void *ptr, bool round_offset,
>                                     ram_addr_t *offset);
> -ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
> +ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, const void *host);
>  void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
>  void qemu_ram_unset_idstr(RAMBlock *block);
>  const char *qemu_ram_get_idstr(RAMBlock *rb);
> diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
> index c8e7c2f6cf..81e9aa2fa6 100644
> --- a/include/sysemu/xen-mapcache.h
> +++ b/include/sysemu/xen-mapcache.h
> @@ -19,7 +19,7 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f,
>                          void *opaque);
>  uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
>                         uint8_t lock, bool dma);
> -ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
> +ram_addr_t xen_ram_addr_from_mapcache(const void *ptr);
>  void xen_invalidate_map_cache_entry(uint8_t *buffer);
>  void xen_invalidate_map_cache(void);
>  uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
> @@ -40,7 +40,7 @@ static inline uint8_t *xen_map_cache(hwaddr phys_addr,
>      abort();
>  }
>  
> -static inline ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
> +static inline ram_addr_t xen_ram_addr_from_mapcache(const void *ptr)
>  {
>      abort();
>  }
> diff --git a/exec.c b/exec.c
> index 8e9cc3b47c..02b4e6ea41 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2614,7 +2614,7 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
>  }
>  
>  /* Return the offset of a hostpointer within a ramblock */
> -ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
> +ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, const void *host)
>  {
>      ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
>      assert((uintptr_t)host >= (uintptr_t)rb->host);
> @@ -2640,11 +2640,11 @@ ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
>   * pointer, such as a reference to the region that includes the incoming
>   * ram_addr_t.
>   */
> -RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
> +RAMBlock *qemu_ram_block_from_host(const void *ptr, bool round_offset,
>                                     ram_addr_t *offset)
>  {
>      RAMBlock *block;
> -    uint8_t *host = ptr;
> +    const uint8_t *host = ptr;
>  
>      if (xen_enabled()) {
>          ram_addr_t ram_addr;
> @@ -2705,7 +2705,7 @@ RAMBlock *qemu_ram_block_by_name(const char *name)
>  
>  /* Some of the softmmu routines need to translate from a host pointer
>     (typically a TLB entry) back to a ram offset. */
> -ram_addr_t qemu_ram_addr_from_host(void *ptr)
> +ram_addr_t qemu_ram_addr_from_host(const void *ptr)
>  {
>      RAMBlock *block;
>      ram_addr_t offset;
> diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
> index 5b120ed44b..432ad3354d 100644
> --- a/hw/i386/xen/xen-mapcache.c
> +++ b/hw/i386/xen/xen-mapcache.c
> @@ -363,7 +363,7 @@ uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
>      return p;
>  }
>  
> -ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
> +ram_addr_t xen_ram_addr_from_mapcache(const void *ptr)
>  {
>      MapCacheEntry *entry = NULL;
>      MapCacheRev *reventry;
> 
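
For context, a minimal sketch of the loophole Paolo points at: even with the
constified prototypes from this patch, a caller can turn the const pointer
back into a writable alias of the same guest RAM through the owning RAMBlock.
Only qemu_ram_block_from_host() and qemu_map_ram_ptr() are existing QEMU
functions; the helper name writable_alias_of() and the include list are
illustrative assumptions, and the snippet only builds inside the QEMU tree
(softmmu code), assuming the constified prototype proposed above.

/*
 * Illustrative sketch, not part of the patch: writable_alias_of() is a
 * hypothetical helper showing why the const qualifier is only skin-deep.
 */
#include "qemu/osdep.h"
#include "exec/cpu-common.h"
#include "exec/ram_addr.h"

static void *writable_alias_of(const void *ptr)
{
    ram_addr_t offset;
    /* Find the RAMBlock owning this host pointer and the offset within it. */
    RAMBlock *rb = qemu_ram_block_from_host(ptr, false, &offset);

    if (!rb) {
        return NULL;                /* not a pointer into guest RAM */
    }
    /*
     * qemu_map_ram_ptr() hands back a non-const pointer into the very same
     * backing memory, so the pointee of the "const void *" argument can
     * still be modified -- the "a bit ugly" part of advertising const here.
     */
    return qemu_map_ram_ptr(rb, offset);
}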