On 04/06/2010 03:51 AM, Yoshiaki Tamura wrote:
Signed-off-by: Yoshiaki Tamura <tamura.yoshiaki@xxxxxxxxxxxxx>
Signed-off-by: OHMURA Kei <ohmura.kei@xxxxxxxxxxxxx>
---
 static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
 {
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
+    unsigned long mask;
+    int index = (addr >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
+    int offset = (addr >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
+    int ret = 0;
+
+    mask = 1UL << offset;
+    if (phys_ram_dirty[MASTER_DIRTY_FLAG][index] & mask)
+        return 0xff;
+    if (phys_ram_dirty[VGA_DIRTY_FLAG][index] & mask)
+        ret |= VGA_DIRTY_FLAG;
+    if (phys_ram_dirty[CODE_DIRTY_FLAG][index] & mask)
+        ret |= CODE_DIRTY_FLAG;
+    if (phys_ram_dirty[MIGRATION_DIRTY_FLAG][index] & mask)
+        ret |= MIGRATION_DIRTY_FLAG;
+
+    return ret;
 }
Again, nicer as a loop.
I think if you define both *_DIRTY_FLAG and *_DIRTY_IDX constants, the
transition patches can be nicer.
Coding style: use braces after if (), even for single statements.
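Something along these lines, for instance (just a sketch, completely
untested; the *_DIRTY_IDX / NB_DIRTY_IDX names and the idx-to-flag table
are invented here, and assume phys_ram_dirty[] becomes indexed by
*_DIRTY_IDX rather than by the flag values):

#define MASTER_DIRTY_IDX     0
#define VGA_DIRTY_IDX        1
#define CODE_DIRTY_IDX       2
#define MIGRATION_DIRTY_IDX  3
#define NB_DIRTY_IDX         4

/* maps a bitmap index back to the flag value it represents */
static const int dirty_idx_to_flag[NB_DIRTY_IDX] = {
    [VGA_DIRTY_IDX]       = VGA_DIRTY_FLAG,
    [CODE_DIRTY_IDX]      = CODE_DIRTY_FLAG,
    [MIGRATION_DIRTY_IDX] = MIGRATION_DIRTY_FLAG,
};

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    int index = (addr >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
    int offset = (addr >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
    unsigned long mask = 1UL << offset;
    int ret = 0, i;

    /* a set master bit means every flag is set */
    if (phys_ram_dirty[MASTER_DIRTY_IDX][index] & mask) {
        return 0xff;
    }
    /* otherwise collect the individual client bitmaps */
    for (i = VGA_DIRTY_IDX; i < NB_DIRTY_IDX; i++) {
        if (phys_ram_dirty[i][index] & mask) {
            ret |= dirty_idx_to_flag[i];
        }
    }
    return ret;
}

With the *_DIRTY_IDX constants the loop body stays trivial, and later
patches can switch callers over one flag at a time.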
 static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                 int dirty_flags)
 {
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
+    unsigned long mask;
+    int index = (addr >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
+    int offset = (addr >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
+
+    mask = 1UL << offset;
+    return (phys_ram_dirty[MASTER_DIRTY_FLAG][index] & mask) ||
+           (phys_ram_dirty[dirty_flags][index] & mask);
 }
A helper that also accepts the DIRTY_IDX index can increase reuse.
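E.g. (same disclaimer: the names below are invented, building on the
*_DIRTY_IDX constants sketched above):

static inline int dirty_flag_to_idx(int flag)
{
    /* illustration only; a lookup table or ffs() would also do */
    return flag == VGA_DIRTY_FLAG ? VGA_DIRTY_IDX :
           flag == CODE_DIRTY_FLAG ? CODE_DIRTY_IDX : MIGRATION_DIRTY_IDX;
}

static inline int cpu_physical_memory_get_dirty_idx(ram_addr_t addr,
                                                    int dirty_idx)
{
    int index = (addr >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
    int offset = (addr >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
    unsigned long mask = 1UL << offset;

    return (phys_ram_dirty[MASTER_DIRTY_IDX][index] & mask) ||
           (phys_ram_dirty[dirty_idx][index] & mask);
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return cpu_physical_memory_get_dirty_idx(addr,
                                             dirty_flag_to_idx(dirty_flags));
}

Then the flag-based wrappers become one-liners and new callers can use
the idx variant directly.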
 static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
 {
-    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
+    unsigned long mask;
+    int index = (addr >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
+    int offset = (addr >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
+
+    mask = 1UL << offset;
+    phys_ram_dirty[MASTER_DIRTY_FLAG][index] |= mask;
 }
-static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
-                                                      int dirty_flags)
+static inline void cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+                                                       int dirty_flags)
 {
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+    unsigned long mask;
+    int index = (addr >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
+    int offset = (addr >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
+
+    mask = 1UL << offset;
+    if (dirty_flags & VGA_DIRTY_FLAG)
+        phys_ram_dirty[VGA_DIRTY_FLAG][index] |= mask;
+    if (dirty_flags & CODE_DIRTY_FLAG)
+        phys_ram_dirty[CODE_DIRTY_FLAG][index] |= mask;
+    if (dirty_flags & MIGRATION_DIRTY_FLAG)
+        phys_ram_dirty[MIGRATION_DIRTY_FLAG][index] |= mask;
 }
Is it necessary to update the migration and vga bitmaps here?
We can simply update the master bitmap, and bring the migration and vga
bitmaps up to date only when they are actually needed. That can be done
in a different patch.
Note that we should only allocate the migration and vga bitmaps when
migration or vga is active.
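Roughly like this (again only a sketch of the idea, reusing the invented
*_DIRTY_IDX naming from above and assuming phys_ram_dirty[] becomes an
array of lazily allocated bitmap pointers; the sync helper and its name
are not in this patch):

/* writers only touch the master bitmap */
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    int index = (addr >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
    int offset = (addr >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);

    phys_ram_dirty[MASTER_DIRTY_IDX][index] |= 1UL << offset;
}

/* a client (vga, migration) pulls the master bits into its own bitmap,
 * which is only allocated while that client is active, before reading
 * it; the master bits can only be cleared once every active client has
 * pulled them, but that bookkeeping can wait for a separate patch */
static void cpu_physical_memory_sync_dirty_bitmap(int dirty_idx,
                                                  ram_addr_t start,
                                                  ram_addr_t end)
{
    int first = (start >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
    int last = (end >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
    int i;

    if (!phys_ram_dirty[dirty_idx]) {
        return;    /* client inactive, bitmap never allocated */
    }
    for (i = first; i <= last; i++) {
        phys_ram_dirty[dirty_idx][i] |= phys_ram_dirty[MASTER_DIRTY_IDX][i];
    }
}

That way the hot path stays a single |=, and an inactive vga or
migration costs nothing.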
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.