We are recalculating the RAM size continuously, even though we know it
doesn't change during migration. Create a field in RAMState to track it.

Signed-off-by: Juan Quintela <quintela@xxxxxxxxxx>
Reviewed-by: Philippe Mathieu-Daudé <philmd@xxxxxxxxxx>
---
 migration/ram.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 1727fe5ef6..6abfe075f2 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -330,6 +330,8 @@ struct RAMState {
     PageSearchStatus pss[RAM_CHANNEL_MAX];
     /* UFFD file descriptor, used in 'write-tracking' migration */
     int uffdio_fd;
+    /* total ram size in bytes */
+    uint64_t ram_bytes_total;
     /* Last block that we have visited searching for dirty pages */
     RAMBlock *last_seen_block;
     /* Last dirty target page we have sent */
@@ -2546,7 +2548,7 @@ static int ram_find_and_save_block(RAMState *rs)
     int pages = 0;
 
     /* No dirty page as there is zero RAM */
-    if (!ram_bytes_total()) {
+    if (!rs->ram_bytes_total) {
         return pages;
     }
 
@@ -3009,13 +3011,14 @@ static int ram_state_init(RAMState **rsp)
     qemu_mutex_init(&(*rsp)->bitmap_mutex);
     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
+    (*rsp)->ram_bytes_total = ram_bytes_total();
 
     /*
      * Count the total number of pages used by ram blocks not including any
      * gaps due to alignment or unplugs.
      * This must match with the initial values of dirty bitmap.
      */
-    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+    (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
     ram_state_reset(*rsp);
 
     return 0;
-- 
2.39.1
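
For readers not familiar with the QEMU code, here is a minimal, self-contained
sketch of the pattern the patch applies: compute an invariant total once at
init time, cache it in the state struct, and read the cached field on the hot
path instead of re-walking the block list. The Block/State types and
blocks_total_bytes() below are illustrative assumptions for the sketch, not
QEMU APIs.

/* sketch.c -- compute-once / cache pattern, illustrative only */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Block {
    uint64_t used_length;
    struct Block *next;
} Block;

typedef struct State {
    uint64_t bytes_total;   /* cached once at init, never recomputed */
} State;

/* Analogue of ram_bytes_total(): an O(n) walk over all blocks. */
static uint64_t blocks_total_bytes(const Block *head)
{
    uint64_t total = 0;
    for (const Block *b = head; b; b = b->next) {
        total += b->used_length;
    }
    return total;
}

/* Analogue of ram_state_init(): do the walk exactly once. */
static void state_init(State *s, const Block *head)
{
    s->bytes_total = blocks_total_bytes(head);
}

int main(void)
{
    Block b2 = { 2u << 20, NULL };
    Block b1 = { 4u << 20, &b2 };
    State s;

    state_init(&s, &b1);

    /* Hot path: a cheap field read replaces the repeated list walk,
     * which is safe because the total cannot change while migrating. */
    if (!s.bytes_total) {
        return 0;
    }
    printf("total = %" PRIu64 " bytes\n", s.bytes_total);
    return 0;
}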