These are some utility functions that will be used later on for storage
attributes migration: gfn_to_memslot_approx(), which returns a usable
memslot index even when the given address falls in a hole between
memslots, and two small helpers that return the address of the second
half of a memslot's dirty bitmap, which kvm_get_dirty_log_protect()
uses as a scratch buffer.

Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxxxxxxx>
---
 arch/s390/kvm/kvm-s390.c | 33 +++++++++++++++++++++++++++++++++
 arch/s390/kvm/kvm-s390.h |  7 +++++++
 include/linux/kvm_host.h |  7 +++++++
 virt/kvm/kvm_main.c      |  2 +-
 4 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1371dff..d0b9c92 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1504,6 +1504,39 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
 
 /*
+ * Similar to gfn_to_memslot, but returns the index of a memslot also when the
+ * address falls in a hole. In that case the index of one of the memslots
+ * bordering the hole is returned.
+ */
+static int gfn_to_memslot_approx(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int start = 0, end = slots->used_slots;
+	int slot = atomic_read(&slots->lru_slot);
+	struct kvm_memory_slot *memslots = slots->memslots;
+
+	if (gfn >= memslots[slot].base_gfn &&
+	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
+		return slot;
+
+	while (start < end) {
+		slot = start + (end - start) / 2;
+
+		if (gfn >= memslots[slot].base_gfn)
+			end = slot;
+		else
+			start = slot + 1;
+	}
+
+	if (gfn >= memslots[start].base_gfn &&
+	    gfn < memslots[start].base_gfn + memslots[start].npages) {
+		atomic_set(&slots->lru_slot, start);
+	}
+
+	return start;
+}
+
+/*
  * This function searches for the next page with dirty CMMA attributes, and
  * saves the attributes in the buffer up to either the end of the buffer or
  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 5e46ba4..8086d69 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -183,6 +183,13 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
 	return kvm->arch.user_cpu_state_ctrl != 0;
 }
 
+static inline unsigned long *cmma_bitmap(struct kvm_memory_slot *slot)
+{
+	unsigned long len = kvm_dirty_bitmap_bytes(slot);
+
+	return slot->dirty_bitmap + len / sizeof(*slot->dirty_bitmap);
+}
+
 /* implemented in interrupt.c */
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6bdd4b9..ea524a8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -308,6 +308,13 @@ static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memsl
 	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
 }
 
+static inline unsigned long *kvm_shadow_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	unsigned long len = kvm_dirty_bitmap_bytes(memslot);
+
+	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
+}
+
 struct kvm_s390_adapter_int {
 	u64 ind_addr;
 	u64 summary_addr;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 210bf82..9e18331 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1158,7 +1158,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 
 	n = kvm_dirty_bitmap_bytes(memslot);
 
-	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+	dirty_bitmap_buffer = kvm_shadow_dirty_bitmap(memslot);
 	memset(dirty_bitmap_buffer, 0, n);
 
 	spin_lock(&kvm->mmu_lock);
-- 
2.7.4
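
A note on the binary search in gfn_to_memslot_approx(): it relies on
kvm_memslots keeping the slots sorted by descending base_gfn, which is
what makes "gfn >= base_gfn, keep searching towards lower indices"
converge on the right slot. The following stand-alone sketch models the
same lookup in user space, with the lru_slot cache dropped and a
simplified struct memslot standing in for the kernel types; the slot
layout in main() is made up for illustration:

#include <stdio.h>

typedef unsigned long long gfn_t;

/* Simplified stand-in for the kernel's memslot; entries are kept
 * sorted by descending base_gfn, as in struct kvm_memslots. */
struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
};

/* Model of gfn_to_memslot_approx(): returns the index of the slot
 * containing gfn or, if gfn falls in a hole, the index of the slot
 * bordering the hole from below (in gfn terms). */
static int gfn_to_memslot_approx(struct memslot *slots, int used, gfn_t gfn)
{
	int start = 0, end = used;

	while (start < end) {
		int slot = start + (end - start) / 2;

		/* Descending order: a gfn at or above this slot's base
		 * can only belong to this index or an earlier one. */
		if (gfn >= slots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}
	return start;
}

int main(void)
{
	/* Two slots with a hole between gfn 0x300 and gfn 0x1000. */
	struct memslot slots[] = {
		{ .base_gfn = 0x1000, .npages = 0x100 },	/* index 0 */
		{ .base_gfn = 0x100,  .npages = 0x200 },	/* index 1 */
	};

	printf("0x1050 -> %d (inside slot 0)\n",
	       gfn_to_memslot_approx(slots, 2, 0x1050));
	printf("0x0800 -> %d (in the hole, borders it)\n",
	       gfn_to_memslot_approx(slots, 2, 0x800));
	printf("0x0150 -> %d (inside slot 1)\n",
	       gfn_to_memslot_approx(slots, 2, 0x150));
	return 0;
}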
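
The two bitmap helpers (cmma_bitmap() and kvm_shadow_dirty_bitmap())
do the same pointer arithmetic: the dirty_bitmap allocation is twice
kvm_dirty_bitmap_bytes() in size, the second half is used as a scratch
buffer, and its start is found by converting the byte length into a
count of unsigned longs before the pointer addition (which is also what
the open-coded "dirty_bitmap + n / sizeof(long)" in kvm_main.c did
before this patch). Below is a user-space sketch of that layout,
assuming the same 2x allocation the kernel performs; struct
memslot_model and the numbers are illustrative only:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

struct memslot_model {
	unsigned long npages;
	unsigned long *dirty_bitmap;	/* 2x sized: bitmap + scratch */
};

/* One bitmap's size in bytes, as in kvm_dirty_bitmap_bytes(). */
static unsigned long dirty_bitmap_bytes(struct memslot_model *slot)
{
	return ALIGN(slot->npages, BITS_PER_LONG) / 8;
}

/* Start of the second half, as in kvm_shadow_dirty_bitmap(): the byte
 * length is scaled down to a count of longs for the pointer addition. */
static unsigned long *shadow_dirty_bitmap(struct memslot_model *slot)
{
	unsigned long len = dirty_bitmap_bytes(slot);

	return slot->dirty_bitmap + len / sizeof(*slot->dirty_bitmap);
}

int main(void)
{
	struct memslot_model slot = { .npages = 1000 };
	unsigned long len = dirty_bitmap_bytes(&slot);

	/* Mimic the kernel's allocation of two bitmaps back to back. */
	slot.dirty_bitmap = calloc(2, len);

	printf("npages=%lu -> one bitmap is %lu bytes\n", slot.npages, len);
	printf("shadow half starts %td longs in\n",
	       shadow_dirty_bitmap(&slot) - slot.dirty_bitmap);

	free(slot.dirty_bitmap);
	return 0;
}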