Steven Price <steven.price@xxxxxxx> writes:

.....

> +int realm_map_protected(struct realm *realm,
> +			 unsigned long base_ipa,
> +			 struct page *dst_page,
> +			 unsigned long map_size,
> +			 struct kvm_mmu_memory_cache *memcache)
> +{
> +	phys_addr_t dst_phys = page_to_phys(dst_page);
> +	phys_addr_t rd = virt_to_phys(realm->rd);
> +	unsigned long phys = dst_phys;
> +	unsigned long ipa = base_ipa;
> +	unsigned long size;
> +	int map_level;
> +	int ret = 0;
> +
> +	if (WARN_ON(!IS_ALIGNED(ipa, map_size)))
> +		return -EINVAL;
> +
> +	switch (map_size) {
> +	case PAGE_SIZE:
> +		map_level = 3;
> +		break;
> +	case RME_L2_BLOCK_SIZE:
> +		map_level = 2;
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	if (map_level < RME_RTT_MAX_LEVEL) {
> +		/*
> +		 * A temporary RTT is needed during the map, precreate it,
> +		 * however if there is an error (e.g. missing parent tables)
> +		 * this will be handled below.
> +		 */
> +		realm_create_rtt_levels(realm, ipa, map_level,
> +					RME_RTT_MAX_LEVEL, memcache);
> +	}
> +
> +	for (size = 0; size < map_size; size += PAGE_SIZE) {
> +		if (rmi_granule_delegate(phys)) {
> +			struct rtt_entry rtt;
> +
> +			/*
> +			 * It's possible we raced with another VCPU on the same
> +			 * fault. If the entry exists and matches then exit
> +			 * early and assume the other VCPU will handle the
> +			 * mapping.
> +			 */
> +			if (rmi_rtt_read_entry(rd, ipa, RME_RTT_MAX_LEVEL, &rtt))
> +				goto err;
> +
> +			/*
> +			 * FIXME: For a block mapping this could race at level
> +			 * 2 or 3... currently we don't support block mappings
> +			 */
> +			if (WARN_ON((rtt.walk_level != RME_RTT_MAX_LEVEL ||
> +				     rtt.state != RMI_ASSIGNED ||
> +				     rtt_get_phys(realm, &rtt) != phys))) {
> +				goto err;
> +			}
> +
> +			return 0;
> +		}
>

Technically we are not mapping more than PAGE_SIZE here, but the code
does have the loop above, and with that loop should that "return 0" be a
'continue'? If we find the granule already delegated, does that ensure
the rest of map_size is also delegated?

-aneesh
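
To make the question concrete, the 'continue' variant would look roughly
like the sketch below. This is only an illustration, not a tested change:
the quoted hunk is cut off before the point where phys and ipa are
presumably advanced per granule, so that advancing is repeated here
explicitly before the continue.

	for (size = 0; size < map_size; size += PAGE_SIZE) {
		if (rmi_granule_delegate(phys)) {
			struct rtt_entry rtt;

			if (rmi_rtt_read_entry(rd, ipa, RME_RTT_MAX_LEVEL, &rtt))
				goto err;

			if (WARN_ON(rtt.walk_level != RME_RTT_MAX_LEVEL ||
				    rtt.state != RMI_ASSIGNED ||
				    rtt_get_phys(realm, &rtt) != phys))
				goto err;

			/*
			 * Only this granule is known to be handled by the
			 * other VCPU; skip it and keep delegating/mapping
			 * the rest of map_size instead of returning.
			 * Whatever per-granule advancing the remainder of
			 * the loop does would need to happen here too.
			 */
			phys += PAGE_SIZE;
			ipa += PAGE_SIZE;
			continue;
		}
		/* ... normal delegate + map path for this granule ... */
	}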