Re: [PATCH 5/5] KVM: Add hugepage support for dedicated guest memory

> +static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index,
> +					     unsigned int order)
> +{
> +	pgoff_t npages = 1UL << order;
> +	pgoff_t huge_index = round_down(index, npages);
> +	struct address_space *mapping  = inode->i_mapping;
> +	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NOWARN;
> +	loff_t size = i_size_read(inode);
> +	struct folio *folio;
> +
> +	/* Make sure hugepages would be fully-contained by inode */
> +	if ((huge_index + npages) * PAGE_SIZE > size)
> +		return NULL;
> +
> +	if (filemap_range_has_page(mapping, (loff_t)huge_index << PAGE_SHIFT,
> +				   (loff_t)(huge_index + npages - 1) << PAGE_SHIFT))
> +		return NULL;
> +
> +	folio = filemap_alloc_folio(gfp, order);
> +	if (!folio)
> +		return NULL;
Instead of returning NULL here, what about invoking __filemap_get_folio()
directly, as in the kvm_gmem_get_folio() example at the end of this mail?

> +	if (filemap_add_folio(mapping, folio, huge_index, gfp)) {
> +		folio_put(folio);
> +		return NULL;
> +	}
> +
> +	return folio;
> +}
> +
>  /*
>   * Returns a locked folio on success.  The caller is responsible for
>   * setting the up-to-date flag before the memory is mapped into the guest.
> @@ -284,8 +314,15 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct file *file,
>   */
>  static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
>  {
> -	/* TODO: Support huge pages. */
> -	return filemap_grab_folio(inode->i_mapping, index);
> +	struct folio *folio = NULL;
> +
> +	if (gmem_2m_enabled)
> +		folio = kvm_gmem_get_huge_folio(inode, index, PMD_ORDER);
> +
> +	if (!folio)
Also need to check IS_ERR(folio).

> +		folio = filemap_grab_folio(inode->i_mapping, index);
> +
> +	return folio;
>  }
Could we introduce a common helper to calculate max_order by checking the
gfn/index alignment and ensuring the memory attributes in the range are uniform?
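
Something along these lines, purely as a sketch -- kvm_gmem_max_order() and
kvm_gmem_attrs_uniform() are made-up names, and the attribute check is just a
stand-in for however the uniformity test ends up being implemented:

static int kvm_gmem_max_order(struct kvm *kvm, gfn_t gfn, pgoff_t index)
{
	int max_order = PMD_ORDER;

	/* Both the gfn and the file index must be hugepage aligned. */
	if (!IS_ALIGNED(gfn, 1UL << max_order) ||
	    !IS_ALIGNED(index, 1UL << max_order))
		return 0;

	/* All pages in the range must share the same memory attributes. */
	if (!kvm_gmem_attrs_uniform(kvm, gfn, 1UL << max_order))
		return 0;

	return max_order;
}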

Then we could pass max_order into kvm_gmem_get_folio() and only allocate a huge
folio when it is actually needed:

static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index, int max_order)
{
	struct folio *folio = NULL;

	if (max_order >= PMD_ORDER) {
		fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;

		fgp_flags |= fgf_set_order(1U << (PAGE_SHIFT + PMD_ORDER));
		folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags,
					    mapping_gfp_mask(inode->i_mapping));
	}

	if (!folio || IS_ERR(folio))
		folio = filemap_grab_folio(inode->i_mapping, index);

	return folio;
}
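
Callers that have the gfn handy could then do something like this (again just
illustrative, reusing the hypothetical kvm_gmem_max_order() from above):

	folio = kvm_gmem_get_folio(inode, index,
				   kvm_gmem_max_order(kvm, gfn, index));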



