Re: [PATCH v4 3/9] s390/mm: add gmap pmd invalidation notification

>>> - * gmap_pte_op_fixup - force a page in and connect the gmap page table
>>> + * gmap_fixup - force memory in and connect the gmap table entry
>>>   * @gmap: pointer to guest mapping meta data structure
>>>   * @gaddr: virtual address in the guest address space
>>>   * @vmaddr: address in the host process address space
>>> @@ -841,10 +851,10 @@ static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
>>>   *
>>>   * Returns 0 if the caller can retry __gmap_translate (might fail again),
>>>   * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
>>> - * up or connecting the gmap page table.
>>> + * up or connecting the gmap table entry.
>>>   */
>>> -static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
>>> -			     unsigned long vmaddr, int prot)
>>> +static int gmap_fixup(struct gmap *gmap, unsigned long gaddr,
>>> +		      unsigned long vmaddr, int prot)
>>>  {
>>>  	struct mm_struct *mm = gmap->mm;
>>>  	unsigned int fault_flags;
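
For readers following the thread: the fixup is meant to sit in a
translate/fixup retry loop. A rough sketch of that pattern, where
gmap_protect_one() is a made-up stand-in for the real gmap.c callers:

	static int gmap_protect_one(struct gmap *gmap, unsigned long gaddr,
				    int prot)
	{
		unsigned long vmaddr;
		int rc;

		/* the pte/pmd op failed, so resolve the host mapping ... */
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		/* ... force memory in and connect the gmap table entry ... */
		rc = gmap_fixup(gmap, gaddr, vmaddr, prot);
		if (rc)
			return rc;
		/* ... and let the caller retry the translation */
		return -EAGAIN;
	}
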
>>> @@ -892,8 +902,11 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
>>>  		return NULL;
>>>  	}
>>>  
>>> -	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
>>> -	if (!pmd_large(*pmdp))
>>> +	/*
>>> +	 * Non-split 4k page table entries are locked via the pte
>>> +	 * (pte_alloc_map_lock).
>>> +	 */
>>> +	if (!gmap_pmd_is_split(pmdp) && !pmd_large(*pmdp))
>>>  		spin_unlock(&gmap->guest_table_lock);
>>>  	return pmdp;
>>>  }
>>> @@ -905,10 +918,77 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
>>>   */
>>>  static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
>>>  {
>>> -	if (pmd_large(*pmdp))
>>> +	if (pmd_large(*pmdp) || gmap_pmd_is_split(pmdp))
>>>  		spin_unlock(&gmap->guest_table_lock);
>>>  }
>>>  
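Spelling out the locking protocol the walk/end pair gives callers, an
illustrative sketch only, assuming the semantics above:

	pmdp = gmap_pmd_op_walk(gmap, gaddr);	/* takes guest_table_lock */
	if (pmdp) {
		/*
		 * Large and split pmds are still under guest_table_lock
		 * here; normal pmds have already dropped it and their
		 * ptes are locked via pte_alloc_map_lock() instead.
		 */
		/* ... operate on *pmdp or its ptes ... */
		gmap_pmd_op_end(gmap, pmdp);	/* unlocks large/split */
	}
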
>>> +static pte_t *gmap_pte_from_pmd(struct gmap *gmap, pmd_t *pmdp,
>>> +				unsigned long addr, spinlock_t **ptl)
>>> +{
>>> +	if (likely(!gmap_pmd_is_split(pmdp)))
>>> +		return pte_alloc_map_lock(gmap->mm, pmdp, addr, ptl);
>>> +
>>> +	*ptl = NULL;
>>> +	return pte_offset_map(pmdp, addr);
>>> +}
>>> +
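One subtlety worth noting for gmap_pte_from_pmd(): with a split pmd the
ptl comes back NULL, because the ptes are already covered by the
guest_table_lock taken in gmap_pmd_op_walk(). So a caller has to unlock
conditionally; a minimal sketch, error handling trimmed:

	ptep = gmap_pte_from_pmd(gmap, pmdp, gaddr, &ptl);
	if (ptep) {
		/* ... read or modify the pte ... */
		if (ptl)	/* NULL for split pmds */
			spin_unlock(ptl);
		pte_unmap(ptep);
	}
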
>>> +/**
>>> + * gmap_pmd_split_free - Free a split pmd's page table
>>> + * @pmdp: The split pmd whose page table we free
>>> + *
>>> + * If the userspace pmds are exchanged, we'll remove the gmap pmds as
>>> + * well, so we fault on them and link them again. We would leak
>>> + * memory if we didn't free split pmds here.
>>> + */
>>> +static inline void gmap_pmd_split_free(pmd_t *pmdp)
>>> +{
>>> +	unsigned long pgt = pmd_val(*pmdp) & _SEGMENT_ENTRY_ORIGIN;
>>> +	struct page *page;
>>> +
>>> +	if (gmap_pmd_is_split(pmdp)) {
>>
>> can this ever not be the case? This function is not used in this patch.
> 
> Look into the next one.

Move it to the next one :)


-- 

Thanks,

David / dhildenb


