> +/**
> + * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
> + * and return the pmd pointer
> + * @gmap: pointer to guest mapping meta data structure
> + * @gaddr: virtual address in the guest address space
> + *
> + * Returns a pointer to the pmd for a guest address, or NULL
> + */
> +static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
> +{
> +        pmd_t *pmdp;
> +
> +        BUG_ON(gmap_is_shadow(gmap));
> +        spin_lock(&gmap->guest_table_lock);
> +        pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
> +

So gmap_table_walk() now basically always has to be called with the
guest_table_lock held. We could add a BUG/WARN for that in
gmap_table_walk() - see the sketch further down.

> +        if (!pmdp || pmd_none(*pmdp)) {
> +                spin_unlock(&gmap->guest_table_lock);
> +                return NULL;
> +        }
> +
> +        /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
> +        if (!pmd_large(*pmdp))
> +                spin_unlock(&gmap->guest_table_lock);
> +        return pmdp;
> +}
> +
> +/**
> + * gmap_pmd_op_end - release the guest_table_lock if needed
> + * @gmap: pointer to the guest mapping meta data structure
> + * @pmdp: pointer to the pmd
> + */
> +static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
> +{
> +        if (pmd_large(*pmdp))
> +                spin_unlock(&gmap->guest_table_lock);
> +}
> +
> +/*
> + * gmap_protect_pte - remove access rights to memory and set pgste bits
> + * @gmap: pointer to guest mapping meta data structure
> + * @gaddr: virtual address in the guest address space
> + * @pmdp: pointer to the pmd associated with the pte
> + * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
> + * @bits: pgste notification bits to set
> + *
> + * Returns 0 if successfully protected, -ENOMEM if out of memory and
> + * -EAGAIN if a fixup is needed.
> + *
> + * Expected to be called with sg->mm->mmap_sem in read and
> + * guest_table_lock held for shadow gmaps.
> + */

Stale comment about shadow gmaps - this function is not called for
shadow gmaps, and there is no "sg" parameter here.

> +static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
> +                            pmd_t *pmdp, int prot, unsigned long bits)
> +{
> +        int rc;
> +        pte_t *ptep;
> +        spinlock_t *ptl = NULL;
> +
> +        ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
> +        if (!ptep)
> +                return -ENOMEM;
> +
> +        /* Protect and unlock. */
> +        rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
> +        gmap_pte_op_end(ptl);
> +        return rc;
>  }
>  
>  /*
> @@ -896,16 +966,20 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
>                                unsigned long len, int prot, unsigned long bits)
>  {
>          unsigned long vmaddr;
> -        spinlock_t *ptl;
> -        pte_t *ptep;
> +        pmd_t *pmdp;
>          int rc;
>  
>          while (len) {
>                  rc = -EAGAIN;
> -                ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
> -                if (ptep) {
> -                        rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
> -                        gmap_pte_op_end(ptl);
> +                pmdp = gmap_pmd_op_walk(gmap, gaddr);
> +                if (pmdp && !(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {

You could move the !(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) check into
either gmap_pmd_op_walk() or gmap_protect_pte(). That would make this
easier to read (and you already have similar checks in
gmap_pmd_op_walk()).
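For gmap_pmd_op_walk(), that could look roughly like this (completely
untested, just to illustrate the idea):

static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
        pmd_t *pmdp;

        BUG_ON(gmap_is_shadow(gmap));
        spin_lock(&gmap->guest_table_lock);
        pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);

        /* Treat invalid entries like missing ones, so callers only
         * ever get back present, valid pmds. */
        if (!pmdp || pmd_none(*pmdp) ||
            (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
                spin_unlock(&gmap->guest_table_lock);
                return NULL;
        }

        /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
        if (!pmd_large(*pmdp))
                spin_unlock(&gmap->guest_table_lock);
        return pmdp;
}

The caller in gmap_protect_range() is then reduced to a plain
"if (pmdp)".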
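And regarding the BUG/WARN in gmap_table_walk() mentioned above, I was
thinking of something along these lines at the top of that function
(also untested; whether every remaining caller really holds the lock
would have to be double-checked first):

        /* Assumption: all callers hold the lock by now. If some path
         * legitimately doesn't, a WARN_ON_ONCE() based on
         * spin_is_locked() would be the softer alternative. */
        lockdep_assert_held(&gmap->guest_table_lock);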
> +                        rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
> +                                              bits);
> +                        if (!rc) {
> +                                len -= PAGE_SIZE;
> +                                gaddr += PAGE_SIZE;
> +                        }
> +                        gmap_pmd_op_end(gmap, pmdp);
>                  }
>                  if (rc) {
>                          vmaddr = __gmap_translate(gmap, gaddr);
> @@ -914,10 +988,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
>                          rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
>                          if (rc)
>                                  return rc;
> -                        continue;
>                  }
> -                gaddr += PAGE_SIZE;
> -                len -= PAGE_SIZE;
>          }
>          return 0;
>  }
>  

Apart from that, looks good to me.

-- 

Thanks,

David / dhildenb