On Mon, Sep 28, 2020 at 02:38:33PM -0700, Jacob Pan wrote:
> Each ioasid_set is given a quota during allocation. As system
> administrators balance resources among VMs, we shall support the
> adjustment of quota at runtime. The new quota cannot be less than the
> outstanding IOASIDs already allocated within the set. The extra quota
> will be returned to the system-wide IOASID pool if the new quota is
> smaller than the existing one.
>
> Signed-off-by: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>

Minor comments below, but

Reviewed-by: Jean-Philippe Brucker <jean-philippe@xxxxxxxxxx>

> ---
>  drivers/iommu/ioasid.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/ioasid.h |  6 ++++++
>  2 files changed, 53 insertions(+)
>
> diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> index 61e25c2375ab..cf8c7d34e2de 100644
> --- a/drivers/iommu/ioasid.c
> +++ b/drivers/iommu/ioasid.c
> @@ -654,6 +654,53 @@ void ioasid_set_put(struct ioasid_set *set)
>  EXPORT_SYMBOL_GPL(ioasid_set_put);
>
>  /**
> + * ioasid_adjust_set - Adjust the quota of an IOASID set
> + * @set: IOASID set to be assigned
> + * @quota: Quota allowed in this set
> + *
> + * Return 0 on success. If the new quota is smaller than the number of
> + * IOASIDs already allocated, -EINVAL will be returned. No change will be
> + * made to the existing quota.
> + */
> +int ioasid_adjust_set(struct ioasid_set *set, int quota)

@quota probably doesn't need to be signed (since it's the same as an
ioasid_t, which is unsigned).

> +{
> +        int ret = 0;
> +
> +        if (quota <= 0)
> +                return -EINVAL;
> +
> +        spin_lock(&ioasid_allocator_lock);
> +        if (set->nr_ioasids > quota) {
> +                pr_err("New quota %d is smaller than outstanding IOASIDs %d\n",
> +                        quota, set->nr_ioasids);
> +                ret = -EINVAL;
> +                goto done_unlock;
> +        }
> +
> +        if ((quota > set->quota) &&
> +                (quota - set->quota > ioasid_capacity_avail)) {

misaligned

> +                ret = -ENOSPC;
> +                goto done_unlock;
> +        }
> +
> +        /* Return the delta back to system pool */
> +        ioasid_capacity_avail += set->quota - quota;
> +
> +        /*
> +         * May have a policy to prevent giving all available IOASIDs
> +         * to one set. But we don't enforce here, it should be in the
> +         * upper layers.
> +         */

I think here would be OK to check about fairness. But anyway, we don't
have to worry about this yet, so I'd just drop the comment.

Thanks,
Jean

> +        set->quota = quota;
> +
> +done_unlock:
> +        spin_unlock(&ioasid_allocator_lock);
> +
> +        return ret;
> +}
> +EXPORT_SYMBOL_GPL(ioasid_adjust_set);
> +
> +/**
>   * ioasid_find - Find IOASID data
>   * @set: the IOASID set
>   * @ioasid: the IOASID to find
> diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
> index 1ae213b660f0..0a5e82148eb9 100644
> --- a/include/linux/ioasid.h
> +++ b/include/linux/ioasid.h
> @@ -62,6 +62,7 @@ struct ioasid_allocator_ops {
>  void ioasid_install_capacity(ioasid_t total);
>  ioasid_t ioasid_get_capacity(void);
>  struct ioasid_set *ioasid_set_alloc(void *token, ioasid_t quota, int type);
> +int ioasid_adjust_set(struct ioasid_set *set, int quota);
>  void ioasid_set_get(struct ioasid_set *set);
>  void ioasid_set_put(struct ioasid_set *set);
>
> @@ -99,6 +100,11 @@ static inline struct ioasid_set *ioasid_set_alloc(void *token, ioasid_t quota, i
>          return ERR_PTR(-ENOTSUPP);
>  }
>
> +static inline int ioasid_adjust_set(struct ioasid_set *set, int quota)
> +{
> +        return -ENOTSUPP;
> +}
> +
>  static inline void ioasid_set_put(struct ioasid_set *set)
>  {
>  }
> --
> 2.7.4
>
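
To make the "upper layers" point above concrete, here is a minimal sketch of
how a caller could apply its own fairness policy before resizing a set. The
vmm_resize_ioasid_quota() helper and the quarter-of-capacity cap are
hypothetical illustrations, not part of this patch; only ioasid_adjust_set()
and ioasid_get_capacity() come from the series.

#include <linux/ioasid.h>

/*
 * Hypothetical upper-layer helper: cap any single set to a quarter of
 * the total IOASID capacity, then let the core code validate the rest.
 */
static int vmm_resize_ioasid_quota(struct ioasid_set *set, int quota)
{
        int limit = ioasid_get_capacity() / 4; /* example policy only */

        if (quota > limit)
                quota = limit;

        /*
         * ioasid_adjust_set() returns -EINVAL if @quota is below the
         * set's outstanding allocations and -ENOSPC if the increase
         * exceeds the system-wide pool; any quota given back is
         * returned to the pool.
         */
        return ioasid_adjust_set(set, quota);
}

Where such a policy lives is a design choice: keeping it in the caller, as
sketched here, matches the comment in the patch that the core allocator only
enforces capacity, not fairness.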