Patches 1,2,4 will be submitted through amd-staging-drm-next. Patch 3
goes through the cgroup tree. Patch 4 depends on patch 3, so submitting
patch 4 will need to wait until we rebase amd-staging-drm-next on a new
enough kernel release that includes patch 3.

Patches 1 and 2 could be submitted now, or wait for patch 3 as well so
that we submit all our cgroup work in amdgpu and KFD together. It
probably makes the most sense to wait, since unused code tends to rot.

Patches 1,2,4 are already reviewed by me. Feel free to add my Acked-by
to patch 3.

Thanks,
  Felix

On 2019-05-17 12:15 p.m., Kasiviswanathan, Harish wrote:
> Participate in device cgroup. All kfd devices are exposed via a single
> /dev/kfd node, so use the per-device /dev/dri/renderN node instead.
>
> Before exposing the device to a task, check whether the task has
> permission to access it. If the task (based on its cgroup) can access
> /dev/dri/renderN, then expose the device via the kfd node.
>
> If the task cannot access /dev/dri/renderN, then the process device
> data (pdd) is not created. This ensures that the task cannot use the
> device.
>
> In the sysfs topology, all device nodes are visible irrespective of the
> task's cgroup. The sysfs node directories are created at driver load
> time and cannot be changed dynamically. However, access to the
> information inside the nodes is controlled based on the task's cgroup
> permissions.
>
> Change-Id: Ia7ed40930542111ac9f74b0706c3fa5ebf186b3c
> Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@xxxxxxx>
> Reviewed-by: Felix Kuehling <Felix.Kuehling@xxxxxxx>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c |  9 +++++++--
>  drivers/gpu/drm/amd/amdkfd/kfd_priv.h        | 17 +++++++++++++++++
>  drivers/gpu/drm/amd/amdkfd/kfd_topology.c    | 12 ++++++++++++
>  3 files changed, 36 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
> index 22a8e88b6a67..85c8838323a2 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
> @@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process)
>
>  	/*Iterating over all devices*/
>  	while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
> -		if (!dev) {
> -			id++; /* Skip non GPU devices */
> +		if (!dev || kfd_devcgroup_check_permission(dev)) {
> +			/* Skip non GPU devices and devices to which the
> +			 * current process has no access. Access can be
> +			 * limited by placing the process in a specific
> +			 * cgroup hierarchy.
> +			 */
> +			id++;
>  			continue;
>  		}
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> index cbba0047052d..85f55022014a 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> @@ -35,6 +35,8 @@
>  #include <linux/kfifo.h>
>  #include <linux/seq_file.h>
>  #include <linux/kref.h>
> +#include <linux/device_cgroup.h>
> +#include <drm/drmP.h>
>  #include <kgd_kfd_interface.h>
>
>  #include "amd_shared.h"
> @@ -989,6 +991,21 @@ bool kfd_is_locked(void);
>  void kfd_inc_compute_active(struct kfd_dev *dev);
>  void kfd_dec_compute_active(struct kfd_dev *dev);
>
> +/* Cgroup Support */
> +/* Check with device cgroup if @kfd device is accessible */
> +static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
> +{
> +#if defined(CONFIG_CGROUP_DEVICE)
> +	struct drm_device *ddev = kfd->ddev;
> +
> +	return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
> +					  ddev->render->index,
> +					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
> +#else
> +	return 0;
> +#endif
> +}
> +
>  /* Debugfs */
>  #if defined(CONFIG_DEBUG_FS)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> index 64d667ae0d36..a3a14a76ece1 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> @@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
>  	buffer[0] = 0;
>
>  	iolink = container_of(attr, struct kfd_iolink_properties, attr);
> +	if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
> +		return -EPERM;
>  	sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
>  	sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
>  	sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
> @@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
>  	buffer[0] = 0;
>
>  	mem = container_of(attr, struct kfd_mem_properties, attr);
> +	if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
> +		return -EPERM;
>  	sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
>  	sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
>  	sysfs_show_32bit_prop(buffer, "flags", mem->flags);
> @@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
>  	buffer[0] = 0;
>
>  	cache = container_of(attr, struct kfd_cache_properties, attr);
> +	if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
> +		return -EPERM;
>  	sysfs_show_32bit_prop(buffer, "processor_id_low",
>  			cache->processor_id_low);
>  	sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
> @@ -416,12 +422,16 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
>  	if (strcmp(attr->name, "gpu_id") == 0) {
>  		dev = container_of(attr, struct kfd_topology_device,
>  				attr_gpuid);
> +		if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
> +			return -EPERM;
>  		return sysfs_show_32bit_val(buffer, dev->gpu_id);
>  	}
>
>  	if (strcmp(attr->name, "name") == 0) {
>  		dev = container_of(attr, struct kfd_topology_device,
>  				attr_name);
> +		if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
> +			return -EPERM;
>  		for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
>  			public_name[i] =
>  					(char)dev->node_props.marketing_name[i];
> @@ -434,6 +444,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
>
>  	dev = container_of(attr, struct kfd_topology_device,
>  			attr_props);
> +	if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
> +		return -EPERM;
>  	sysfs_show_32bit_prop(buffer, "cpu_cores_count",
>  			dev->node_props.cpu_cores_count);
>  	sysfs_show_32bit_prop(buffer, "simd_count",
> --
> 2.17.1
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@xxxxxxxxxxxxxxxxxxxxx
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
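
A quick way to observe the behaviour the commit message describes is the
minimal userspace sketch below. It is only an illustration, not part of the
patch: the render node path /dev/dri/renderD128, the topology node number 1,
and the /sys/class/kfd location are assumptions that will differ between
systems. Running it once from an unrestricted cgroup and once from a cgroup
that denies the render node should show both checks failing only in the
second case.

/*
 * Minimal sketch of the expected behaviour (assumed paths, adjust per
 * system). When the device cgroup denies the render node, open() on it
 * fails; with the patch applied, reading the corresponding KFD topology
 * properties file also fails with EPERM, even though the sysfs directory
 * itself remains visible.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	int fd;

	/* Denied by the device cgroup -> open() fails (typically EPERM). */
	fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		printf("open(/dev/dri/renderD128): %s\n", strerror(errno));
	else
		close(fd);

	/* The node directory is still there; only its contents are guarded. */
	fd = open("/sys/class/kfd/kfd/topology/nodes/1/properties", O_RDONLY);
	if (fd < 0) {
		printf("open(properties): %s\n", strerror(errno));
		return 0;
	}
	if (read(fd, buf, sizeof(buf)) < 0)
		printf("read(properties): %s\n", strerror(errno));
	close(fd);
	return 0;
}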