On Tue, 2024-10-15 at 20:25 -0700, Matthew Brost wrote:
> Used to show we can bounce memory multiple times.
>
> Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
> ---
>  drivers/gpu/drm/xe/xe_module.c | 7 +++++++
>  drivers/gpu/drm/xe/xe_module.h | 1 +
>  drivers/gpu/drm/xe/xe_svm.c    | 3 +++
>  3 files changed, 11 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
> index 77ce9f9ca7a5..088f6caea307 100644
> --- a/drivers/gpu/drm/xe/xe_module.c
> +++ b/drivers/gpu/drm/xe/xe_module.c
> @@ -25,9 +25,16 @@ struct xe_modparam xe_modparam = {
>  	.max_vfs = IS_ENABLED(CONFIG_DRM_XE_DEBUG) ? ~0 : 0,
>  #endif
>  	.wedged_mode = 1,
> +	.svm_notifier_size = 512,
>  	/* the rest are 0 by default */
>  };
>
> +module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
> +MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be pow2");

Ah, this should probably have been in the previous patch?

Also, "pow2" could be spelled out as "a power of 2"?
> +
> +module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444);
> +MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault");
> +
>  module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
>  MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
>
> diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
> index 5a3bfea8b7b4..84339e509c80 100644
> --- a/drivers/gpu/drm/xe/xe_module.h
> +++ b/drivers/gpu/drm/xe/xe_module.h
> @@ -12,6 +12,7 @@
>  struct xe_modparam {
>  	bool force_execlist;
>  	bool probe_display;
> +	bool always_migrate_to_vram;

Please add kernel-doc for the new struct member.

Thanks,
Thomas

>  	u32 force_vram_bar_size;
>  	int guc_log_level;
>  	char *guc_firmware_path;
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 16e34aaead79..bb386f56a189 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -767,6 +767,9 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  	}
>  	drm_exec_fini(&exec);
>
> +	if (xe_modparam.always_migrate_to_vram)
> +		range->migrated = false;
> +
>  	dma_fence_wait(fence, false);
>  	dma_fence_put(fence);