Re: [PATCH v8 04/11] tracing/user_events: Fixup enable faults asyncly

On Tue, 21 Feb 2023 13:11:36 -0800
Beau Belgrave <beaub@xxxxxxxxxxxxxxxxxxx> wrote:

> @@ -263,7 +277,85 @@ static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr)
>  }
>  
>  static int user_event_enabler_write(struct user_event_mm *mm,
> -				    struct user_event_enabler *enabler)
> +				    struct user_event_enabler *enabler,
> +				    bool fixup_fault);
> +
> +static void user_event_enabler_fault_fixup(struct work_struct *work)
> +{
> +	struct user_event_enabler_fault *fault = container_of(
> +		work, struct user_event_enabler_fault, work);
> +	struct user_event_enabler *enabler = fault->enabler;
> +	struct user_event_mm *mm = fault->mm;
> +	unsigned long uaddr = enabler->addr;
> +	int ret;
> +
> +	ret = user_event_mm_fault_in(mm, uaddr);
> +
> +	if (ret && ret != -ENOENT) {
> +		struct user_event *user = enabler->event;
> +
> +		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
> +			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
> +	}
> +
> +	/* Prevent state changes from racing */
> +	mutex_lock(&event_mutex);
> +
> +	/*
> +	 * If we managed to get the page, re-issue the write. We do not
> +	 * want to get into a possible infinite loop, which is why we only
> +	 * attempt again directly if the page came in. If we couldn't get
> +	 * the page here, then we will try again the next time the event is
> +	 * enabled/disabled.
> +	 */

What case would we not get the page? A bad page mapping? User space doing
something silly?

Or something else? And in that case, how could this turn into an infinite
loop? Can that only happen if user space is doing something mischievous?
(One way to trigger the failure from user space is sketched after the
quoted hunk below.)

-- Steve


> +	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
> +
> +	if (!ret) {
> +		mmap_read_lock(mm->mm);
> +		user_event_enabler_write(mm, enabler, true);
> +		mmap_read_unlock(mm->mm);
> +	}
> +
> +	mutex_unlock(&event_mutex);
> +
> +	/* In all cases we no longer need the mm or fault */
> +	user_event_mm_put(mm);
> +	kmem_cache_free(fault_cache, fault);
> +}
> +
> +static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
> +					   struct user_event_enabler *enabler)
> +{
> +	struct user_event_enabler_fault *fault;
> +
> +	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
> +
> +	if (!fault)
> +		return false;
> +
> +	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
> +	fault->mm = user_event_mm_get(mm);
> +	fault->enabler = enabler;
> +
> +	/* Don't try to queue in again while we have a pending fault */
> +	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
> +
> +	if (!schedule_work(&fault->work)) {
> +		/* Allow another attempt later */
> +		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
> +
> +		user_event_mm_put(mm);
> +		kmem_cache_free(fault_cache, fault);
> +
> +		return false;
> +	}
> +
> +	return true;
> +}
> +



