Re: [PATCH bpf-next 3/9] net: napi: add ability to create CPU-pinned threaded NAPI

On Fri, Aug 30, 2024, at 9:25 AM, Alexander Lobakin wrote:
> From: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
>
> Add netif_napi_add_percpu() to pin a NAPI in threaded mode to a particular
> CPU. If the NAPI is not threaded, it will be run as usual, but when
> switching to threaded mode, it will always be run on the specified CPU.
> It's not meant to be used in drivers, but might be useful when creating
> percpu threaded NAPIs, for example, to replace percpu kthreads or
> workers where a NAPI context is needed.
> The already existing netif_napi_add*() helpers are not affected in any way.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@xxxxxxxxx>
> ---
>  include/linux/netdevice.h | 35 +++++++++++++++++++++++++++++++++--
>  net/core/dev.c            | 18 +++++++++++++-----
>  2 files changed, 46 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index ca5f0dda733b..4d6fb0ccdea1 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -377,6 +377,7 @@ struct napi_struct {
>  	struct list_head	dev_list;
>  	struct hlist_node	napi_hash_node;
>  	int			irq;
> +	int			thread_cpuid;
>  };
> 
>  enum {
> @@ -2619,8 +2620,18 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
>   */
>  #define NAPI_POLL_WEIGHT 64
> 
> -void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
> -			   int (*poll)(struct napi_struct *, int), int weight);
> +void netif_napi_add_weight_percpu(struct net_device *dev,
> +				  struct napi_struct *napi,
> +				  int (*poll)(struct napi_struct *, int),
> +				  int weight, int thread_cpuid);
> +
> +static inline void netif_napi_add_weight(struct net_device *dev,
> +					 struct napi_struct *napi,
> +					 int (*poll)(struct napi_struct *, int),
> +					 int weight)
> +{
> +	netif_napi_add_weight_percpu(dev, napi, poll, weight, -1);
> +}
> 
>  /**
>   * netif_napi_add() - initialize a NAPI context
> @@ -2665,6 +2676,26 @@ static inline void netif_napi_add_tx(struct net_device *dev,
>  	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
>  }
> 
> +/**
> + * netif_napi_add_percpu() - initialize a CPU-pinned threaded NAPI context
> + * @dev:  network device
> + * @napi: NAPI context
> + * @poll: polling function
> + * @thread_cpuid: CPU which this NAPI will be pinned to
> + *
> + * Variant of netif_napi_add() which pins the NAPI to the specified CPU. No
> + * changes in the "standard" mode, but in case with the threaded one, this
> + * NAPI will always be run on the passed CPU no matter where scheduled.
> + */
> +static inline void netif_napi_add_percpu(struct net_device *dev,
> +					 struct napi_struct *napi,
> +					 int (*poll)(struct napi_struct *, int),
> +					 int thread_cpuid)
> +{
> +	netif_napi_add_weight_percpu(dev, napi, poll, NAPI_POLL_WEIGHT,
> +				     thread_cpuid);
> +}
> +
>  /**
>   *  __netif_napi_del - remove a NAPI context
>   *  @napi: NAPI context
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 98bb5f890b88..93ca3df8e9dd 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -1428,8 +1428,13 @@ static int napi_kthread_create(struct napi_struct *n)
>  	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
>  	 * warning and work with loadavg.
>  	 */
> -	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
> -				n->dev->name, n->napi_id);
> +	if (n->thread_cpuid >= 0)
> +		n->thread = kthread_run_on_cpu(napi_threaded_poll, n,
> +					       n->thread_cpuid, "napi/%s-%u",
> +					       n->dev->name);
> +	else
> +		n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
> +					n->dev->name, n->napi_id);
>  	if (IS_ERR(n->thread)) {
>  		err = PTR_ERR(n->thread);
>  		pr_err("kthread_run failed with err %d\n", err);
> @@ -6640,8 +6645,10 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
>  }
>  EXPORT_SYMBOL(netif_queue_set_napi);
> 
> -void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
> -			   int (*poll)(struct napi_struct *, int), int weight)
> +void netif_napi_add_weight_percpu(struct net_device *dev,
> +				  struct napi_struct *napi,
> +				  int (*poll)(struct napi_struct *, int),
> +				  int weight, int thread_cpuid)
>  {
>  	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
>  		return;
> @@ -6664,6 +6671,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
>  	napi->poll_owner = -1;
>  #endif
>  	napi->list_owner = -1;
> +	napi->thread_cpuid = thread_cpuid;
>  	set_bit(NAPI_STATE_SCHED, &napi->state);
>  	set_bit(NAPI_STATE_NPSVC, &napi->state);
>  	list_add_rcu(&napi->dev_list, &dev->napi_list);
> @@ -6677,7 +6685,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
>  		dev->threaded = false;
>  	netif_napi_set_irq(napi, -1);
>  }
> -EXPORT_SYMBOL(netif_napi_add_weight);
> +EXPORT_SYMBOL(netif_napi_add_weight_percpu);
> 
>  void napi_disable(struct napi_struct *n)
>  {
> -- 
> 2.46.0
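
For reference, here is roughly how I'd expect a user of the new helper to
look. It's only a minimal sketch with hypothetical names (my_pcpu_napi,
my_percpu_poll, my_percpu_napi_init -- nothing from this series), assuming
the pinning only takes effect once dev->threaded is enabled, as the commit
message describes:

#include <linux/netdevice.h>
#include <linux/percpu.h>

/* One NAPI context per CPU; pinned to that CPU in threaded mode. */
struct my_pcpu_napi {
	struct napi_struct	napi;
};

static DEFINE_PER_CPU(struct my_pcpu_napi, my_pcpu_napis);

static int my_percpu_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* ... do up to @budget units of work, bumping @work ... */

	if (work < budget)
		napi_complete_done(napi, work);

	return work;
}

static void my_percpu_napi_init(struct net_device *dev)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct napi_struct *napi = &per_cpu(my_pcpu_napis, cpu).napi;

		/* Runs on @cpu only once threaded mode is switched on. */
		netif_napi_add_percpu(dev, napi, my_percpu_poll, cpu);
	}
}

Since the existing wrappers now go through netif_napi_add_weight_percpu()
with thread_cpuid == -1, current drivers keep the old behaviour, which looks
fine to me.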

Acked-by: Daniel Xu <dxu@xxxxxxxxx>



