Move virtnet_[en/dis]able_delayed_refill to header file. This is preparation for separating the virtio-related functions. Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> --- drivers/net/virtio/virtnet.c | 20 +++----------------- drivers/net/virtio/virtnet.h | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/drivers/net/virtio/virtnet.c b/drivers/net/virtio/virtnet.c index 8f281a7f9d7a..75a74864c3fe 100644 --- a/drivers/net/virtio/virtnet.c +++ b/drivers/net/virtio/virtnet.c @@ -136,20 +136,6 @@ static struct page *get_a_page(struct virtnet_rq *rq, gfp_t gfp_mask) return p; } -static void enable_delayed_refill(struct virtnet_info *vi) -{ - spin_lock_bh(&vi->refill_lock); - vi->refill_enabled = true; - spin_unlock_bh(&vi->refill_lock); -} - -static void disable_delayed_refill(struct virtnet_info *vi) -{ - spin_lock_bh(&vi->refill_lock); - vi->refill_enabled = false; - spin_unlock_bh(&vi->refill_lock); -} - static void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq) { @@ -1622,7 +1608,7 @@ static int virtnet_open(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i, err; - enable_delayed_refill(vi); + virtnet_enable_delayed_refill(vi); for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) @@ -1979,7 +1965,7 @@ static int virtnet_close(struct net_device *dev) int i; /* Make sure NAPI doesn't schedule refill work */ - disable_delayed_refill(vi); + virtnet_disable_delayed_refill(vi); /* Make sure virtnet_refill_work doesn't re-enable napi! 
*/ cancel_delayed_work_sync(&vi->refill); @@ -2068,7 +2054,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) virtio_device_ready(vdev); - enable_delayed_refill(vi); + virtnet_enable_delayed_refill(vi); if (netif_running(vi->dev)) { err = virtnet_get_netdev()->ndo_open(vi->dev); diff --git a/drivers/net/virtio/virtnet.h b/drivers/net/virtio/virtnet.h index 1315dcf52f1b..5f20e9103a0e 100644 --- a/drivers/net/virtio/virtnet.h +++ b/drivers/net/virtio/virtnet.h @@ -193,4 +193,19 @@ void virtnet_skb_xmit_done(struct virtqueue *vq); void virtnet_skb_recv_done(struct virtqueue *rvq); void virtnet_refill_work(struct work_struct *work); void virtnet_free_bufs(struct virtnet_info *vi); + +static inline void virtnet_enable_delayed_refill(struct virtnet_info *vi) +{ + spin_lock_bh(&vi->refill_lock); + vi->refill_enabled = true; + spin_unlock_bh(&vi->refill_lock); +} + +static inline void virtnet_disable_delayed_refill(struct virtnet_info *vi) +{ + spin_lock_bh(&vi->refill_lock); + vi->refill_enabled = false; + spin_unlock_bh(&vi->refill_lock); +} + #endif -- 2.32.0.3.g01195cf9f