With ICE_CFG_BUSY PF state flag locking used in ice_xdp(), there is no
need to lock on the VSI state inside ice_xsk_pool_setup(). For robust
synchronization, the window between reset preparation and PF VSI
rebuild has to be handled in the same way as in ice_xdp_setup_prog().
Remove the locking logic from ice_qp_dis() and ice_qp_ena(), and skip
those functions if a rebuild is pending.

Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
Reviewed-by: Igor Bagnucki <igor.bagnucki@xxxxxxxxx>
Signed-off-by: Larysa Zaremba <larysa.zaremba@xxxxxxxxx>
---
 drivers/net/ethernet/intel/ice/ice.h      |  1 +
 drivers/net/ethernet/intel/ice/ice_main.c |  2 +-
 drivers/net/ethernet/intel/ice/ice_xsk.c  | 12 ++----------
 3 files changed, 4 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 701a61d791dd..76590cfcaf68 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -941,6 +941,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 			  enum ice_xdp_cfg cfg_type);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
 void ice_map_xdp_rings(struct ice_vsi *vsi);
+bool ice_rebuild_pending(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	     u32 flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index dc60d816a345..cd8be3c3b956 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2994,7 +2994,7 @@ static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
  * so it happens strictly before or after .ndo_bpf().
  * In case it has happened before, we do not have anything attached to rings
  */
-static bool ice_rebuild_pending(struct ice_vsi *vsi)
+bool ice_rebuild_pending(struct ice_vsi *vsi)
 {
 	return ice_is_reset_in_progress(vsi->back->state) &&
 	       !vsi->rx_rings[0]->desc;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 4e2020ab0825..6c95bebd7777 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -163,7 +163,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 	struct ice_tx_ring *xdp_ring;
 	struct ice_tx_ring *tx_ring;
 	struct ice_rx_ring *rx_ring;
-	int timeout = 50;
 	int fail = 0;
 	int err;
 
@@ -175,13 +174,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 	xdp_ring = vsi->xdp_rings[q_idx];
 	q_vector = rx_ring->q_vector;
 
-	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
-		timeout--;
-		if (!timeout)
-			return -EBUSY;
-		usleep_range(1000, 2000);
-	}
-
 	synchronize_net();
 	netif_trans_update(vsi->netdev);
 	netif_carrier_off(vsi->netdev);
@@ -251,7 +243,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 	synchronize_net();
 	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
 	netif_carrier_on(vsi->netdev);
-	clear_bit(ICE_CFG_BUSY, vsi->state);
 
 	return fail;
 }
@@ -379,7 +370,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 		return -EINVAL;
 	}
 
-	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+	if_running = !ice_rebuild_pending(vsi) &&
+		     netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
 	if (if_running) {
 		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
-- 
2.43.0
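
(Not part of the patch - a reviewer-facing illustration.) Below is a minimal,
self-contained userspace sketch of the reworked gate in ice_xsk_pool_setup().
The rebuild_pending, running and xdp_enabled parameters stand in for
ice_rebuild_pending(vsi), netif_running(vsi->netdev) and
ice_is_xdp_ena_vsi(vsi); when a rebuild is pending, ice_qp_dis()/ice_qp_ena()
are skipped and the pool change is finished by the PF VSI rebuild instead,
which is why no extra VSI-state locking is needed here.

/* Illustration only: models the if_running decision after this patch. */
#include <stdbool.h>
#include <stdio.h>

static bool toggle_queue_pair(bool rebuild_pending, bool running, bool xdp_enabled)
{
	/* Skip the queue-pair toggle entirely while a rebuild is pending. */
	return !rebuild_pending && running && xdp_enabled;
}

int main(void)
{
	/* Interface up with XDP, no reset in flight: queue pair is toggled. */
	printf("no rebuild pending -> toggle qp: %d\n",
	       toggle_queue_pair(false, true, true));
	/* Reset prepared, rings torn down: leave the queues to the rebuild path. */
	printf("rebuild pending    -> toggle qp: %d\n",
	       toggle_queue_pair(true, true, true));
	return 0;
}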