Use the new unregister APIs provided by the core. Provide the
dealloc_driver hook for the core to call back at the time of device
unregistration. Also, simplify bnxt_re_from_netdev by retrieving the
netdev information from the core API ib_device_get_by_netdev.

bnxt_re VF resources are created by the corresponding PF driver. During
driver unload, if the PF gets destroyed first, the driver need not send
any command down to the FW. So add a state (res_deinit) in the
per-device structure to detect driver removal and avoid VF destroy
failures during driver removal.

Signed-off-by: Selvin Xavier <selvin.xavier@xxxxxxxxxxxx>
---
 drivers/infiniband/hw/bnxt_re/main.c       | 100 ++++++++++++-----------------
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c |   7 ++
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h |   1 +
 3 files changed, 48 insertions(+), 60 deletions(-)

diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 0cf38a4..9ea4ce7 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -79,7 +79,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
 static DEFINE_MUTEX(bnxt_re_dev_lock);
 static struct workqueue_struct *bnxt_re_wq;
 static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
-static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev);
+static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
 
 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
 {
@@ -223,9 +223,7 @@ static void bnxt_re_shutdown(void *p)
 	if (!rdev)
 		return;
 
-	bnxt_re_ib_uninit(rdev);
-	/* rtnl_lock held by L2 before coming here */
-	bnxt_re_remove_device(rdev);
+	ib_unregister_device_queued(&rdev->ibdev);
 }
 
 static void bnxt_re_stop_irq(void *handle)
@@ -527,17 +525,12 @@ static bool is_bnxt_re_dev(struct net_device *netdev)
 
 static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
 {
-	struct bnxt_re_dev *rdev;
+	struct ib_device *ibdev =
+		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
+	if (!ibdev)
+		return NULL;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
-		if (rdev->netdev == netdev) {
-			rcu_read_unlock();
-			return rdev;
-		}
-	}
-	rcu_read_unlock();
-	return NULL;
+	return container_of(ibdev, struct bnxt_re_dev, ibdev);
 }
 
 static void bnxt_re_dev_unprobe(struct net_device *netdev,
@@ -611,11 +604,6 @@ static const struct attribute_group bnxt_re_dev_attr_group = {
 	.attrs = bnxt_re_attributes,
 };
 
-static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
-{
-	ib_unregister_device(&rdev->ibdev);
-}
-
 static const struct ib_device_ops bnxt_re_dev_ops = {
 	.owner = THIS_MODULE,
 	.driver_id = RDMA_DRIVER_BNXT_RE,
@@ -630,6 +618,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	.create_cq = bnxt_re_create_cq,
 	.create_qp = bnxt_re_create_qp,
 	.create_srq = bnxt_re_create_srq,
+	.dealloc_driver = bnxt_re_dealloc_driver,
 	.dealloc_pd = bnxt_re_dealloc_pd,
 	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
 	.del_gid = bnxt_re_del_gid,
@@ -726,15 +715,11 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
 {
 	dev_put(rdev->netdev);
 	rdev->netdev = NULL;
-
 	mutex_lock(&bnxt_re_dev_lock);
 	list_del_rcu(&rdev->list);
 	mutex_unlock(&bnxt_re_dev_lock);
 
 	synchronize_rcu();
-
-	ib_dealloc_device(&rdev->ibdev);
-	/* rdev is gone */
 }
 
 static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
@@ -767,6 +752,7 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
 	mutex_lock(&bnxt_re_dev_lock);
 	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
 	mutex_unlock(&bnxt_re_dev_lock);
+
 	return rdev;
 }
 
@@ -1300,15 +1286,6 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
 		le16_to_cpu(resp.hwrm_intf_patch);
 }
 
-static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev)
-{
-	/* Cleanup ib dev */
-	if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
-		ib_unregister_device(&rdev->ibdev);
-		clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
-	}
-}
-
 int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
 {
 	int rc = 0;
@@ -1335,10 +1312,6 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
 	u8 type;
 	int rc;
 
-	if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
-		/* Cleanup ib dev */
-		bnxt_re_unregister_ib(rdev);
-	}
 	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
 		cancel_delayed_work_sync(&rdev->worker);
 
@@ -1595,6 +1568,19 @@ static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
 	return rc;
 }
 
+static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
+{
+	struct bnxt_re_dev *rdev =
+		container_of(ib_dev, struct bnxt_re_dev, ibdev);
+
+	clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+	dev_info(rdev_to_dev(rdev), "Unregistering Device");
+
+	rtnl_lock();
+	bnxt_re_remove_device(rdev);
+	rtnl_unlock();
+}
+
 /* Handle all deferred netevents tasks */
 static void bnxt_re_task(struct work_struct *work)
 {
@@ -1669,6 +1655,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 	struct bnxt_re_dev *rdev;
 	int rc = 0;
 	bool sch_work = false;
+	bool release = true;
 
 	real_dev = rdma_vlan_dev_real_dev(netdev);
 	if (!real_dev)
@@ -1676,7 +1663,8 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 
 	rdev = bnxt_re_from_netdev(real_dev);
 	if (!rdev && event != NETDEV_REGISTER)
-		goto exit;
+		return NOTIFY_OK;
+
 	if (real_dev != netdev)
 		goto exit;
 
@@ -1687,6 +1675,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 		rc = bnxt_re_add_device(&rdev, real_dev);
 		if (!rc)
 			sch_work = true;
+		release = false;
 		break;
 
 	case NETDEV_UNREGISTER:
@@ -1695,8 +1684,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 		 */
 		if (atomic_read(&rdev->sched_count) > 0)
 			goto exit;
-		bnxt_re_ib_uninit(rdev);
-		bnxt_re_remove_device(rdev);
+		ib_unregister_device_queued(&rdev->ibdev);
 		break;
 
 	default:
@@ -1718,6 +1706,8 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 	}
 
 exit:
+	if (release)
+		ib_device_put(&rdev->ibdev);
 	return NOTIFY_DONE;
 }
 
@@ -1753,32 +1743,22 @@ static int __init bnxt_re_mod_init(void)
 
 static void __exit bnxt_re_mod_exit(void)
 {
-	struct bnxt_re_dev *rdev, *next;
-	LIST_HEAD(to_be_deleted);
+	struct bnxt_re_dev *rdev;
 
+	flush_workqueue(bnxt_re_wq);
 	mutex_lock(&bnxt_re_dev_lock);
-	/* Free all adapter allocated resources */
-	if (!list_empty(&bnxt_re_dev_list))
-		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
-	mutex_unlock(&bnxt_re_dev_lock);
-	/*
-	 * Cleanup the devices in reverse order so that the VF device
-	 * cleanup is done before PF cleanup
-	 */
-	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
-		dev_info(rdev_to_dev(rdev), "Unregistering Device");
+	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
 		/*
-		 * Flush out any scheduled tasks before destroying the
-		 * resources
+		 * Set unreg flag to avoid VF resource cleanup
+		 * in module unload path. This is required because
+		 * dealloc_driver for VF can come after PF cleaning
+		 * the VF resources.
 		 */
-		flush_workqueue(bnxt_re_wq);
-		bnxt_re_dev_stop(rdev);
-		bnxt_re_ib_uninit(rdev);
-		/* Acquire the rtnl_lock as the L2 resources are freed here */
-		rtnl_lock();
-		bnxt_re_remove_device(rdev);
-		rtnl_unlock();
+		if (rdev->is_virtfn)
+			rdev->rcfw.res_deinit = true;
 	}
+	mutex_unlock(&bnxt_re_dev_lock);
+	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
 	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
 	if (bnxt_re_wq)
 		destroy_workqueue(bnxt_re_wq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5cdfa84..2304f97 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -211,6 +211,13 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 	u8 opcode, retry_cnt = 0xFF;
 	int rc = 0;
 
+	/* Check if the command needs to be send down to HW.
+	 * Return success if resources are de-initialized
+	 */
+
+	if (rcfw->res_deinit)
+		return 0;
+
 	do {
 		opcode = req->opcode;
 		rc = __send_message(rcfw, req, resp, sb, is_block);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index dfeadc1..c2b930e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -265,6 +265,7 @@ struct bnxt_qplib_rcfw {
 	u64 oos_prev;
 	u32 init_oos_stats;
 	u32 cmdq_depth;
+	bool res_deinit;
 };
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
-- 
2.5.5
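
Note on the lookup change (illustrative only, not part of the patch): ib_device_get_by_netdev()
returns the matching ib_device with a reference held, so every successful bnxt_re_from_netdev()
lookup must eventually be balanced by ib_device_put(); that is what the new 'release' flag in
bnxt_re_netdev_event() tracks. The sketch below uses a hypothetical caller name to show the
expected pairing, assuming the in-tree ib_core helpers used by the patch.

#include <rdma/ib_verbs.h>	/* ib_device_get_by_netdev(), ib_device_put() */

/* Hypothetical caller, mirroring the new bnxt_re_from_netdev() contract. */
static void example_inspect_rdev(struct net_device *netdev)
{
	struct bnxt_re_dev *rdev = bnxt_re_from_netdev(netdev);

	if (!rdev)
		return;		/* no bnxt_re device bound to this netdev */

	/* ... rdev may be used here; the lookup holds a reference ... */

	ib_device_put(&rdev->ibdev);	/* balance the lookup's reference */
}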