Initialize ib_device_ops with the supported operations using
ib_set_device_ops().

Signed-off-by: Kamal Heib <kamalheib1@xxxxxxxxx>
---
 drivers/infiniband/hw/mlx5/main.c | 126 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 125 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b3294a7e3ff9..1d2b8f4b2904 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -5760,6 +5760,92 @@ static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
 	kfree(dev->flow_db);
 }
 
+static struct ib_device_ops mlx5_ib_dev_ops = {
+	.query_device = mlx5_ib_query_device,
+	.get_link_layer = mlx5_ib_port_link_layer,
+	.query_gid = mlx5_ib_query_gid,
+	.add_gid = mlx5_ib_add_gid,
+	.del_gid = mlx5_ib_del_gid,
+	.query_pkey = mlx5_ib_query_pkey,
+	.modify_device = mlx5_ib_modify_device,
+	.modify_port = mlx5_ib_modify_port,
+	.alloc_ucontext = mlx5_ib_alloc_ucontext,
+	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
+	.mmap = mlx5_ib_mmap,
+	.alloc_pd = mlx5_ib_alloc_pd,
+	.dealloc_pd = mlx5_ib_dealloc_pd,
+	.create_ah = mlx5_ib_create_ah,
+	.query_ah = mlx5_ib_query_ah,
+	.destroy_ah = mlx5_ib_destroy_ah,
+	.create_srq = mlx5_ib_create_srq,
+	.modify_srq = mlx5_ib_modify_srq,
+	.query_srq = mlx5_ib_query_srq,
+	.destroy_srq = mlx5_ib_destroy_srq,
+	.post_srq_recv = mlx5_ib_post_srq_recv,
+	.create_qp = mlx5_ib_create_qp,
+	.modify_qp = mlx5_ib_modify_qp,
+	.query_qp = mlx5_ib_query_qp,
+	.destroy_qp = mlx5_ib_destroy_qp,
+	.drain_sq = mlx5_ib_drain_sq,
+	.drain_rq = mlx5_ib_drain_rq,
+	.post_send = mlx5_ib_post_send,
+	.post_recv = mlx5_ib_post_recv,
+	.create_cq = mlx5_ib_create_cq,
+	.modify_cq = mlx5_ib_modify_cq,
+	.resize_cq = mlx5_ib_resize_cq,
+	.destroy_cq = mlx5_ib_destroy_cq,
+	.poll_cq = mlx5_ib_poll_cq,
+	.req_notify_cq = mlx5_ib_arm_cq,
+	.get_dma_mr = mlx5_ib_get_dma_mr,
+	.reg_user_mr = mlx5_ib_reg_user_mr,
+	.rereg_user_mr = mlx5_ib_rereg_user_mr,
+	.dereg_mr = mlx5_ib_dereg_mr,
+	.attach_mcast = mlx5_ib_mcg_attach,
+	.detach_mcast = mlx5_ib_mcg_detach,
+	.process_mad = mlx5_ib_process_mad,
+	.alloc_mr = mlx5_ib_alloc_mr,
+	.map_mr_sg = mlx5_ib_map_mr_sg,
+	.check_mr_status = mlx5_ib_check_mr_status,
+	.get_dev_fw_str = get_dev_fw_str,
+	.get_vector_affinity = mlx5_ib_get_vector_affinity,
+	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
+	.create_flow = mlx5_ib_create_flow,
+	.destroy_flow = mlx5_ib_destroy_flow,
+	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
+	.destroy_flow_action = mlx5_ib_destroy_flow_action,
+	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
+	.create_counters = mlx5_ib_create_counters,
+	.destroy_counters = mlx5_ib_destroy_counters,
+	.read_counters = mlx5_ib_read_counters,
+};
+
+static struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
+	.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev,
+};
+
+static struct ib_device_ops mlx5_ib_dev_sriov_ops = {
+	.get_vf_config = mlx5_ib_get_vf_config,
+	.set_vf_link_state = mlx5_ib_set_vf_link_state,
+	.get_vf_stats = mlx5_ib_get_vf_stats,
+	.set_vf_guid = mlx5_ib_set_vf_guid,
+};
+
+static struct ib_device_ops mlx5_ib_dev_mw_ops = {
+	.alloc_mw = mlx5_ib_alloc_mw,
+	.dealloc_mw = mlx5_ib_dealloc_mw,
+};
+
+static struct ib_device_ops mlx5_ib_dev_xrc_ops = {
+	.alloc_xrcd = mlx5_ib_alloc_xrcd,
+	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
+};
+
+static struct ib_device_ops mlx5_ib_dev_dm_ops = {
+	.alloc_dm = mlx5_ib_alloc_dm,
+	.dealloc_dm = mlx5_ib_dealloc_dm,
+	.reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
+
 int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
@@ -5847,14 +5933,18 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
 	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
 	dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
-	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
+	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
 		dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
+		ib_set_device_ops(&dev->ib_dev,
+				  &mlx5_ib_dev_ipoib_enhanced_ops);
+	}
 
 	if (mlx5_core_is_pf(mdev)) {
 		dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
 		dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
 		dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
 		dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
+		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
 	}
 
 	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
@@ -5864,6 +5954,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	if (MLX5_CAP_GEN(mdev, imaicl)) {
 		dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
 		dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
+		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
 		dev->ib_dev.uverbs_cmd_mask |=
 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
@@ -5872,6 +5963,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	if (MLX5_CAP_GEN(mdev, xrc)) {
 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
+		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
 		dev->ib_dev.uverbs_cmd_mask |=
 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
@@ -5881,6 +5973,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 		dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm;
 		dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm;
 		dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr;
+		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
 	}
 
 	dev->ib_dev.create_flow = mlx5_ib_create_flow;
@@ -5895,6 +5988,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.create_counters = mlx5_ib_create_counters;
 	dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
 	dev->ib_dev.read_counters = mlx5_ib_read_counters;
+	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
 
 	err = init_node_data(dev);
 	if (err)
@@ -5908,22 +6002,45 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	return 0;
 }
 
+static struct ib_device_ops mlx5_ib_dev_port_ops = {
+	.get_port_immutable = mlx5_port_immutable,
+	.query_port = mlx5_ib_query_port,
+};
+
 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
 {
 	dev->ib_dev.get_port_immutable = mlx5_port_immutable;
 	dev->ib_dev.query_port = mlx5_ib_query_port;
+	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
+
 	return 0;
 }
 
+static struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
+	.get_port_immutable = mlx5_port_rep_immutable,
+	.query_port = mlx5_ib_rep_query_port,
+};
+
 int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
 {
 	dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable;
 	dev->ib_dev.query_port = mlx5_ib_rep_query_port;
+	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
+
 	return 0;
 }
 
+static struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
+	.get_netdev = mlx5_ib_get_netdev,
+	.create_wq = mlx5_ib_create_wq,
+	.modify_wq = mlx5_ib_modify_wq,
+	.destroy_wq = mlx5_ib_destroy_wq,
+	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
+	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
+};
+
 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
 {
 	u8 port_num;
@@ -5942,6 +6059,7 @@ static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
 	dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
+	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
 	dev->ib_dev.uverbs_ex_cmd_mask |=
 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
@@ -6041,11 +6159,17 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
 	return mlx5_ib_odp_init_one(dev);
 }
 
+static struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
+	.get_hw_stats = mlx5_ib_get_hw_stats,
+	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
+};
+
 int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
 		dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
 		dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
 
 		return mlx5_ib_alloc_counters(dev);
 	}
-- 
2.14.4
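
[Note for readers following the series: the helper this patch leans on,
ib_set_device_ops(), is introduced earlier in the series in
drivers/infiniband/core/device.c. The sketch below is illustrative, not the
verbatim in-tree helper; it assumes the core copies each non-NULL callback
from the passed ops table into the ib_device and leaves already-populated
slots untouched, so the per-feature tables above (SR-IOV, MW, XRC, DM, ...)
and the base mlx5_ib_dev_ops can be merged in any order without clobbering
one another.]

/*
 * Minimal sketch, NOT the verbatim kernel helper: copy every non-NULL
 * callback from @ops into @dev, skipping slots a prior call already set,
 * so repeated capability-gated calls compose safely.
 */
#define SET_DEVICE_OP(dev, name)					\
	do {								\
		if (ops->name && !(dev)->name)				\
			(dev)->name = ops->name;			\
	} while (0)

void ib_set_device_ops(struct ib_device *dev,
		       const struct ib_device_ops *ops)
{
	SET_DEVICE_OP(dev, query_device);
	SET_DEVICE_OP(dev, query_port);
	SET_DEVICE_OP(dev, alloc_pd);
	/* ... one SET_DEVICE_OP() per ib_device_ops member ... */
}

[Grouping the callbacks into static per-feature tables and merging them with
one ib_set_device_ops() call per capability is what allows a follow-up patch
to delete the long runs of direct dev->ib_dev.* assignments that this patch
still keeps in place, hence the 125-insertions/1-deletion diffstat.]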