[PATCH 4/5] IB/iser: switch to IB pool device interface

Instead of maintaining a private device list with manual reference
counting, switch iser over to the generic IB pool device interface:
devices are now looked up, created and released through
ib_pool_dev_find_get_or_create() and ib_pool_dev_put(), and the
protection domain is allocated and owned by the pool, so
iser_create_device_ib_res()/iser_free_device_ib_res() no longer
allocate or free the PD themselves.

Signed-off-by: Roman Pen <roman.penyaev@xxxxxxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Steve Wise <swise@xxxxxxxxxxxxxxxxxxxxx>
Cc: Bart Van Assche <bart.vanassche@xxxxxxxxxxx>
Cc: Sagi Grimberg <sagi@xxxxxxxxxxx>
Cc: Doug Ledford <dledford@xxxxxxxxxx>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c     |  48 ++++++++++---
 drivers/infiniband/ulp/iser/iscsi_iser.h     |  19 +++--
 drivers/infiniband/ulp/iser/iser_initiator.c |  42 +++++------
 drivers/infiniband/ulp/iser/iser_memory.c    |  18 ++---
 drivers/infiniband/ulp/iser/iser_verbs.c     | 102 ++++++---------------------
 5 files changed, 100 insertions(+), 129 deletions(-)

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0336643c2ed6..984c8edc72e7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -198,9 +198,9 @@ iser_initialize_task_headers(struct iscsi_task *task,
 		goto out;
 	}
 
-	dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
+	dma_addr = ib_dma_map_single(device->dev.ib_dev, (void *)tx_desc,
 				ISER_HEADERS_LEN, DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
+	if (ib_dma_mapping_error(device->dev.ib_dev, dma_addr)) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -210,7 +210,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
 	tx_desc->dma_addr = dma_addr;
 	tx_desc->tx_sg[0].addr   = tx_desc->dma_addr;
 	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
-	tx_desc->tx_sg[0].lkey   = device->pd->local_dma_lkey;
+	tx_desc->tx_sg[0].lkey   = device->dev.ib_pd->local_dma_lkey;
 
 	iser_task->iser_conn = iser_conn;
 out:
@@ -375,7 +375,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 		return;
 
 	if (likely(tx_desc->mapped)) {
-		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
+		ib_dma_unmap_single(device->dev.ib_dev, tx_desc->dma_addr,
 				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
 		tx_desc->mapped = false;
 	}
@@ -646,7 +646,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 
 		ib_conn = &iser_conn->ib_conn;
 		if (ib_conn->pi_support) {
-			u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;
+			u32 sig_caps = ib_conn->device->dev.ib_dev->attrs.sig_prot_cap;
 
 			scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
 			scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
@@ -654,7 +654,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 		}
 
 		if (iscsi_host_add(shost,
-				   ib_conn->device->ib_device->dev.parent)) {
+				   ib_conn->device->dev.ib_dev->dev.parent)) {
 			mutex_unlock(&iser_conn->state_mutex);
 			goto free_host;
 		}
@@ -987,7 +987,7 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
 		mutex_unlock(&unbind_iser_conn_mutex);
 		return -ENOTCONN;
 	}
-	ib_dev = iser_conn->ib_conn.device->ib_device;
+	ib_dev = iser_conn->ib_conn.device->dev.ib_dev;
 
 	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
 		blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
@@ -1053,6 +1053,34 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.ep_disconnect          = iscsi_iser_ep_disconnect
 };
 
+static struct ib_pool_device *iser_ib_pool_dev_alloc(void)
+{
+	struct iser_device *device;
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (likely(device))
+		return &device->dev;
+
+	return NULL;
+}
+
+static void iser_ib_pool_dev_free(struct ib_pool_device *dev)
+{
+	struct iser_device *device;
+
+	device = container_of(dev, typeof(*device), dev);
+	iser_free_device_ib_res(device);
+	kfree(device);
+}
+
+static int iser_ib_pool_dev_init(struct ib_pool_device *dev)
+{
+	struct iser_device *device;
+
+	device = container_of(dev, typeof(*device), dev);
+	return iser_create_device_ib_res(device);
+}
+
 static int __init iser_init(void)
 {
 	int err;
@@ -1074,8 +1102,9 @@ static int __init iser_init(void)
 		return -ENOMEM;
 
 	/* device init is called only after the first addr resolution */
-	mutex_init(&ig.device_list_mutex);
-	INIT_LIST_HEAD(&ig.device_list);
+	ib_pool_dev_init(iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY,
+			 iser_ib_pool_dev_alloc, iser_ib_pool_dev_free,
+			 iser_ib_pool_dev_init,  &ig.ib_devs_pool);
 	mutex_init(&ig.connlist_mutex);
 	INIT_LIST_HEAD(&ig.connlist);
 
@@ -1127,6 +1156,7 @@ static void __exit iser_exit(void)
 
 	iscsi_unregister_transport(&iscsi_iser_transport);
 	kmem_cache_destroy(ig.desc_cache);
+	ib_pool_dev_deinit(&ig.ib_devs_pool);
 }
 
 module_init(iser_init);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index c1ae4aeae2f9..b1359814610c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -67,6 +67,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_fmr_pool.h>
 #include <rdma/rdma_cm.h>
+#include <rdma/dev_pool.h>
 
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
@@ -359,9 +360,7 @@ struct iser_reg_ops {
 /**
  * struct iser_device - iSER device handle
  *
- * @ib_device:     RDMA device
- * @pd:            Protection Domain for this device
- * @mr:            Global DMA memory region
+ * @dev:           IB device in pool
  * @event_handler: IB events handle routine
  * @ig_list:	   entry in devices list
  * @refcount:      Reference counter, dominated by open iser connections
@@ -372,11 +371,8 @@ struct iser_reg_ops {
  * @remote_inv_sup: Remote invalidate is supported on this device
  */
 struct iser_device {
-	struct ib_device             *ib_device;
-	struct ib_pd	             *pd;
+	struct ib_pool_device        dev;
 	struct ib_event_handler      event_handler;
-	struct list_head             ig_list;
-	int                          refcount;
 	int			     comps_used;
 	struct iser_comp	     *comps;
 	const struct iser_reg_ops    *reg_ops;
@@ -557,15 +553,13 @@ struct iser_page_vec {
 /**
  * struct iser_global: iSER global context
  *
- * @device_list_mutex:    protects device_list
- * @device_list:          iser devices global list
+ * @ib_devs_pool:         pool of devices
  * @connlist_mutex:       protects connlist
  * @connlist:             iser connections global list
  * @desc_cache:           kmem cache for tx dataout
  */
 struct iser_global {
-	struct mutex      device_list_mutex;
-	struct list_head  device_list;
+	struct ib_device_pool ib_devs_pool;
 	struct mutex      connlist_mutex;
 	struct list_head  connlist;
 	struct kmem_cache *desc_cache;
@@ -580,6 +574,9 @@ extern bool iser_always_reg;
 
 int iser_assign_reg_ops(struct iser_device *device);
 
+int iser_create_device_ib_res(struct iser_device *device);
+void iser_free_device_ib_res(struct iser_device *device);
+
 int iser_send_control(struct iscsi_conn *conn,
 		      struct iscsi_task *task);
 
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index df49c4eb67f7..d8bb081af4f0 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -165,7 +165,7 @@ static void iser_create_send_desc(struct iser_conn	*iser_conn,
 {
 	struct iser_device *device = iser_conn->ib_conn.device;
 
-	ib_dma_sync_single_for_cpu(device->ib_device,
+	ib_dma_sync_single_for_cpu(device->dev.ib_dev,
 		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
 	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
@@ -181,10 +181,10 @@ static void iser_free_login_buf(struct iser_conn *iser_conn)
 	if (!desc->req)
 		return;
 
-	ib_dma_unmap_single(device->ib_device, desc->req_dma,
+	ib_dma_unmap_single(device->dev.ib_dev, desc->req_dma,
 			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
 
-	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
+	ib_dma_unmap_single(device->dev.ib_dev, desc->rsp_dma,
 			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
 
 	kfree(desc->req);
@@ -204,10 +204,10 @@ static int iser_alloc_login_buf(struct iser_conn *iser_conn)
 	if (!desc->req)
 		return -ENOMEM;
 
-	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
+	desc->req_dma = ib_dma_map_single(device->dev.ib_dev, desc->req,
 					  ISCSI_DEF_MAX_RECV_SEG_LEN,
 					  DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(device->ib_device,
+	if (ib_dma_mapping_error(device->dev.ib_dev,
 				desc->req_dma))
 		goto free_req;
 
@@ -215,10 +215,10 @@ static int iser_alloc_login_buf(struct iser_conn *iser_conn)
 	if (!desc->rsp)
 		goto unmap_req;
 
-	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
+	desc->rsp_dma = ib_dma_map_single(device->dev.ib_dev, desc->rsp,
 					   ISER_RX_LOGIN_SIZE,
 					   DMA_FROM_DEVICE);
-	if (ib_dma_mapping_error(device->ib_device,
+	if (ib_dma_mapping_error(device->dev.ib_dev,
 				desc->rsp_dma))
 		goto free_rsp;
 
@@ -227,7 +227,7 @@ static int iser_alloc_login_buf(struct iser_conn *iser_conn)
 free_rsp:
 	kfree(desc->rsp);
 unmap_req:
-	ib_dma_unmap_single(device->ib_device, desc->req_dma,
+	ib_dma_unmap_single(device->dev.ib_dev, desc->req_dma,
 			    ISCSI_DEF_MAX_RECV_SEG_LEN,
 			    DMA_TO_DEVICE);
 free_req:
@@ -266,9 +266,9 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 	rx_desc = iser_conn->rx_descs;
 
 	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)  {
-		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
+		dma_addr = ib_dma_map_single(device->dev.ib_dev, (void *)rx_desc,
 					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
-		if (ib_dma_mapping_error(device->ib_device, dma_addr))
+		if (ib_dma_mapping_error(device->dev.ib_dev, dma_addr))
 			goto rx_desc_dma_map_failed;
 
 		rx_desc->dma_addr = dma_addr;
@@ -276,7 +276,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 		rx_sg = &rx_desc->rx_sg;
 		rx_sg->addr = rx_desc->dma_addr;
 		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
-		rx_sg->lkey = device->pd->local_dma_lkey;
+		rx_sg->lkey = device->dev.ib_pd->local_dma_lkey;
 	}
 
 	iser_conn->rx_desc_head = 0;
@@ -285,7 +285,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
 rx_desc_dma_map_failed:
 	rx_desc = iser_conn->rx_descs;
 	for (j = 0; j < i; j++, rx_desc++)
-		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+		ib_dma_unmap_single(device->dev.ib_dev, rx_desc->dma_addr,
 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	kfree(iser_conn->rx_descs);
 	iser_conn->rx_descs = NULL;
@@ -310,7 +310,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
 
 	rx_desc = iser_conn->rx_descs;
 	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
-		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+		ib_dma_unmap_single(device->dev.ib_dev, rx_desc->dma_addr,
 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	kfree(iser_conn->rx_descs);
 	/* make sure we never redo any unmapping */
@@ -520,17 +520,17 @@ int iser_send_control(struct iscsi_conn *conn,
 			goto send_control_error;
 		}
 
-		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
+		ib_dma_sync_single_for_cpu(device->dev.ib_dev, desc->req_dma,
 					   task->data_count, DMA_TO_DEVICE);
 
 		memcpy(desc->req, task->data, task->data_count);
 
-		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
+		ib_dma_sync_single_for_device(device->dev.ib_dev, desc->req_dma,
 					      task->data_count, DMA_TO_DEVICE);
 
 		tx_dsg->addr = desc->req_dma;
 		tx_dsg->length = task->data_count;
-		tx_dsg->lkey = device->pd->local_dma_lkey;
+		tx_dsg->lkey = device->dev.ib_pd->local_dma_lkey;
 		mdesc->num_sge = 2;
 	}
 
@@ -568,7 +568,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
-	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
+	ib_dma_sync_single_for_cpu(ib_conn->device->dev.ib_dev,
 				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
 				   DMA_FROM_DEVICE);
 
@@ -581,7 +581,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
 
 	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);
 
-	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+	ib_dma_sync_single_for_device(ib_conn->device->dev.ib_dev,
 				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
 				      DMA_FROM_DEVICE);
 
@@ -653,7 +653,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
-	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
+	ib_dma_sync_single_for_cpu(ib_conn->device->dev.ib_dev,
 				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
 				   DMA_FROM_DEVICE);
 
@@ -671,7 +671,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
 
 	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);
 
-	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+	ib_dma_sync_single_for_device(ib_conn->device->dev.ib_dev,
 				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
 				      DMA_FROM_DEVICE);
 
@@ -722,7 +722,7 @@ void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
 	if (unlikely(wc->status != IB_WC_SUCCESS))
 		iser_err_comp(wc, "dataout");
 
-	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
+	ib_dma_unmap_single(device->dev.ib_dev, desc->dma_addr,
 			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
 	kmem_cache_free(ig.desc_cache, desc);
 }
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 322209d5ff58..09f0219170e1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -74,7 +74,7 @@ void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
 
 int iser_assign_reg_ops(struct iser_device *device)
 {
-	struct ib_device *ib_dev = device->ib_device;
+	struct ib_device *ib_dev = device->dev.ib_dev;
 
 	/* Assign function handles  - based on FMR support */
 	if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
@@ -168,7 +168,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;
 
 	iser_task->dir[iser_dir] = 1;
-	dev = iser_task->iser_conn->ib_conn.device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->dev.ib_dev;
 
 	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
 	if (data->dma_nents == 0) {
@@ -184,7 +184,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
 {
 	struct ib_device *dev;
 
-	dev = iser_task->iser_conn->ib_conn.device->ib_device;
+	dev = iser_task->iser_conn->ib_conn.device->dev.ib_dev;
 	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
 }
 
@@ -194,18 +194,18 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
 {
 	struct scatterlist *sg = mem->sg;
 
-	reg->sge.lkey = device->pd->local_dma_lkey;
+	reg->sge.lkey = device->dev.ib_pd->local_dma_lkey;
 	/*
 	 * FIXME: rework the registration code path to differentiate
 	 * rkey/lkey use cases
 	 */
 
-	if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
-		reg->rkey = device->pd->unsafe_global_rkey;
+	if (device->dev.ib_pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+		reg->rkey = device->dev.ib_pd->unsafe_global_rkey;
 	else
 		reg->rkey = 0;
-	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
-	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+	reg->sge.addr = ib_sg_dma_address(device->dev.ib_dev, &sg[0]);
+	reg->sge.length = ib_sg_dma_len(device->dev.ib_dev, &sg[0]);
 
 	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
 		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
@@ -243,7 +243,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
 			      mem->size, NULL, iser_set_page);
 	if (unlikely(plen < mem->size)) {
 		iser_err("page vec too short to hold this SG\n");
-		iser_data_buf_dump(mem, device->ib_device);
+		iser_data_buf_dump(mem, device->dev.ib_dev);
 		iser_dump_page_vec(page_vec);
 		return -EINVAL;
 	}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 56b7240a3fc3..85e24714604d 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -65,9 +65,9 @@ static void iser_event_handler(struct ib_event_handler *handler,
  *
  * returns 0 on success, -1 on failure
  */
-static int iser_create_device_ib_res(struct iser_device *device)
+int iser_create_device_ib_res(struct iser_device *device)
 {
-	struct ib_device *ib_dev = device->ib_device;
+	struct ib_device *ib_dev = device->dev.ib_dev;
 	int ret, i, max_cqe;
 
 	ret = iser_assign_reg_ops(device);
@@ -88,11 +88,6 @@ static int iser_create_device_ib_res(struct iser_device *device)
 		  device->comps_used, ib_dev->name,
 		  ib_dev->num_comp_vectors, max_cqe);
 
-	device->pd = ib_alloc_pd(ib_dev,
-		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
-	if (IS_ERR(device->pd))
-		goto pd_err;
-
 	for (i = 0; i < device->comps_used; i++) {
 		struct iser_comp *comp = &device->comps[i];
 
@@ -116,8 +111,6 @@ static int iser_create_device_ib_res(struct iser_device *device)
 		if (comp->cq)
 			ib_free_cq(comp->cq);
 	}
-	ib_dealloc_pd(device->pd);
-pd_err:
 	kfree(device->comps);
 comps_err:
 	iser_err("failed to allocate an IB resource\n");
@@ -128,7 +121,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
  * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
  * CQ and PD created with the device associated with the adapator.
  */
-static void iser_free_device_ib_res(struct iser_device *device)
+void iser_free_device_ib_res(struct iser_device *device)
 {
 	int i;
 
@@ -140,11 +133,9 @@ static void iser_free_device_ib_res(struct iser_device *device)
 	}
 
 	ib_unregister_event_handler(&device->event_handler);
-	ib_dealloc_pd(device->pd);
 
 	kfree(device->comps);
 	device->comps = NULL;
-	device->pd = NULL;
 }
 
 /**
@@ -192,7 +183,7 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
 				    IB_ACCESS_REMOTE_WRITE |
 				    IB_ACCESS_REMOTE_READ);
 
-	fmr_pool = ib_create_fmr_pool(device->pd, &params);
+	fmr_pool = ib_create_fmr_pool(device->dev.ib_pd, &params);
 	if (IS_ERR(fmr_pool)) {
 		ret = PTR_ERR(fmr_pool);
 		iser_err("FMR allocation failed, err %d\n", ret);
@@ -239,7 +230,7 @@ iser_alloc_reg_res(struct iser_device *device,
 		   struct iser_reg_resources *res,
 		   unsigned int size)
 {
-	struct ib_device *ib_dev = device->ib_device;
+	struct ib_device *ib_dev = device->dev.ib_dev;
 	enum ib_mr_type mr_type;
 	int ret;
 
@@ -364,7 +355,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
 	spin_lock_init(&fr_pool->lock);
 	fr_pool->size = 0;
 	for (i = 0; i < cmds_max; i++) {
-		desc = iser_create_fastreg_desc(device, device->pd,
+		desc = iser_create_fastreg_desc(device, device->dev.ib_pd,
 						ib_conn->pi_support, size);
 		if (IS_ERR(desc)) {
 			ret = PTR_ERR(desc);
@@ -428,7 +419,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
 	BUG_ON(ib_conn->device == NULL);
 
 	device = ib_conn->device;
-	ib_dev = device->ib_device;
+	ib_dev = device->dev.ib_dev;
 
 	memset(&init_attr, 0, sizeof init_attr);
 
@@ -468,11 +459,12 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
 			iser_conn->max_cmds =
 				ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
 			iser_dbg("device %s supports max_send_wr %d\n",
-				 device->ib_device->name, ib_dev->attrs.max_qp_wr);
+				 device->dev.ib_dev->name,
+				 ib_dev->attrs.max_qp_wr);
 		}
 	}
 
-	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
+	ret = rdma_create_qp(ib_conn->cma_id, device->dev.ib_pd, &init_attr);
 	if (ret)
 		goto out_err;
 
@@ -492,57 +484,6 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
 }
 
 /**
- * based on the resolved device node GUID see if there already allocated
- * device for this device. If there's no such, create one.
- */
-static
-struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
-{
-	struct iser_device *device;
-
-	mutex_lock(&ig.device_list_mutex);
-
-	list_for_each_entry(device, &ig.device_list, ig_list)
-		/* find if there's a match using the node GUID */
-		if (device->ib_device->node_guid == cma_id->device->node_guid)
-			goto inc_refcnt;
-
-	device = kzalloc(sizeof *device, GFP_KERNEL);
-	if (device == NULL)
-		goto out;
-
-	/* assign this device to the device */
-	device->ib_device = cma_id->device;
-	/* init the device and link it into ig device list */
-	if (iser_create_device_ib_res(device)) {
-		kfree(device);
-		device = NULL;
-		goto out;
-	}
-	list_add(&device->ig_list, &ig.device_list);
-
-inc_refcnt:
-	device->refcount++;
-out:
-	mutex_unlock(&ig.device_list_mutex);
-	return device;
-}
-
-/* if there's no demand for this device, release it */
-static void iser_device_try_release(struct iser_device *device)
-{
-	mutex_lock(&ig.device_list_mutex);
-	device->refcount--;
-	iser_info("device %p refcount %d\n", device, device->refcount);
-	if (!device->refcount) {
-		iser_free_device_ib_res(device);
-		list_del(&device->ig_list);
-		kfree(device);
-	}
-	mutex_unlock(&ig.device_list_mutex);
-}
-
-/**
  * Called with state mutex held
  **/
 static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
@@ -609,7 +550,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
 			iser_free_rx_descriptors(iser_conn);
 
 		if (device != NULL) {
-			iser_device_try_release(device);
+			ib_pool_dev_put(&device->dev);
 			ib_conn->device = NULL;
 		}
 	}
@@ -706,12 +647,12 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
 	unsigned short sg_tablesize, sup_sg_tablesize;
 
 	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
-	if (device->ib_device->attrs.device_cap_flags &
+	if (device->dev.ib_dev->attrs.device_cap_flags &
 			IB_DEVICE_MEM_MGT_EXTENSIONS)
 		sup_sg_tablesize =
 			min_t(
 			 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
-			 device->ib_device->attrs.max_fast_reg_page_list_len);
+			 device->dev.ib_dev->attrs.max_fast_reg_page_list_len);
 	else
 		sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
 
@@ -723,6 +664,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
  **/
 static void iser_addr_handler(struct rdma_cm_id *cma_id)
 {
+	struct ib_pool_device *pool_dev;
 	struct iser_device *device;
 	struct iser_conn   *iser_conn;
 	struct ib_conn   *ib_conn;
@@ -734,22 +676,24 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 		return;
 
 	ib_conn = &iser_conn->ib_conn;
-	device = iser_device_find_by_ib_device(cma_id);
-	if (!device) {
+	pool_dev = ib_pool_dev_find_get_or_create(cma_id->device,
+						  &ig.ib_devs_pool);
+	if (unlikely(!pool_dev)) {
 		iser_err("device lookup/creation failed\n");
 		iser_connect_error(cma_id);
 		return;
 	}
+	device = container_of(pool_dev, typeof(*device), dev);
 
 	ib_conn->device = device;
 
 	/* connection T10-PI support */
 	if (iser_pi_enable) {
-		if (!(device->ib_device->attrs.device_cap_flags &
+		if (!(device->dev.ib_dev->attrs.device_cap_flags &
 		      IB_DEVICE_SIGNATURE_HANDOVER)) {
 			iser_warn("T10-PI requested but not supported on %s, "
 				  "continue without T10-PI\n",
-				  ib_conn->device->ib_device->name);
+				  ib_conn->device->dev.ib_dev->name);
 			ib_conn->pi_support = false;
 		} else {
 			ib_conn->pi_support = true;
@@ -787,7 +731,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
 		goto failure;
 
 	memset(&conn_param, 0, sizeof conn_param);
-	conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
+	conn_param.responder_resources = device->dev.ib_dev->attrs.max_qp_rd_atom;
 	conn_param.initiator_depth     = 1;
 	conn_param.retry_count	       = 7;
 	conn_param.rnr_retry_count     = 6;
@@ -1012,7 +956,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
 
 	desc->sge.addr = desc->rsp_dma;
 	desc->sge.length = ISER_RX_LOGIN_SIZE;
-	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;
+	desc->sge.lkey = ib_conn->device->dev.ib_pd->local_dma_lkey;
 
 	desc->cqe.done = iser_login_rsp;
 	wr.wr_cqe = &desc->cqe;
@@ -1074,7 +1018,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
 	struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
 	int ib_ret;
 
-	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+	ib_dma_sync_single_for_device(ib_conn->device->dev.ib_dev,
 				      tx_desc->dma_addr, ISER_HEADERS_LEN,
 				      DMA_TO_DEVICE);
 
-- 
2.13.1
