[bug report] RDMA/hns: Support rq record doorbell for the user space

Hello Yixian Liu,

The patch e088a685eae9: "RDMA/hns: Support rq record doorbell for the
user space" from Mar 9, 2018, leads to the following static checker
warning:

	drivers/infiniband/hw/hns/hns_roce_main.c:359 hns_roce_alloc_ucontext()
	warn: check that 'resp.reserved' doesn't leak information

drivers/infiniband/hw/hns/hns_roce_main.c
   336  static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
   337                                                     struct ib_udata *udata)
   338  {
   339          int ret = 0;
   340          struct hns_roce_ucontext *context;
   341          struct hns_roce_ib_alloc_ucontext_resp resp;
   342          struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
   343  
   344          resp.qp_tab_size = hr_dev->caps.num_qps;
   345  
   346          context = kmalloc(sizeof(*context), GFP_KERNEL);
   347          if (!context)
   348                  return ERR_PTR(-ENOMEM);
   349  
   350          ret = hns_roce_uar_alloc(hr_dev, &context->uar);
   351          if (ret)
   352                  goto error_fail_uar_alloc;
   353  
   354          if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
   355                  INIT_LIST_HEAD(&context->page_list);
   356                  mutex_init(&context->page_mutex);
   357          }
   358  
   359          ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
                                               ^^^^
We need to zero out resp.reserved, or only copy sizeof(resp) minus the size
of the u32 reserved field, or something along those lines.  My preference is
to zero it, but I can never predict tastes...  :)  (A minimal sketch follows
the quoted function below.)

Otherwise it's a small information leak: users can read whatever happened to
be on the kernel stack, which is a security issue.

   360          if (ret)
   361                  goto error_fail_copy_to_udata;
   362  
   363          return &context->ibucontext;
   364  
   365  error_fail_copy_to_udata:
   366          hns_roce_uar_free(hr_dev, &context->uar);
   367  
   368  error_fail_uar_alloc:
   369          kfree(context);
   370  
   371          return ERR_PTR(ret);
   372  }
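
For what it's worth, a minimal sketch of the zeroing option (untested, just to
illustrate the idea; an empty initializer zeroes every member, resp.reserved
included, so the ib_copy_to_udata() at line 359 copies deterministic bytes
instead of stack garbage):

	/* zero the whole response struct at declaration */
	struct hns_roce_ib_alloc_ucontext_resp resp = {};

	resp.qp_tab_size = hr_dev->caps.num_qps;

or, if an explicit memset() reads better:

	struct hns_roce_ib_alloc_ucontext_resp resp;

	/* clear reserved/unwritten bytes before filling in the response */
	memset(&resp, 0, sizeof(resp));
	resp.qp_tab_size = hr_dev->caps.num_qps;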

	drivers/infiniband/hw/hns/hns_roce_cq.c:421 hns_roce_ib_create_cq()
	warn: check that 'resp.reserved' doesn't leak information

drivers/infiniband/hw/hns/hns_roce_cq.c
   310  struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
   311                                      const struct ib_cq_init_attr *attr,
   312                                      struct ib_ucontext *context,
   313                                      struct ib_udata *udata)
   314  {
   315          struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
   316          struct device *dev = hr_dev->dev;
   317          struct hns_roce_ib_create_cq ucmd;
   318          struct hns_roce_ib_create_cq_resp resp;
   319          struct hns_roce_cq *hr_cq = NULL;
   320          struct hns_roce_uar *uar = NULL;
   321          int vector = attr->comp_vector;
   322          int cq_entries = attr->cqe;
   323          int ret;
   324  
   325          if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
   326                  dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
   327                          cq_entries, hr_dev->caps.max_cqes);
   328                  return ERR_PTR(-EINVAL);
   329          }
   330  
   331          hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
   332          if (!hr_cq)
   333                  return ERR_PTR(-ENOMEM);
   334  
   335          if (hr_dev->caps.min_cqes)
   336                  cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
   337  
   338          cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
   339          hr_cq->ib_cq.cqe = cq_entries - 1;
   340          spin_lock_init(&hr_cq->lock);
   341  
   342          if (context) {
   343                  if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
   344                          dev_err(dev, "Failed to copy_from_udata.\n");
   345                          ret = -EFAULT;
   346                          goto err_cq;
   347                  }
   348  
   349                  /* Get user space address, write it into mtt table */
   350                  ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
   351                                                &hr_cq->umem, ucmd.buf_addr,
   352                                                cq_entries);
   353                  if (ret) {
   354                          dev_err(dev, "Failed to get_cq_umem.\n");
   355                          goto err_cq;
   356                  }
   357  
   358                  /* Get user space parameters */
   359                  uar = &to_hr_ucontext(context)->uar;
   360          } else {
   361                  if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
   362                          ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
   363                          if (ret)
   364                                  goto err_cq;
   365  
   366                          hr_cq->set_ci_db = hr_cq->db.db_record;
   367                          *hr_cq->set_ci_db = 0;
   368                  }
   369  
   370                  /* Init mmt table and write buff address to mtt table */
   371                  ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
   372                                                 cq_entries);
   373                  if (ret) {
   374                          dev_err(dev, "Failed to alloc_cq_buf.\n");
   375                          goto err_db;
   376                  }
   377  
   378                  uar = &hr_dev->priv_uar;
   379                  hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
   380                                  DB_REG_OFFSET * uar->index;
   381          }
   382  
   383          /* Allocate cq index, fill cq_context */
   384          ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
   385                                  hr_cq, vector);
   386          if (ret) {
   387                  dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
   388                  goto err_mtt;
   389          }
   390  
   391          if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
   392              (udata->outlen == sizeof(resp))) {
   393                  ret = hns_roce_db_map_user(to_hr_ucontext(context),
   394                                             ucmd.db_addr, &hr_cq->db);
   395                  if (ret) {
   396                          dev_err(dev, "cq record doorbell map failed!\n");
   397                          goto err_cqc;
   398                  }
   399          }
   400  
   401          /*
   402           * For the QP created by kernel space, tptr value should be initialized
   403           * to zero; For the QP created by user space, it will cause synchronous
   404           * problems if tptr is set to zero here, so we initialze it in user
   405           * space.
   406           */
   407          if (!context && hr_cq->tptr_addr)
   408                  *hr_cq->tptr_addr = 0;
   409  
   410          /* Get created cq handler and carry out event */
   411          hr_cq->comp = hns_roce_ib_cq_comp;
   412          hr_cq->event = hns_roce_ib_cq_event;
   413          hr_cq->cq_depth = cq_entries;
   414  
   415          if (context) {
   416                  if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
   417                                          (udata->outlen == sizeof(resp))) {
   418                          hr_cq->db_en = 1;
   419                          resp.cqn = hr_cq->cqn;
   420                          resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
   421                          ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
                                                               ^^^^
Same here; a sketch of the fix follows the quoted code below.

   422                  } else
   423                          ret = ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64));
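
The same one-line change would cover this one as well (again just a sketch,
not a tested patch): zero the response struct where it is declared at line
318, e.g.

	/* zeroed so the reserved bytes copied out at line 421 are defined */
	struct hns_roce_ib_create_cq_resp resp = {};

Note that this also helps the "resp.cap_flags |=" at line 420, which currently
ORs into an uninitialized field.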

regards,
dan carpenter


