oops. Please ignore this v5 patch.

I just realized Linus suggested in an old email not to use BUG()/BUG_ON() in
the code. I will switch to the WARN() solution and resend again.

Sorry for the junk email.

Dongli Zhang

On 2019/1/8 4:15 PM, Dongli Zhang wrote:
> The xenstore 'ring-page-order' is used globally for each blkback queue and
> therefore should be read from xenstore only once. However, it is obtained
> in read_per_ring_refs() which might be called multiple times during the
> initialization of each blkback queue.
> 
> If the blkfront is malicious and sets 'ring-page-order' to a different
> value each time before blkback reads it, this may end up at the
> "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
> xen_blkif_disconnect() when the frontend is destroyed.
> 
> This patch reworks connect_ring() to read the xenstore 'ring-page-order'
> only once.
> 
> Signed-off-by: Dongli Zhang <dongli.zhang@xxxxxxxxxx>
> ---
> Changed since v1:
>   * change the order of xenstore reads in read_per_ring_refs
>   * use xenbus_read_unsigned() in connect_ring()
> 
> Changed since v2:
>   * simplify the condition check to "(err != 1 && nr_grefs > 1)"
>   * avoid setting err to -EINVAL to remove one extra line of code
> 
> Changed since v3:
>   * exit at the beginning if !nr_grefs
>   * change the if statements to avoid testing (err != 1) twice
>   * initialize a 'blkif' stack variable (refer to PATCH 1/2)
> 
> Changed since v4:
>   * use BUG_ON() when (nr_grefs == 0) to remind the developer
>   * set err = -EINVAL before xenbus_dev_fatal()
> 
>  drivers/block/xen-blkback/xenbus.c | 69 ++++++++++++++++++++++----------------
>  1 file changed, 40 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
> index a4aadac..f6146cd 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -926,7 +926,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
>          int err, i, j;
>          struct xen_blkif *blkif = ring->blkif;
>          struct xenbus_device *dev = blkif->be->dev;
> -        unsigned int ring_page_order, nr_grefs, evtchn;
> +        unsigned int nr_grefs, evtchn;
>  
>          err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>                            &evtchn);
> @@ -936,43 +936,39 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
>                  return err;
>          }
>  
> -        err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> -                          &ring_page_order);
> -        if (err != 1) {
> -                err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
> +        nr_grefs = blkif->nr_ring_pages;
> +
> +        BUG_ON(!nr_grefs);
> +
> +        for (i = 0; i < nr_grefs; i++) {
> +                char ring_ref_name[RINGREF_NAME_LEN];
> +
> +                snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
> +                err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
> +                                   "%u", &ring_ref[i]);
> +
>                  if (err != 1) {
> +                        if (nr_grefs == 1)
> +                                break;
> +
>                          err = -EINVAL;
> -                        xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
> +                        xenbus_dev_fatal(dev, err, "reading %s/%s",
> +                                         dir, ring_ref_name);
>                          return err;
>                  }
> -                nr_grefs = 1;
> -        } else {
> -                unsigned int i;
> +        }
>  
> -                if (ring_page_order > xen_blkif_max_ring_order) {
> +        if (err != 1) {
> +                WARN_ON(nr_grefs != 1);
> +
> +                err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
> +                                   &ring_ref[0]);
> +                if (err != 1) {
>                          err = -EINVAL;
> -                        xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
> -                                         dir, ring_page_order,
> -                                         xen_blkif_max_ring_order);
> +                        xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
>                          return err;
>                  }
> -
> -                nr_grefs = 1 << ring_page_order;
> -                for (i = 0; i < nr_grefs; i++) {
> -                        char ring_ref_name[RINGREF_NAME_LEN];
> -
> -                        snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
> -                        err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
> -                                           "%u", &ring_ref[i]);
> -                        if (err != 1) {
> -                                err = -EINVAL;
> -                                xenbus_dev_fatal(dev, err, "reading %s/%s",
> -                                                 dir, ring_ref_name);
> -                                return err;
> -                        }
> -                }
>          }
> -        blkif->nr_ring_pages = nr_grefs;
>  
>          for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>                  req = kzalloc(sizeof(*req), GFP_KERNEL);
> @@ -1031,6 +1027,7 @@ static int connect_ring(struct backend_info *be)
>          size_t xspathsize;
>          const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
>          unsigned int requested_num_queues = 0;
> +        unsigned int ring_page_order;
>  
>          pr_debug("%s %s\n", __func__, dev->otherend);
>  
> @@ -1076,6 +1073,20 @@ static int connect_ring(struct backend_info *be)
>                   blkif->nr_rings, blkif->blk_protocol, protocol,
>                   pers_grants ? "persistent grants" : "");
>  
> +        ring_page_order = xenbus_read_unsigned(dev->otherend,
> +                                               "ring-page-order", 0);
> +
> +        if (ring_page_order > xen_blkif_max_ring_order) {
> +                err = -EINVAL;
> +                xenbus_dev_fatal(dev, err,
> +                                 "requested ring page order %d exceed max:%d",
> +                                 ring_page_order,
> +                                 xen_blkif_max_ring_order);
> +                return err;
> +        }
> +
> +        blkif->nr_ring_pages = 1 << ring_page_order;
> +
>          if (blkif->nr_rings == 1)
>                  return read_per_ring_refs(&blkif->rings[0], dev->otherend);
>          else {
> 
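For anyone following along: the change mentioned at the top is to replace the
BUG_ON() in read_per_ring_refs() with a WARN()-based check that fails
gracefully instead of crashing the host. A minimal sketch of that idea (only
an illustration of the direction, not the actual follow-up patch) could look
like:

        nr_grefs = blkif->nr_ring_pages;

        /*
         * nr_ring_pages is set once in connect_ring(); warn and bail out
         * instead of bringing down the host if this invariant is broken.
         */
        if (WARN_ON(!nr_grefs))
                return -EINVAL;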