On Mon, Sep 13, 2021 at 02:40:09PM +0800, Jason Wang wrote:
> On Mon, Sep 13, 2021 at 2:36 PM Michael S. Tsirkin <mst@xxxxxxxxxx> wrote:
> >
> > On Mon, Sep 13, 2021 at 01:53:53PM +0800, Jason Wang wrote:
> > > This patch validates the used buffer length provided by the device
> > > before trying to use it. This is done by recording the in buffer
> > > length in a new field in the desc_state structure during
> > > virtqueue_add(); we can then fail virtqueue_get_buf() when we find
> > > the device trying to give us a used buffer length greater than the
> > > in buffer length.
> > >
> > > Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
> >
> > Hmm, this was proposed in the past. The overhead here is not
> > negligible, so I'd like to know more: when is it a problem if the
> > used len is too big?
>
> One example is: https://github.com/fuzzsa/fuzzsa-bugs/blob/master/virtio_rng.md
>
> And there would be more, I guess.

That seems to suggest that validating at the hwrng level is better, and
I think it makes sense: it will fix all rng drivers in one go (rough
sketch below).

> > Don't the affected drivers already track the length somewhere, and
> > so can validate it without the extra cost in the virtio core?
>
> Probably, but this requires changes in each device driver. And it
> would be easily forgotten when new drivers are introduced.
>
> Thanks

My thinking is that one just has to be aware that drivers have to be
audited before being enabled. We can validate the used length in the
core, but e.g. for virtio net the length is inside the buffer anyway.
If we really have to, maybe reuse extra->len? And maybe add a mod param
so the check can be turned off, e.g. for benchmarking purposes.
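Something along these lines, maybe (completely untested sketch, the
parameter name is made up, and note that summing extra->len like this
only covers the top-level descriptor table, so indirect buffers would
need separate handling):

static bool force_used_validation = true;
module_param(force_used_validation, bool, 0444);

/*
 * Walk the descriptor chain starting at head and sum the lengths we
 * already record in desc_extra for unmapping, counting only
 * device-writable descriptors. The device-reported used length must
 * not exceed that sum.
 */
static bool vring_used_len_valid_split(const struct vring_virtqueue *vq,
				       unsigned int head, u32 used_len)
{
	const struct vring_desc_extra *extra = vq->split.desc_extra;
	unsigned int i = head;
	u64 in_len = 0;

	if (!force_used_validation)
		return true;

	for (;;) {
		if (extra[i].flags & VRING_DESC_F_WRITE)
			in_len += extra[i].len;
		if (!(extra[i].flags & VRING_DESC_F_NEXT))
			break;
		i = extra[i].next;
	}

	return used_len <= in_len;
}

This trades the extra u64 per descriptor in desc_state for a chain walk
at get_buf time, so it is not free either, but at least the knob lets
people turn it off when benchmarking.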
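And for the rng case specifically, the hwrng-level check I mention
above could be as simple as clamping the reported length (again
untested, modeled loosely on rng_get_data() in
drivers/char/hw_random/core.c and ignoring the legacy data_read path
for brevity):

/*
 * Clamp whatever the backend driver reports to the size the core
 * actually asked for, so a single check covers every rng driver.
 */
static int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait)
{
	int bytes = rng->read(rng, buffer, size, wait);

	/* Do not trust the driver/device to stay within the buffer. */
	if (bytes > 0 && (size_t)bytes > size)
		bytes = size;

	return bytes;
}

That way virtio-rng and all the other rng drivers get covered without
any extra bookkeeping in the ring code.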
> > >
> > > ---
> > >  drivers/virtio/virtio_ring.c | 23 +++++++++++++++++++++++
> > >  1 file changed, 23 insertions(+)
> > >
> > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > > index d2ca0a7365f8..b8374a6144f3 100644
> > > --- a/drivers/virtio/virtio_ring.c
> > > +++ b/drivers/virtio/virtio_ring.c
> > > @@ -69,6 +69,7 @@
> > >  struct vring_desc_state_split {
> > >  	void *data;			/* Data for callback. */
> > >  	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
> > > +	u64 buflen;			/* In buffer length */
> > >  };
> > >
> > >  struct vring_desc_state_packed {
> > > @@ -76,6 +77,7 @@ struct vring_desc_state_packed {
> > >  	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
> > >  	u16 num;			/* Descriptor list length. */
> > >  	u16 last;			/* The last desc state in a list. */
> > > +	u64 buflen;			/* In buffer length */
> > >  };
> > >
> > >  struct vring_desc_extra {
> > > @@ -490,6 +492,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> > >  	unsigned int i, n, avail, descs_used, prev, err_idx;
> > >  	int head;
> > >  	bool indirect;
> > > +	u64 buflen = 0;
> > >
> > >  	START_USE(vq);
> > >
> > > @@ -571,6 +574,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> > >  						     VRING_DESC_F_NEXT |
> > >  						     VRING_DESC_F_WRITE,
> > >  						     indirect);
> > > +			buflen += sg->length;
> > >  		}
> > >  	}
> > >  	/* Last one doesn't continue. */
> > > @@ -605,6 +609,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> > >
> > >  	/* Store token and indirect buffer state. */
> > >  	vq->split.desc_state[head].data = data;
> > > +	vq->split.desc_state[head].buflen = buflen;
> > >  	if (indirect)
> > >  		vq->split.desc_state[head].indir_desc = desc;
> > >  	else
> > > @@ -784,6 +789,11 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
> > >  		BAD_RING(vq, "id %u is not a head!\n", i);
> > >  		return NULL;
> > >  	}
> > > +	if (unlikely(*len > vq->split.desc_state[i].buflen)) {
> > > +		BAD_RING(vq, "used len %d is larger than in buflen %lld\n",
> > > +			 *len, vq->split.desc_state[i].buflen);
> > > +		return NULL;
> > > +	}
> > >
> > >  	/* detach_buf_split clears data, so grab it now. */
> > >  	ret = vq->split.desc_state[i].data;
> > > @@ -1062,6 +1072,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
> > >  	unsigned int i, n, err_idx;
> > >  	u16 head, id;
> > >  	dma_addr_t addr;
> > > +	u64 buflen = 0;
> > >
> > >  	head = vq->packed.next_avail_idx;
> > >  	desc = alloc_indirect_packed(total_sg, gfp);
> > > @@ -1089,6 +1100,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
> > >  			desc[i].addr = cpu_to_le64(addr);
> > >  			desc[i].len = cpu_to_le32(sg->length);
> > >  			i++;
> > > +			if (n >= out_sgs)
> > > +				buflen += sg->length;
> > >  		}
> > >  	}
> > >
> > > @@ -1141,6 +1154,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
> > >  	vq->packed.desc_state[id].data = data;
> > >  	vq->packed.desc_state[id].indir_desc = desc;
> > >  	vq->packed.desc_state[id].last = id;
> > > +	vq->packed.desc_state[id].buflen = buflen;
> > >
> > >  	vq->num_added += 1;
> > >
> > > @@ -1176,6 +1190,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
> > >  	unsigned int i, n, c, descs_used, err_idx;
> > >  	__le16 head_flags, flags;
> > >  	u16 head, id, prev, curr, avail_used_flags;
> > > +	u64 buflen = 0;
> > >
> > >  	START_USE(vq);
> > >
> > > @@ -1250,6 +1265,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
> > >  				1 << VRING_PACKED_DESC_F_AVAIL |
> > >  				1 << VRING_PACKED_DESC_F_USED;
> > >  		}
> > > +		if (n >= out_sgs)
> > > +			buflen += sg->length;
> > >  	}
> > >  }
> > >
> > > @@ -1268,6 +1285,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
> > >  	vq->packed.desc_state[id].data = data;
> > >  	vq->packed.desc_state[id].indir_desc = ctx;
> > >  	vq->packed.desc_state[id].last = prev;
> > > +	vq->packed.desc_state[id].buflen = buflen;
> > >
> > >  	/*
> > >  	 * A driver MUST NOT make the first descriptor in the list
> > > @@ -1455,6 +1473,11 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
> > >  		BAD_RING(vq, "id %u is not a head!\n", id);
> > >  		return NULL;
> > >  	}
> > > +	if (unlikely(*len > vq->packed.desc_state[id].buflen)) {
> > > +		BAD_RING(vq, "used len %d is larger than in buflen %lld\n",
> > > +			 *len, vq->packed.desc_state[id].buflen);
> > > +		return NULL;
> > > +	}
> > >
> > >  	/* detach_buf_packed clears data, so grab it now. */
> > >  	ret = vq->packed.desc_state[id].data;
> > > --
> > > 2.25.1
> >
_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization