Re: [Qemu-devel] [PATCH v5 3/4] block: add block timer and block throttling algorithm

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Fri, Aug 12, 2011 at 1:47 PM, Stefan Hajnoczi <stefanha@xxxxxxxxx> wrote:
> On Fri, Aug 12, 2011 at 6:35 AM, Zhi Yong Wu <zwu.kernel@xxxxxxxxx> wrote:
>> On Tue, Aug 9, 2011 at 4:57 PM, Ram Pai <linuxram@xxxxxxxxxx> wrote:
>>> On Tue, Aug 09, 2011 at 12:17:51PM +0800, Zhi Yong Wu wrote:
>>>> Note:
>>>>       1.) When bps/iops limits are specified to a small value such as 511 bytes/s, this VM will hang up. We are considering how to handle this scenario.
>>>>       2.) When "dd" command is issued in guest, if its option bs is set to a large value such as "bs=1024K", the resulting speed will be slightly bigger than the limits.
>>>>
>>>> For these problems, if you have nice thought, pls let us know.:)
>>>>
>>>> Signed-off-by: Zhi Yong Wu <wuzhy@xxxxxxxxxxxxxxxxxx>
>>>> ---
>>>>  block.c     |  347 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
>>>>  block.h     |    6 +-
>>>>  block_int.h |   30 +++++
>>>>  3 files changed, 372 insertions(+), 11 deletions(-)
>>>>
>>>> diff --git a/block.c b/block.c
>>>> index 24a25d5..8fd6643 100644
>>>> --- a/block.c
>>>> +++ b/block.c
>>>> @@ -29,6 +29,9 @@
>>>>  #include "module.h"
>>>>  #include "qemu-objects.h"
>>>>
>>>> +#include "qemu-timer.h"
>>>> +#include "block/blk-queue.h"
>>>> +
>>>>  #ifdef CONFIG_BSD
>>>>  #include <sys/types.h>
>>>>  #include <sys/stat.h>
>>>> @@ -58,6 +61,13 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
>>>>  static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
>>>>                           const uint8_t *buf, int nb_sectors);
>>>>
>>>> +static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
>>>> +        bool is_write, double elapsed_time, uint64_t *wait);
>>>> +static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
>>>> +        double elapsed_time, uint64_t *wait);
>>>> +static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
>>>> +        bool is_write, uint64_t *wait);
>>>> +
>>>>  static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
>>>>      QTAILQ_HEAD_INITIALIZER(bdrv_states);
>>>>
>>>> @@ -90,6 +100,68 @@ int is_windows_drive(const char *filename)
>>>>  }
>>>>  #endif
>>>>
>>>> +/* throttling disk I/O limits */
>>>> +void bdrv_io_limits_disable(BlockDriverState *bs)
>>>> +{
>>>> +    bs->io_limits_enabled = false;
>>>> +    bs->req_from_queue    = false;
>>>> +
>>>> +    if (bs->block_queue) {
>>>> +        qemu_block_queue_flush(bs->block_queue);
>>>> +        qemu_del_block_queue(bs->block_queue);
>>>> +    }
>>>> +
>>>> +    if (bs->block_timer) {
>>>> +        qemu_del_timer(bs->block_timer);
>>>> +        qemu_free_timer(bs->block_timer);
>>>> +    }
>>>> +
>>>> +    bs->slice_start[0]   = 0;
>>>> +    bs->slice_start[1]   = 0;
>>>> +
>>>> +    bs->slice_end[0]     = 0;
>>>> +    bs->slice_end[1]     = 0;
>>>> +}
>>>> +
>>>> +static void bdrv_block_timer(void *opaque)
>>>> +{
>>>> +    BlockDriverState *bs = opaque;
>>>> +    BlockQueue *queue = bs->block_queue;
>>>> +
>>>> +    qemu_block_queue_flush(queue);
>>>> +}
>>>> +
>>>> +void bdrv_io_limits_enable(BlockDriverState *bs)
>>>> +{
>>>> +    bs->req_from_queue = false;
>>>> +
>>>> +    bs->block_queue    = qemu_new_block_queue();
>>>> +    bs->block_timer    = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
>>>> +
>>>> +    bs->slice_start[BLOCK_IO_LIMIT_READ]  = qemu_get_clock_ns(vm_clock);
>>>> +    bs->slice_start[BLOCK_IO_LIMIT_WRITE] = qemu_get_clock_ns(vm_clock);
>>>
>>> a minor comment. better to keep the slice_start of the both the READ and WRITE
>>> side the same.
>>>
>>>    bs->slice_start[BLOCK_IO_LIMIT_WRITE] = bs->slice_start[BLOCK_IO_LIMIT_READ];
>>>
>>> saves  a call to qemu_get_clock_ns().
>>>
>>>> +
>>>> +    bs->slice_end[BLOCK_IO_LIMIT_READ]    =
>>>> +                      qemu_get_clock_ns(vm_clock) + BLOCK_IO_SLICE_TIME;
>>>
>>>    bs->slice_end[BLOCK_IO_LIMIT_READ] = bs->slice_start[BLOCK_IO_LIMIT_READ] +
>>>                        BLOCK_IO_SLICE_TIME;
>>>
>>> saves one more call to qemu_get_clock_ns()
>>>
>>>> +    bs->slice_end[BLOCK_IO_LIMIT_WRITE]   =
>>>> +                      qemu_get_clock_ns(vm_clock) + BLOCK_IO_SLICE_TIME;
>>>
>>>
>>>    bs->slice_end[BLOCK_IO_LIMIT_WRITE] = bs->slice_start[BLOCK_IO_LIMIT_WRITE] +
>>>                        BLOCK_IO_SLICE_TIME;
>>>
>>> yet another call saving.
>>>
>>>
>>>> +}
>>>> +
>>>> +bool bdrv_io_limits_enabled(BlockDriverState *bs)
>>>> +{
>>>> +    BlockIOLimit *io_limits = &bs->io_limits;
>>>> +    if ((io_limits->bps[BLOCK_IO_LIMIT_READ] == 0)
>>>> +         && (io_limits->bps[BLOCK_IO_LIMIT_WRITE] == 0)
>>>> +         && (io_limits->bps[BLOCK_IO_LIMIT_TOTAL] == 0)
>>>> +         && (io_limits->iops[BLOCK_IO_LIMIT_READ] == 0)
>>>> +         && (io_limits->iops[BLOCK_IO_LIMIT_WRITE] == 0)
>>>> +         && (io_limits->iops[BLOCK_IO_LIMIT_TOTAL] == 0)) {
>>>> +        return false;
>>>> +    }
>>>> +
>>>> +    return true;
>>>> +}
>>>
>>> can be optimized to:
>>>
>>>        return (io_limits->bps[BLOCK_IO_LIMIT_READ]
>>>                || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
>>>                || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
>>>                || io_limits->iops[BLOCK_IO_LIMIT_READ]
>>>                || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
>>>                || io_limits->iops[BLOCK_IO_LIMIT_TOTAL]);
>> I want to apply this, but it violates the QEMU coding style.
>
> Perhaps checkpatch.pl complains because of the (...) around the return
> value.  Try removing them.
After I removed the parentheses, it works now. Thanks.
>
> Stefan
>



-- 
Regards,

Zhi Yong Wu
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux