Use four times more cache entries and divide the memory for each entry
by four. This lowers linear read throughput somewhat, but speeds up
access for filesystems.

Signed-off-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx>
---
 common/block.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/common/block.c b/common/block.c
index 55d8d1637e..219b943afc 100644
--- a/common/block.c
+++ b/common/block.c
@@ -36,7 +36,7 @@ struct chunk {
 	struct list_head list;
 };
 
-#define BUFSIZE (PAGE_SIZE * 16)
+#define BUFSIZE (PAGE_SIZE * 4)
 
 /*
  * Write all dirty chunks back to the device
@@ -361,7 +361,7 @@ int blockdevice_register(struct block_device *blk)
 	debug("%s: rdbufsize: %d blockbits: %d blkmask: 0x%08x\n", __func__,
 			blk->rdbufsize, blk->blockbits, blk->blkmask);
 
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 32; i++) {
 		struct chunk *chunk = xzalloc(sizeof(*chunk));
 		chunk->data = dma_alloc(BUFSIZE);
 		chunk->num = i;
-- 
2.17.1
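
For context: the total chunk cache footprint is unchanged by this patch,
only the granularity shifts. A minimal sketch of the arithmetic, assuming
4 KiB pages (the real PAGE_SIZE comes from the target's headers):

/* Chunk cache sizing sketch; the PAGE_SIZE value below is an
 * assumption for illustration, not the barebox definition. */
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed 4 KiB pages */

int main(void)
{
	/* old layout: 8 chunks of 16 pages each */
	unsigned long old_total = 8 * (PAGE_SIZE * 16);
	/* new layout: 32 chunks of 4 pages each */
	unsigned long new_total = 32 * (PAGE_SIZE * 4);

	/* both print 524288 bytes (128 pages, 512 KiB) */
	printf("old: %lu bytes, new: %lu bytes\n", old_total, new_total);

	return 0;
}

With the same 128-page budget split into 32 smaller chunks, a cache miss
on scattered filesystem metadata evicts and refills less data, while long
sequential reads need more refills per megabyte, which is the trade-off
described in the commit message.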