Emulated PCI IDE controllers now use the memory access interface. This also
allows an emulated IOMMU to translate and check accesses. Map invalidation
results in cancelling DMA transfers. Since the guest OS cannot reliably
recover the DMA results once the mapping has changed, cancellation is a
fairly good approximation.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@xxxxxxxxxxx>
---
 dma-helpers.c     |   37 +++++++++++++++++++++++++++++++++++--
 dma.h             |   21 ++++++++++++++++++++-
 hw/ide/core.c     |   15 ++++++++-------
 hw/ide/internal.h |   39 +++++++++++++++++++++++++++++++++++++++
 hw/ide/pci.c      |    7 +++++++
 5 files changed, 109 insertions(+), 10 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index d4fc077..408fee3 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -10,12 +10,34 @@
 #include "dma.h"
 #include "block_int.h"
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
+static void *qemu_sglist_default_map(void *opaque,
+                                     target_phys_addr_t addr,
+                                     target_phys_addr_t *len,
+                                     int is_write)
+{
+    return cpu_physical_memory_map(addr, len, is_write);
+}
+
+static void qemu_sglist_default_unmap(void *opaque,
+                                      void *buffer,
+                                      target_phys_addr_t len,
+                                      int is_write,
+                                      target_phys_addr_t access_len)
+{
+    cpu_physical_memory_unmap(buffer, len, is_write, access_len);
+}
+
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
+                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap, void *opaque)
 {
     qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
     qsg->nsg = 0;
     qsg->nalloc = alloc_hint;
     qsg->size = 0;
+
+    qsg->map = map ? map : (QEMUSGMapFunc *) qemu_sglist_default_map;
+    qsg->unmap = unmap ? unmap : (QEMUSGUnmapFunc *) qemu_sglist_default_unmap;
+    qsg->opaque = opaque;
 }
 
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
@@ -79,6 +101,16 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
     }
 }
 
+static void dma_bdrv_cancel(void *opaque)
+{
+    DMAAIOCB *dbs = opaque;
+
+    bdrv_aio_cancel(dbs->acb);
+    dma_bdrv_unmap(dbs);
+    qemu_iovec_destroy(&dbs->iov);
+    qemu_aio_release(dbs);
+}
+
 static void dma_bdrv_cb(void *opaque, int ret)
 {
     DMAAIOCB *dbs = (DMAAIOCB *)opaque;
@@ -100,7 +132,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+        mem = dbs->sg->map(dbs->sg->opaque, dma_bdrv_cancel, dbs,
+                           cur_addr, &cur_len, !dbs->is_write);
         if (!mem)
             break;
         qemu_iovec_add(&dbs->iov, mem, cur_len);
diff --git a/dma.h b/dma.h
index f3bb275..d48f35c 100644
--- a/dma.h
+++ b/dma.h
@@ -15,6 +15,19 @@
 #include "hw/hw.h"
 #include "block.h"
 
+typedef void QEMUSGInvalMapFunc(void *opaque);
+typedef void *QEMUSGMapFunc(void *opaque,
+                            QEMUSGInvalMapFunc *inval_cb,
+                            void *inval_opaque,
+                            target_phys_addr_t addr,
+                            target_phys_addr_t *len,
+                            int is_write);
+typedef void QEMUSGUnmapFunc(void *opaque,
+                             void *buffer,
+                             target_phys_addr_t len,
+                             int is_write,
+                             target_phys_addr_t access_len);
+
 typedef struct {
     target_phys_addr_t base;
     target_phys_addr_t len;
@@ -25,9 +38,15 @@ typedef struct {
     int nsg;
     int nalloc;
     target_phys_addr_t size;
+
+    QEMUSGMapFunc *map;
+    QEMUSGUnmapFunc *unmap;
+    void *opaque;
 } QEMUSGList;
 
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
+                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap,
+                      void *opaque);
 void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                      target_phys_addr_t len);
 void qemu_sglist_destroy(QEMUSGList *qsg);
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 0b3b7c2..c19013a 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -435,7 +435,8 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
     } prd;
     int l, len;
 
-    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1);
+    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1,
+                     bm->map, bm->unmap, bm->opaque);
     s->io_buffer_size = 0;
     for(;;) {
         if (bm->cur_prd_len == 0) {
@@ -443,7 +444,7 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
             if (bm->cur_prd_last ||
                 (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
                 return s->io_buffer_size != 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
             bm->cur_addr += 8;
             prd.addr = le32_to_cpu(prd.addr);
             prd.size = le32_to_cpu(prd.size);
@@ -526,7 +527,7 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
                return 0;
-            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
+            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
@@ -541,11 +542,11 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
        l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
-                cpu_physical_memory_write(bm->cur_prd_addr,
-                                          s->io_buffer + s->io_buffer_index, l);
+                bmdma_memory_write(bm, bm->cur_prd_addr,
+                                   s->io_buffer + s->io_buffer_index, l);
            } else {
-                cpu_physical_memory_read(bm->cur_prd_addr,
-                                         s->io_buffer + s->io_buffer_index, l);
+                bmdma_memory_read(bm, bm->cur_prd_addr,
+                                  s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index eef1ee1..0f3b707 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -476,6 +476,24 @@ struct IDEDeviceInfo {
 #define BM_CMD_START 0x01
 #define BM_CMD_READ  0x08
 
+typedef void BMDMAInvalMapFunc(void *opaque);
+typedef void BMDMARWFunc(void *opaque,
+                         target_phys_addr_t addr,
+                         uint8_t *buf,
+                         target_phys_addr_t len,
+                         int is_write);
+typedef void *BMDMAMapFunc(void *opaque,
+                           BMDMAInvalMapFunc *inval_cb,
+                           void *inval_opaque,
+                           target_phys_addr_t addr,
+                           target_phys_addr_t *len,
+                           int is_write);
+typedef void BMDMAUnmapFunc(void *opaque,
+                            void *buffer,
+                            target_phys_addr_t len,
+                            int is_write,
+                            target_phys_addr_t access_len);
+
 struct BMDMAState {
     uint8_t cmd;
     uint8_t status;
@@ -495,8 +513,29 @@ struct BMDMAState {
     int64_t sector_num;
     uint32_t nsector;
     QEMUBH *bh;
+
+    BMDMARWFunc *rw;
+    BMDMAMapFunc *map;
+    BMDMAUnmapFunc *unmap;
+    void *opaque;
 };
 
+static inline void bmdma_memory_read(BMDMAState *bm,
+                                     target_phys_addr_t addr,
+                                     uint8_t *buf,
+                                     target_phys_addr_t len)
+{
+    bm->rw(bm->opaque, addr, buf, len, 0);
+}
+
+static inline void bmdma_memory_write(BMDMAState *bm,
+                                      target_phys_addr_t addr,
+                                      uint8_t *buf,
+                                      target_phys_addr_t len)
+{
+    bm->rw(bm->opaque, addr, buf, len, 1);
+}
+
 static inline IDEState *idebus_active_if(IDEBus *bus)
 {
     return bus->ifs + bus->unit;
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 4d95cc5..5879044 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -183,4 +183,11 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
             continue;
         ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
     }
+
+    for (i = 0; i < 2; i++) {
+        d->bmdma[i].rw = (void *) pci_memory_rw;
+        d->bmdma[i].map = (void *) pci_memory_map;
+        d->bmdma[i].unmap = (void *) pci_memory_unmap;
+        d->bmdma[i].opaque = dev;
+    }
 }
-- 
1.7.1
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
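
[Editor's illustrative sketch, not part of the patch above.]

To make the contract of the new QEMUSGMapFunc/QEMUSGInvalMapFunc hooks concrete: a map
implementation remembers the caller's invalidation callback and invokes it when the
translation changes, which is how dma_bdrv_cancel() ends up cancelling an in-flight
transfer. The following minimal, self-contained C sketch models that flow. The ToyIOMMU
type and the iommu_map()/iommu_invalidate() functions are invented for illustration and
stand in for a real emulated IOMMU; only the callback wiring mirrors the patch.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t target_phys_addr_t;

/* Simplified mirror of the callback typedef added to dma.h. */
typedef void QEMUSGInvalMapFunc(void *opaque);

typedef struct {
    uint8_t ram[4096];              /* pretend guest RAM behind the IOMMU */
    QEMUSGInvalMapFunc *inval_cb;   /* callback registered by the mapping owner */
    void *inval_opaque;
    int mapped;
} ToyIOMMU;

/* Hypothetical map implementation: clamp the length, remember the
 * invalidation callback, return a host pointer for the translated range. */
static void *iommu_map(ToyIOMMU *iommu, QEMUSGInvalMapFunc *inval_cb,
                       void *inval_opaque, target_phys_addr_t addr,
                       target_phys_addr_t *len, int is_write)
{
    (void)is_write;
    if (addr >= sizeof(iommu->ram)) {
        return NULL;
    }
    if (addr + *len > sizeof(iommu->ram)) {
        *len = sizeof(iommu->ram) - addr;
    }
    iommu->inval_cb = inval_cb;
    iommu->inval_opaque = inval_opaque;
    iommu->mapped = 1;
    return iommu->ram + addr;
}

/* Hypothetical invalidation path: when the guest changes the mapping,
 * run the registered callback so the owner can cancel its DMA transfer
 * (the role dma_bdrv_cancel() plays in the patch). */
static void iommu_invalidate(ToyIOMMU *iommu)
{
    if (iommu->mapped && iommu->inval_cb) {
        iommu->inval_cb(iommu->inval_opaque);
    }
    iommu->mapped = 0;
}

/* Stand-in for the DMA transfer state (DMAAIOCB in the patch). */
static void cancel_dma(void *opaque)
{
    printf("cancelling DMA for %s\n", (const char *)opaque);
}

int main(void)
{
    ToyIOMMU iommu = {{0}, NULL, NULL, 0};
    target_phys_addr_t len = 512;
    char name[] = "ide0";
    void *mem;

    mem = iommu_map(&iommu, cancel_dma, name, 0x100, &len, 1);
    printf("mapped %p, len %llu\n", mem, (unsigned long long)len);

    iommu_invalidate(&iommu);   /* guest remapped the page: DMA gets cancelled */
    return 0;
}

Built with a plain C compiler, running it prints the mapping and then the cancellation
message, mirroring how an IDE DMA transfer would be cancelled when an emulated IOMMU
mapping it depends on is invalidated.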