Avoid bounce buffers when QEMUIOVector elements are within previously
registered bdrv_register_buf() buffers.
The idea is that emulated storage controllers will register guest RAM
using bdrv_register_buf() and set the BDRV_REQ_REGISTERED_BUF flag on I/O
requests. Therefore no blkio_map_mem_region() calls are necessary in the
performance-critical I/O code path.
This optimization doesn't apply if the I/O buffer is internally
allocated by QEMU (e.g. qcow2 metadata). There we still take the slow
path because BDRV_REQ_REGISTERED_BUF is not set.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/blkio.c | 104 ++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 101 insertions(+), 3 deletions(-)
diff --git a/block/blkio.c b/block/blkio.c
index 7fbdbd7fae..37d593a20c 100644
--- a/block/blkio.c
+++ b/block/blkio.c
@@ -1,7 +1,9 @@
#include "qemu/osdep.h"
#include <blkio.h>
#include "block/block_int.h"
+#include "exec/memory.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qapi/qmp/qdict.h"
#include "qemu/module.h"
@@ -28,6 +30,9 @@ typedef struct {
/* Can we skip adding/deleting blkio_mem_regions? */
bool needs_mem_regions;
+
+ /* Are file descriptors necessary for blkio_mem_regions? */
+ bool needs_mem_region_fd;
} BDRVBlkioState;
static void blkio_aiocb_complete(BlkioAIOCB *acb, int ret)
@@ -198,6 +203,8 @@ static BlockAIOCB *blkio_aio_preadv(BlockDriverState *bs,
int64_t offset,
BlockCompletionFunc *cb, void *opaque)
{
BDRVBlkioState *s = bs->opaque;
+ bool needs_mem_regions =
+ s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
struct iovec *iov = qiov->iov;
int iovcnt = qiov->niov;
BlkioAIOCB *acb;
@@ -206,7 +213,7 @@ static BlockAIOCB *blkio_aio_preadv(BlockDriverState *bs,
int64_t offset,
acb = blkio_aiocb_get(bs, cb, opaque);
- if (s->needs_mem_regions) {
+ if (needs_mem_regions) {
if (blkio_aiocb_init_mem_region_locked(acb, bytes) < 0) {
qemu_aio_unref(&acb->common);
return NULL;
@@ -230,6 +237,8 @@ static BlockAIOCB *blkio_aio_pwritev(BlockDriverState *bs,
int64_t offset,
{
uint32_t blkio_flags = (flags & BDRV_REQ_FUA) ? BLKIO_REQ_FUA : 0;
BDRVBlkioState *s = bs->opaque;
+ bool needs_mem_regions =
+ s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
struct iovec *iov = qiov->iov;
int iovcnt = qiov->niov;
BlkioAIOCB *acb;
@@ -238,7 +247,7 @@ static BlockAIOCB *blkio_aio_pwritev(BlockDriverState *bs,
int64_t offset,
acb = blkio_aiocb_get(bs, cb, opaque);
- if (s->needs_mem_regions) {
+ if (needs_mem_regions) {
if (blkio_aiocb_init_mem_region_locked(acb, bytes) < 0) {
qemu_aio_unref(&acb->common);
return NULL;
@@ -324,6 +333,80 @@ static void blkio_io_unplug(BlockDriverState *bs)
}
}
+static void blkio_register_buf(BlockDriverState *bs, void *host, size_t size)
+{
+ BDRVBlkioState *s = bs->opaque;
+ int ret;
+ struct blkio_mem_region region = (struct blkio_mem_region){
+ .addr = host,
+ .len = size,
+ .fd = -1,
+ };
+
+ if (((uintptr_t)host | size) % s->mem_region_alignment) {
+ error_report_once("%s: skipping unaligned buf %p with size %zu",
+ __func__, host, size);
+ return; /* skip unaligned */
+ }
+
+ /* Attempt to find the fd for a MemoryRegion */
+ if (s->needs_mem_region_fd) {
+ int fd = -1;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+
+ /*
+ * bdrv_register_buf() is called with the BQL held so mr lives at least
+ * until this function returns.
+ */
+ mr = memory_region_from_host(host, &offset);
+ if (mr) {
+ fd = memory_region_get_fd(mr);