[PULL 45/56] Lift max ram slots limit in libvhost-user
From: Michael S. Tsirkin
Subject: [PULL 45/56] Lift max ram slots limit in libvhost-user
Date: Wed, 10 Jun 2020 00:28:13 -0400
From: Raphael Norwitz <raphael.norwitz@nutanix.com>
Historically, VMs with vhost-user devices could hot-add memory a maximum
of 8 times. Now that the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS
protocol feature has been added, VMs with vhost-user backends which
support this new feature can use a configurable number of ram slots,
up to the maximum supported by the target platform.
This change adds VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS support for
backends built with libvhost-user, and increases the number of supported
ram slots from 8 to 32.
Memory hot-add, hot-remove and postcopy migration were tested with
the vhost-user-bridge sample.
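
For illustration only (not part of this patch): a minimal, self-contained C
sketch of how a master-side component might choose a slot limit from the
negotiated protocol features. The helper usable_ram_slots() is hypothetical;
only the constants mirror those defined in the diff below.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
#define VHOST_MEMORY_BASELINE_NREGIONS 8
#define VHOST_USER_MAX_RAM_SLOTS 32

/* Hypothetical helper: how many memory slots a master may safely use. */
static uint64_t usable_ram_slots(uint64_t protocol_features,
                                 uint64_t reported_max_slots)
{
    if (protocol_features &
        (1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
        /* Backend can report its limit; with this patch a libvhost-user
         * backend reports 32. */
        return reported_max_slots;
    }
    /* Legacy backend: stay within the historical 8-region limit. */
    return VHOST_MEMORY_BASELINE_NREGIONS;
}

int main(void)
{
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

    printf("%" PRIu64 " usable slots\n",
           usable_ram_slots(features, VHOST_USER_MAX_RAM_SLOTS));
    return 0;
}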
Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Message-Id: <1588533678-23450-11-git-send-email-raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
contrib/libvhost-user/libvhost-user.h | 15 +++++++++++----
contrib/libvhost-user/libvhost-user.c | 17 +++++++++--------
2 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index f8439713a8..844c37c648 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -28,7 +28,13 @@
#define VIRTQUEUE_MAX_SIZE 1024
-#define VHOST_MEMORY_MAX_NREGIONS 8
+#define VHOST_MEMORY_BASELINE_NREGIONS 8
+
+/*
+ * Set a reasonable maximum number of ram slots, which will be supported by
+ * any architecture.
+ */
+#define VHOST_USER_MAX_RAM_SLOTS 32
typedef enum VhostSetConfigType {
VHOST_SET_CONFIG_TYPE_MASTER = 0,
@@ -55,6 +61,7 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
VHOST_USER_PROTOCOL_F_MAX
};
@@ -123,7 +130,7 @@ typedef struct VhostUserMemoryRegion {
typedef struct VhostUserMemory {
uint32_t nregions;
uint32_t padding;
- VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
+ VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;
typedef struct VhostUserMemRegMsg {
@@ -190,7 +197,7 @@ typedef struct VhostUserMsg {
VhostUserInflight inflight;
} payload;
- int fds[VHOST_MEMORY_MAX_NREGIONS];
+ int fds[VHOST_MEMORY_BASELINE_NREGIONS];
int fd_num;
uint8_t *data;
} VU_PACKED VhostUserMsg;
@@ -368,7 +375,7 @@ typedef struct VuDevInflightInfo {
struct VuDev {
int sock;
uint32_t nregions;
- VuDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
+ VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
VuVirtq *vq;
VuDevInflightInfo inflight_info;
int log_call_fd;
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 386449b697..b1e607298c 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -269,7 +269,7 @@ have_userfault(void)
static bool
vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
- char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
+ char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
struct iovec iov = {
.iov_base = (char *)vmsg,
.iov_len = VHOST_USER_HDR_SIZE,
@@ -340,7 +340,7 @@ vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
int rc;
uint8_t *p = (uint8_t *)vmsg;
- char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
+ char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
struct iovec iov = {
.iov_base = (char *)vmsg,
.iov_len = VHOST_USER_HDR_SIZE,
@@ -353,7 +353,7 @@ vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
struct cmsghdr *cmsg;
memset(control, 0, sizeof(control));
- assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
+ assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
if (vmsg->fd_num > 0) {
size_t fdsize = vmsg->fd_num * sizeof(int);
msg.msg_controllen = CMSG_SPACE(fdsize);
@@ -780,7 +780,7 @@ static bool
vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
int i, j;
bool found = false;
- VuDevRegion shadow_regions[VHOST_MEMORY_MAX_NREGIONS] = {};
+ VuDevRegion shadow_regions[VHOST_USER_MAX_RAM_SLOTS] = {};
VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
DPRINT("Removing region:\n");
@@ -813,7 +813,7 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
if (found) {
memcpy(dev->regions, shadow_regions,
- sizeof(VuDevRegion) * VHOST_MEMORY_MAX_NREGIONS);
+ sizeof(VuDevRegion) * VHOST_USER_MAX_RAM_SLOTS);
DPRINT("Successfully removed a region\n");
dev->nregions--;
vmsg_set_reply_u64(vmsg, 0);
@@ -1394,7 +1394,8 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD |
- 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK;
+ 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
+ 1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;
if (have_userfault()) {
features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
@@ -1732,14 +1733,14 @@ static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
{
vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
vmsg->size = sizeof(vmsg->payload.u64);
- vmsg->payload.u64 = VHOST_MEMORY_MAX_NREGIONS;
+ vmsg->payload.u64 = VHOST_USER_MAX_RAM_SLOTS;
vmsg->fd_num = 0;
if (!vu_message_write(dev, dev->sock, vmsg)) {
vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno));
}
- DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_MEMORY_MAX_NREGIONS);
+ DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);
return false;
}
--
MST