Re: [QEMU PATCH v4 09/13] virtio-gpu: Handle resource blob commands


From: Akihiko Odaki
Subject: Re: [QEMU PATCH v4 09/13] virtio-gpu: Handle resource blob commands
Date: Tue, 5 Sep 2023 18:20:43 +0900
User-agent: Mozilla Thunderbird

On 2023/09/05 18:08, Huang Rui wrote:
On Thu, Aug 31, 2023 at 06:24:32PM +0800, Akihiko Odaki wrote:
On 2023/08/31 18:32, Huang Rui wrote:
From: Antonio Caggiano <antonio.caggiano@collabora.com>

Support blob resource creation, mapping and unmapping by calling the
new stable virglrenderer 0.10 interface. Only enabled when available
and requested via the blob config, e.g. -device virtio-vga-gl,blob=true
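
For illustration, a full invocation with blob resources enabled might
look like this (the display backend shown is just one GL-capable
choice):

  qemu-system-x86_64 -display gtk,gl=on -device virtio-vga-gl,blob=true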

Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
---

v1->v2:
      - Remove unused #include "hw/virtio/virtio-iommu.h"

      - Add a local function, called virgl_resource_destroy(), that is used
        to release a vgpu resource on error paths and in resource_unref.

      - Remove virtio_gpu_virgl_resource_unmap from
        virtio_gpu_cleanup_mapping(), since virtio_gpu_cleanup_mapping()
        won't be called on blob resources, which are instead unmapped via
        virgl_cmd_resource_unmap_blob().

      - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths
        and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource
        has been fully initialized.

      - The memory region has a different life-cycle from virtio-gpu
        resources, i.e. it cannot be released synchronously along with the
        vgpu resource. So the field "region" was changed to a pointer that
        will be released automatically once the memory region is unparented
        and all of its references have been released (see the sketch after
        this list).
        Also, since the pointer can be used to indicate whether the blob
        is mapped, the explicit field "mapped" was removed.

      - In virgl_cmd_resource_map_blob(), add a check on the value of
        res->region to prevent it from being called twice on the same
        resource.

      - Remove direct references to parent_obj.

      - Separate declarations from code.
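
A minimal sketch of the unmap path implied by the life-cycle note above,
assuming the hostmem container from VirtIOGPUBase; the helper name is
illustrative and not the exact code in this patch:

    static void virgl_resource_unmap_region(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
    {
        VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

        if (!res->region) {
            return; /* not mapped */
        }

        /* Stop guest access and detach the region from the hostmem
         * container; unparenting drops the parent's reference, and the
         * heap-allocated MemoryRegion is freed only once every remaining
         * reference has been released. */
        memory_region_set_enabled(res->region, false);
        memory_region_del_subregion(&b->hostmem, res->region);
        object_unparent(OBJECT(res->region));
        res->region = NULL;

        /* Simplified: a complete implementation may have to defer this
         * call until the region has actually been finalized. */
        virgl_renderer_resource_unmap(res->resource_id);
    }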

   hw/display/virtio-gpu-virgl.c  | 213 +++++++++++++++++++++++++++++++++
   hw/display/virtio-gpu.c        |   4 +-
   include/hw/virtio/virtio-gpu.h |   5 +
   meson.build                    |   4 +
   4 files changed, 225 insertions(+), 1 deletion(-)

diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 312953ec16..17b634d4ee 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -17,6 +17,7 @@
   #include "trace.h"
   #include "hw/virtio/virtio.h"
   #include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-bswap.h"
#include "ui/egl-helpers.h" @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
       virgl_renderer_resource_create(&args, NULL, 0);
   }
+static void virgl_resource_destroy(VirtIOGPU *g,
+                                   struct virtio_gpu_simple_resource *res)
+{
+    if (!res)
+        return;
+
+    QTAILQ_REMOVE(&g->reslist, res, next);
+
+    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
+    g_free(res->addrs);
+
+    g_free(res);
+}
+
   static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
   {
+    struct virtio_gpu_simple_resource *res;
       struct virtio_gpu_resource_unref unref;
       struct iovec *res_iovs = NULL;
       int num_iovs = 0;
@@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
       VIRTIO_GPU_FILL_CMD(unref);
       trace_virtio_gpu_cmd_res_unref(unref.resource_id);
+    res = virtio_gpu_find_resource(g, unref.resource_id);
+
       virgl_renderer_resource_detach_iov(unref.resource_id,
                                          &res_iovs,
                                          &num_iovs);
       if (res_iovs != NULL && num_iovs != 0) {
           virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
+        if (res) {
+            res->iov = NULL;
+            res->iov_cnt = 0;
+        }
       }
+
       virgl_renderer_resource_unref(unref.resource_id);
+
+    virgl_resource_destroy(g, res);
   }
static void virgl_cmd_context_create(VirtIOGPU *g,
@@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
       g_free(resp);
   }
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+
+static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
+                                           struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_simple_resource *res;
+    struct virtio_gpu_resource_create_blob cblob;
+    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(cblob);
+    virtio_gpu_create_blob_bswap(&cblob);
+    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+    if (cblob.resource_id == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = virtio_gpu_find_resource(g, cblob.resource_id);
+    if (res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+                      __func__, cblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = g_new0(struct virtio_gpu_simple_resource, 1);
+    if (!res) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
+        return;
+    }
+
+    res->resource_id = cblob.resource_id;
+    res->blob_size = cblob.size;
+
+    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
+        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+                                            cmd, &res->addrs, &res->iov,
+                                            &res->iov_cnt);
+        if (!ret) {
+            g_free(res);
+            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+            return;
+        }
+    }
+
+    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+
+    virgl_args.res_handle = cblob.resource_id;
+    virgl_args.ctx_id = cblob.hdr.ctx_id;
+    virgl_args.blob_mem = cblob.blob_mem;
+    virgl_args.blob_id = cblob.blob_id;
+    virgl_args.blob_flags = cblob.blob_flags;
+    virgl_args.size = cblob.size;
+    virgl_args.iovecs = res->iov;
+    virgl_args.num_iovs = res->iov_cnt;
+
+    ret = virgl_renderer_resource_create_blob(&virgl_args);
+    if (ret) {
+        virgl_resource_destroy(g, res);
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+                      __func__, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+    }
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+                                        struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_simple_resource *res;
+    struct virtio_gpu_resource_map_blob mblob;
+    int ret;
+    void *data;
+    uint64_t size;
+    struct virtio_gpu_resp_map_info resp;
+    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+
+    VIRTIO_GPU_FILL_CMD(mblob);
+    virtio_gpu_map_blob_bswap(&mblob);
+
+    if (mblob.resource_id == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = virtio_gpu_find_resource(g, mblob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, mblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+    if (res->region) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n",
+                     __func__, mblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virgl_renderer_resource_map(res->resource_id, &data, &size);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n",
+                      __func__, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res->region = g_new0(MemoryRegion, 1);
+    if (!res->region) {
+        virgl_renderer_resource_unmap(res->resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
+        return;
+    }
+    memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size,
+                                      data);

I think memory_region_init_ram_ptr() should be used instead.

Would you mind explaining the reason?

The documentation comment of memory_region_init_ram_device_ptr() says:
> A RAM device represents a mapping to a physical device, such as to a
> PCI MMIO BAR of an vfio-pci assigned device.  The memory region may be
> mapped into the VM address space and access to the region will modify
> memory directly.  However, the memory region should not be included in
> a memory dump (device may not be enabled/mapped at the time of the
> dump), and operations incompatible with manipulating MMIO should be
> avoided.  Replaces skip_dump flag.

In my understanding, it's not MMIO, so memory_region_init_ram_ptr() should be used instead.
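
For illustration, the suggested replacement takes the same arguments as
the ram-device variant, so the call in the patch would become (sketch
only):

    memory_region_init_ram_ptr(res->region, OBJECT(g), NULL, size, data);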

Regards,
Akihiko Odaki


