qemu-devel


From: Jason Wang
Subject: Re: [PATCH v2 15/19] vdpa: manual forward CVQ buffers
Date: Fri, 15 Jul 2022 16:44:01 +0800

On Fri, Jul 15, 2022 at 1:34 PM Eugenio Perez Martin
<eperezma@redhat.com> wrote:
>
> On Fri, Jul 15, 2022 at 6:08 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Fri, Jul 15, 2022 at 12:32 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > >
> > > Do a simple forwarding of CVQ buffers, the same work SVQ could do but
> > > through callbacks. No functional change intended.
> > >
> > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > ---
> > >  include/hw/virtio/vhost-vdpa.h |  3 ++
> > >  hw/virtio/vhost-vdpa.c         |  3 +-
> > >  net/vhost-vdpa.c               | 58 ++++++++++++++++++++++++++++++++++
> > >  3 files changed, 63 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > > index 7214eb47dc..1111d85643 100644
> > > --- a/include/hw/virtio/vhost-vdpa.h
> > > +++ b/include/hw/virtio/vhost-vdpa.h
> > > @@ -15,6 +15,7 @@
> > >  #include <gmodule.h>
> > >
> > >  #include "hw/virtio/vhost-iova-tree.h"
> > > +#include "hw/virtio/vhost-shadow-virtqueue.h"
> > >  #include "hw/virtio/virtio.h"
> > >  #include "standard-headers/linux/vhost_types.h"
> > >
> > > @@ -35,6 +36,8 @@ typedef struct vhost_vdpa {
> > >      /* IOVA mapping used by the Shadow Virtqueue */
> > >      VhostIOVATree *iova_tree;
> > >      GPtrArray *shadow_vqs;
> > > +    const VhostShadowVirtqueueOps *shadow_vq_ops;
> > > +    void *shadow_vq_ops_opaque;
> > >      struct vhost_dev *dev;
> > >      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> > >  } VhostVDPA;
> > > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > > index 96997210be..beaaa7049a 100644
> > > --- a/hw/virtio/vhost-vdpa.c
> > > +++ b/hw/virtio/vhost-vdpa.c
> > > @@ -419,7 +419,8 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
> > >      for (unsigned n = 0; n < hdev->nvqs; ++n) {
> > >          g_autoptr(VhostShadowVirtqueue) svq;
> > >
> > > -        svq = vhost_svq_new(v->iova_tree, NULL, NULL);
> > > +        svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
> > > +                            v->shadow_vq_ops_opaque);
> > >          if (unlikely(!svq)) {
> > >              error_setg(errp, "Cannot create svq %u", n);
> > >              return -1;
> > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > > index df1e69ee72..805c9dd6b6 100644
> > > --- a/net/vhost-vdpa.c
> > > +++ b/net/vhost-vdpa.c
> > > @@ -11,11 +11,14 @@
> > >
> > >  #include "qemu/osdep.h"
> > >  #include "clients.h"
> > > +#include "hw/virtio/virtio-net.h"
> > >  #include "net/vhost_net.h"
> > >  #include "net/vhost-vdpa.h"
> > >  #include "hw/virtio/vhost-vdpa.h"
> > >  #include "qemu/config-file.h"
> > >  #include "qemu/error-report.h"
> > > +#include "qemu/log.h"
> > > +#include "qemu/memalign.h"
> > >  #include "qemu/option.h"
> > >  #include "qapi/error.h"
> > >  #include <linux/vhost.h>
> > > @@ -187,6 +190,57 @@ static NetClientInfo net_vhost_vdpa_info = {
> > >          .check_peer_type = vhost_vdpa_check_peer_type,
> > >  };
> > >
> > > +/**
> > > + * Forward buffer for the moment.
> > > + */
> > > +static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
> > > +                                            SVQElement *svq_elem, void *opaque)
> > > +{
> > > +    VirtQueueElement *elem = &svq_elem->elem;
> > > +    unsigned int n = elem->out_num + elem->in_num;
> > > +    g_autofree struct iovec *dev_buffers = g_new(struct iovec, n);
> > > +    size_t in_len, dev_written;
> > > +    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
> > > +    int r;
> > > +
> > > +    memcpy(dev_buffers, elem->out_sg, elem->out_num * sizeof(*dev_buffers));
> > > +    memcpy(dev_buffers + elem->out_num, elem->in_sg, elem->in_num * sizeof(*dev_buffers));
> > > +
> > > +    r = vhost_svq_add(svq, &dev_buffers[0], elem->out_num, &dev_buffers[1],
> > > +                      elem->in_num, svq_elem);
> > > +    if (unlikely(r != 0)) {
> > > +        if (unlikely(r == -ENOSPC)) {
> > > +        qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
> > > +                          __func__);
> > > +        }
> > > +        goto out;
> > > +    }
> > > +
> > > +    /*
> > > +     * We can poll here since we've had BQL from the time we sent the
> > > +     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
> > > +     * when BQL is released
> > > +     */
> > > +    dev_written = vhost_svq_poll(svq);
> > > +    if (unlikely(dev_written < sizeof(status))) {
> > > +        error_report("Insufficient written data (%zu)", dev_written);
> > > +    }
> > > +
> > > +out:
> > > +    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
> > > +                          sizeof(status));
> > > +    if (unlikely(in_len < sizeof(status))) {
> > > +        error_report("Bad device CVQ written length");
> > > +    }
> > > +    vhost_svq_push_elem(svq, svq_elem, MIN(in_len, sizeof(status)));
> > > +    g_free(svq_elem);
> > > +    return r;
> > > +}
> > > +
> > > +static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
> > > +    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
> > > +};
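
To make the callback plumbing above easier to follow, here is a minimal,
self-contained model of the pattern the patch introduces: the owner of the
vhost-vdpa device registers an ops table plus an opaque pointer, and the
shadow virtqueue calls the avail handler when the guest makes a buffer
available. The type and function names below are simplified stand-ins for
illustration, not QEMU's actual ones.

    /*
     * Simplified model of the callback pattern added by this patch.
     * All names are illustrative stand-ins, not QEMU's real types.
     */
    #include <stdio.h>

    struct shadow_vq;

    typedef struct shadow_vq_ops {
        /* Called when the guest makes an element available on the queue */
        int (*avail_handler)(struct shadow_vq *svq, void *elem, void *opaque);
    } shadow_vq_ops;

    typedef struct shadow_vq {
        const shadow_vq_ops *ops;   /* NULL: generic SVQ forwarding */
        void *ops_opaque;           /* e.g. the net client state */
    } shadow_vq;

    /* Essentially what the two new vhost_svq_new() arguments carry */
    static void shadow_vq_init(shadow_vq *svq, const shadow_vq_ops *ops,
                               void *opaque)
    {
        svq->ops = ops;
        svq->ops_opaque = opaque;
    }

    /* Kick path: defer to the registered handler if there is one */
    static int shadow_vq_handle_avail(shadow_vq *svq, void *elem)
    {
        if (svq->ops && svq->ops->avail_handler) {
            return svq->ops->avail_handler(svq, elem, svq->ops_opaque);
        }
        return 0; /* default: plain forwarding done by the SVQ code itself */
    }

    /* Stand-in for vhost_vdpa_net_handle_ctrl_avail() */
    static int cvq_avail(struct shadow_vq *svq, void *elem, void *opaque)
    {
        (void)svq;
        printf("CVQ element %p handled with state %p\n", elem, opaque);
        return 0;
    }

    int main(void)
    {
        static const shadow_vq_ops net_cvq_ops = { .avail_handler = cvq_avail };
        shadow_vq svq;
        int element;

        shadow_vq_init(&svq, &net_cvq_ops, /* opaque */ &element);
        return shadow_vq_handle_avail(&svq, &element);
    }

When no ops are registered the queue presumably behaves exactly as before,
which matches the "No functional change intended" note in the commit message:
only the queue that installs an avail handler is routed through the callback.
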
> >
> > I wonder if it's possible to even remove this handler. Can we let the
> > kick to be handled by virtio_net_handle_ctrl() in virtio-net.c?
> >
>
> I kind of drafted that here:
> https://lists.nongnu.org/archive/html/qemu-devel/2022-07/msg02652.html
>
> But I'm not sure about the part of not enabling the guest event
> notifiers. I'd say we would need to move SVQ to the VirtIODevice
> itself, which seems rather complicated at this moment.
>
> Without applying that patch, all of the events of vDPA devices go
> either to the device itself or to the SVQ-registered handlers. If we go
> that route, we add the vq event handler, and its behavior would depend
> on the SVQ state plus the specific vq.

So what I have in mind is to let it work more like the existing vhost
backends. In that case the shadow virtqueue is just one more way to
communicate with the vhost backend:

1) vhost-net uses TAP ioctls
2) vhost-user uses a UNIX domain socket
3) vhost-vDPA uses the shadow CVQ
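
Sketching that direction very roughly, with purely illustrative names (none
of this is QEMU's actual API): the device model keeps parsing and applying
the control commands, and each backend only provides a "submit this command
and give me the ack" transport, which for vhost-vDPA would be the shadow CVQ
add-and-poll done under the BQL.

    /*
     * Illustrative model of the "shadow CVQ as backend transport" idea.
     * None of these names are QEMU's real API.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t ctrl_ack;
    #define CTRL_OK  0
    #define CTRL_ERR 1

    /*
     * Backend transport hook: how a control command reaches the backend.
     * vhost-net would use a TAP ioctl here, vhost-user a socket message,
     * and vhost-vDPA would add the buffers to the shadow CVQ and poll for
     * the device's answer.
     */
    typedef struct net_backend {
        ctrl_ack (*submit_ctrl)(struct net_backend *be,
                                const void *cmd, size_t cmd_len);
    } net_backend;

    /*
     * Device-model side, i.e. roughly what virtio_net_handle_ctrl() keeps
     * doing: validate the guest command, then hand it to the backend.
     */
    static ctrl_ack handle_ctrl_command(net_backend *be,
                                        const void *cmd, size_t cmd_len)
    {
        if (cmd_len == 0) {
            return CTRL_ERR;
        }
        return be->submit_ctrl(be, cmd, cmd_len);
    }

    /* Stand-in for a vhost-vDPA transport built on the shadow CVQ */
    static ctrl_ack vdpa_submit_ctrl(net_backend *be,
                                     const void *cmd, size_t cmd_len)
    {
        (void)be;
        (void)cmd;
        printf("forwarding a %zu byte command through the shadow CVQ\n",
               cmd_len);
        return CTRL_OK;
    }

    int main(void)
    {
        net_backend vdpa = { .submit_ctrl = vdpa_submit_ctrl };
        const uint8_t cmd[] = { 0x00, 0x00 }; /* fake class/command pair */

        return handle_ctrl_command(&vdpa, cmd, sizeof(cmd)) == CTRL_OK ? 0 : 1;
    }
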

>
> I'm totally ok with doing it as the first thing for the next merge
> window, or now if I'm certain it's the accepted way to go. Otherwise, I
> think we will once again not be able to deliver at least migration with
> SVQ for this merge window.

So I think it's fine for the current stage. We can improve it in the future.

Thanks

>
> Thanks!
>



