qemu-devel

Re: [PATCH 3/3] vdpa: Check for iova range at mappings changes


From: Michael S. Tsirkin
Subject: Re: [PATCH 3/3] vdpa: Check for iova range at mappings changes
Date: Tue, 5 Oct 2021 06:46:38 -0400

On Tue, Oct 05, 2021 at 11:58:12AM +0200, Eugenio Perez Martin wrote:
> On Tue, Oct 5, 2021 at 10:14 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Tue, Oct 05, 2021 at 10:01:31AM +0200, Eugenio Pérez wrote:
> > > Check the vdpa device's iova range before updating memory regions so we
> > > don't add any mapping outside of it, and report any invalid change.
> > >
> > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > ---
> > >  include/hw/virtio/vhost-vdpa.h |  2 +
> > >  hw/virtio/vhost-vdpa.c         | 68 ++++++++++++++++++++++++++--------
> > >  hw/virtio/trace-events         |  1 +
> > >  3 files changed, 55 insertions(+), 16 deletions(-)
> > >
> > > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > > index a8963da2d9..c288cf7ecb 100644
> > > --- a/include/hw/virtio/vhost-vdpa.h
> > > +++ b/include/hw/virtio/vhost-vdpa.h
> > > @@ -13,6 +13,7 @@
> > >  #define HW_VIRTIO_VHOST_VDPA_H
> > >
> > >  #include "hw/virtio/virtio.h"
> > > +#include "standard-headers/linux/vhost_types.h"
> > >
> > >  typedef struct VhostVDPAHostNotifier {
> > >      MemoryRegion mr;
> > > @@ -24,6 +25,7 @@ typedef struct vhost_vdpa {
> > >      uint32_t msg_type;
> > >      bool iotlb_batch_begin_sent;
> > >      MemoryListener listener;
> > > +    struct vhost_vdpa_iova_range iova_range;
> > >      struct vhost_dev *dev;
> > >      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> > >  } VhostVDPA;
> > > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > > index a1de6c7c9c..26d0258723 100644
> > > --- a/hw/virtio/vhost-vdpa.c
> > > +++ b/hw/virtio/vhost-vdpa.c
> > > @@ -33,20 +33,34 @@ static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
> > >      return llend;
> > >  }
> > >
> > > -static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
> > > -{
> > > -    return (!memory_region_is_ram(section->mr) &&
> > > -            !memory_region_is_iommu(section->mr)) ||
> > > -            memory_region_is_protected(section->mr) ||
> > > -           /* vhost-vDPA doesn't allow MMIO to be mapped  */
> > > -            memory_region_is_ram_device(section->mr) ||
> > > -           /*
> > > -            * Sizing an enabled 64-bit BAR can cause spurious mappings to
> > > -            * addresses in the upper part of the 64-bit address space.  These
> > > -            * are never accessed by the CPU and beyond the address width of
> > > -            * some IOMMU hardware.  TODO: VDPA should tell us the IOMMU width.
> > > -            */
> > > -           section->offset_within_address_space & (1ULL << 63);
> > > +static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> > > +                                                uint64_t iova_min,
> > > +                                                uint64_t iova_max)
> > > +{
> > > +    Int128 llend;
> > > +    bool r = (!memory_region_is_ram(section->mr) &&
> > > +              !memory_region_is_iommu(section->mr)) ||
> > > +              memory_region_is_protected(section->mr) ||
> > > +              /* vhost-vDPA doesn't allow MMIO to be mapped  */
> > > +              memory_region_is_ram_device(section->mr);
> > > +    if (r) {
> > > +        return true;
> > > +    }
> > > +
> > > +    if (section->offset_within_address_space < iova_min) {
> > > +        error_report("RAM section out of device range (min=%lu, 
> > > addr=%lu)",
> > > +                     iova_min, section->offset_within_address_space);
> > > +        return true;
> > > +    }
> > > +
> > > +    llend = vhost_vdpa_section_end(section);
> > > +    if (int128_make64(llend) > iova_max) {
> >
> > I am puzzled by this.
> > You are taking an Int128, converting it to a u64, converting it
> > back to an Int128, and comparing it to a u64.
> > Head spins. What is all this back and forth trying to achieve?
> >
> 
> You are totally right. This series was extracted from a longer one
> where I didn't use vhost_vdpa_section_end but raw addresses, and then
> I applied int128_make64 to the wrong variable, too fast.
> 
> To be sure we are on the same page, would it be ok to do:
> 
> if (int128_ge(int128_make64(iova), llend)) {
>     // error message
>     return;
> }
> 
> the same way vhost_vdpa_listener_region_{add,del} do?
> 
> Thanks!


should be ok, yea
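
For reference, a minimal sketch of what that check could look like with QEMU's
Int128 helpers, keeping the comparison in Int128 the whole way (as
vhost_vdpa_listener_region_{add,del} do). This is only an illustration, not
the code of the actual follow-up revision; llend is the Int128 end returned by
vhost_vdpa_section_end(), and the error message format is just an example:

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        /* The section end is past the device's usable IOVA range: skip it. */
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }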

> > > +        error_report("RAM section out of device range (max=%lu, end 
> > > addr=%lu)",
> > > +                     iova_max, (uint64_t)int128_make64(llend));
> > > +        return true;
> > > +    }
> > > +
> > > +    return false;
> > >  }
> > >
> > >  static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> > > @@ -158,7 +172,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> > >      void *vaddr;
> > >      int ret;
> > >
> > > -    if (vhost_vdpa_listener_skipped_section(section)) {
> > > +    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
> > > +                                            v->iova_range.last)) {
> > >          return;
> > >      }
> > >
> > > @@ -216,7 +231,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> > >      Int128 llend, llsize;
> > >      int ret;
> > >
> > > -    if (vhost_vdpa_listener_skipped_section(section)) {
> > > +    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
> > > +                                            v->iova_range.last)) {
> > >          return;
> > >      }
> > >
> > > @@ -284,9 +300,24 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
> > >      vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
> > >  }
> > >
> > > +static int vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
> > > +{
> > > +    int ret;
> > > +
> > > +    ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE, &v->iova_range);
> > > +    if (ret != 0) {
> > > +        return ret;
> > > +    }
> > > +
> > > +    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
> > > +                                    v->iova_range.last);
> > > +    return ret;
> > > +}
> > > +
> > >  static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
> > >  {
> > >      struct vhost_vdpa *v;
> > > +    int r;
> > >      assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> > >      trace_vhost_vdpa_init(dev, opaque);
> > >
> > > @@ -296,6 +327,11 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
> > >      v->listener = vhost_vdpa_memory_listener;
> > >      v->msg_type = VHOST_IOTLB_MSG_V2;
> > >
> > > +    r = vhost_vdpa_get_iova_range(v);
> > > +    if (unlikely(!r)) {
> > > +        return r;
> > > +    }
> > > +
> > >      vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> > >                                 VIRTIO_CONFIG_S_DRIVER);
> > >
> > > diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> > > index 8ed19e9d0c..650e521e35 100644
> > > --- a/hw/virtio/trace-events
> > > +++ b/hw/virtio/trace-events
> > > @@ -52,6 +52,7 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index:
> > >  vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
> > >  vhost_vdpa_set_owner(void *dev) "dev: %p"
> > >  vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
> > > +vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64
> > >
> > >  # virtio.c
> > >  virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
> > > --
> > > 2.27.0
> >
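
For context, the iova_range member added to struct vhost_vdpa above is filled
from the range reported by the VHOST_VDPA_GET_IOVA_RANGE ioctl. A sketch of
the kernel UAPI structure behind it (as found in linux/vhost_types.h; first
and last describe an inclusive range, i.e. the first and last addresses the
device can map):

    struct vhost_vdpa_iova_range {
        __u64 first;
        __u64 last;
    };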



