From: Alex Bennée
Subject: Re: [Qemu-devel] [PATCH 5/9] memory: let address_space_rw/ld*/st* run outside the BQL
Date: Wed, 24 Jun 2015 17:56:02 +0100
Paolo Bonzini <address@hidden> writes:
> From: Jan Kiszka <address@hidden>
>
> The MMIO case is further broken up into two cases: if the caller does not
> hold the BQL on invocation, the unlocked variant takes or avoids the BQL
> depending on the locking strategy of the target memory region and its
> coalesced MMIO handling. In this case, the caller should not hold _any_
> lock (a friendly suggestion which is disregarded by virtio-scsi-dataplane).
>
> Signed-off-by: Jan Kiszka <address@hidden>
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
>  exec.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 57 insertions(+), 9 deletions(-)
>
> diff --git a/exec.c b/exec.c
> index 094f87e..78c99f6 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -48,6 +48,7 @@
>  #endif
>  #include "exec/cpu-all.h"
>  #include "qemu/rcu_queue.h"
> +#include "qemu/main-loop.h"
>  #include "exec/cputlb.h"
>  #include "translate-all.h"
>
> @@ -2318,11 +2319,27 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
>      return l;
>  }
>
> -static void prepare_mmio_access(MemoryRegion *mr)
> +static bool prepare_mmio_access(MemoryRegion *mr)
>  {
> +    bool unlocked = !qemu_mutex_iothread_locked();
> +    bool release_lock = false;
> +
> +    if (unlocked && mr->global_locking) {
> +        qemu_mutex_lock_iothread();
> +        unlocked = false;
> +        release_lock = true;
> +    }
>      if (mr->flush_coalesced_mmio) {
> +        if (unlocked) {
> +            qemu_mutex_lock_iothread();
> +        }
>          qemu_flush_coalesced_mmio_buffer();
> +        if (unlocked) {
> +            qemu_mutex_unlock_iothread();
> +        }
>      }
> +
> +    return release_lock;
>  }
This is where I get confused about the advantage of this over same-pid
recursive locking. If you used recursive locking you wouldn't need to
add a bunch of state to remind you when to release the lock. Then you'd
just need:
static void prepare_mmio_access(MemoryRegion *mr)
{
    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
    }
    if (mr->flush_coalesced_mmio) {
        qemu_mutex_lock_iothread();
        qemu_flush_coalesced_mmio_buffer();
        qemu_mutex_unlock_iothread();
    }
}
and a bunch of:
    if (mr->global_locking)
        qemu_mutex_unlock_iothread();
in the access functions. Although I suspect you could push the
mr->global_locking up to the dispatch functions.
Am I missing something here?
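For reference, here is a minimal sketch of the same-pid recursive lock that
suggestion assumes: a plain mutex plus a per-thread nesting counter, so only
the outermost lock/unlock pair in a thread touches the underlying mutex. The
helper names are invented for illustration; QEMU's actual BQL is a plain
non-recursive mutex.

#include <pthread.h>

/* Hypothetical recursive BQL, for illustration only. */
static pthread_mutex_t bql_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread unsigned bql_depth;       /* per-thread nesting count */

static void rec_lock_iothread(void)
{
    if (bql_depth++ == 0) {               /* only the outermost call blocks */
        pthread_mutex_lock(&bql_mutex);
    }
}

static void rec_unlock_iothread(void)
{
    if (--bql_depth == 0) {               /* outermost unlock releases */
        pthread_mutex_unlock(&bql_mutex);
    }
}

POSIX also offers PTHREAD_MUTEX_RECURSIVE for the same effect. The usual
argument against recursive locks is that it becomes hard to assert whether a
code path truly owns the lock or has merely re-entered it; the patch's
explicit release_lock flag keeps ownership visible at each call site.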
>
>  MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
> @@ -2334,6 +2351,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>      hwaddr addr1;
>      MemoryRegion *mr;
>      MemTxResult result = MEMTX_OK;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      while (len > 0) {
> @@ -2342,7 +2360,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>
>          if (is_write) {
>              if (!memory_access_is_direct(mr, is_write)) {
> -                prepare_mmio_access(mr);
> +                release_lock |= prepare_mmio_access(mr);
>                  l = memory_access_size(mr, l, addr1);
>                  /* XXX: could force current_cpu to NULL to avoid
>                     potential bugs */
> @@ -2384,7 +2402,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>          } else {
>              if (!memory_access_is_direct(mr, is_write)) {
>                  /* I/O case */
> -                prepare_mmio_access(mr);
> +                release_lock |= prepare_mmio_access(mr);
>                  l = memory_access_size(mr, l, addr1);
>                  switch (l) {
>                  case 8:
> @@ -2420,6 +2438,12 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
>                  memcpy(buf, ptr, l);
>              }
>          }
> +
> +        if (release_lock) {
> +            qemu_mutex_unlock_iothread();
> +            release_lock = false;
> +        }
> +
>          len -= l;
>          buf += l;
>          addr += l;
> @@ -2746,11 +2770,12 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l, false);
>      if (l < 4 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
> @@ -2784,6 +2809,9 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -2836,12 +2864,13 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
>      hwaddr l = 8;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   false);
>      if (l < 8 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
> @@ -2875,6 +2904,9 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -2947,12 +2979,13 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
>      hwaddr l = 2;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   false);
>      if (l < 2 || !memory_access_is_direct(mr, false)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          /* I/O case */
>          r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
> @@ -2986,6 +3019,9 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>      return val;
>  }
> @@ -3037,12 +3073,13 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   true);
>      if (l < 4 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>          r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
>      } else {
> @@ -3063,6 +3100,9 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
>
> @@ -3083,12 +3123,13 @@ static inline void address_space_stl_internal(AddressSpace *as,
>      hwaddr l = 4;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l,
>                                   true);
>      if (l < 4 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>  #if defined(TARGET_WORDS_BIGENDIAN)
>          if (endian == DEVICE_LITTLE_ENDIAN) {
> @@ -3121,6 +3162,9 @@ static inline void address_space_stl_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
>
> @@ -3190,11 +3234,12 @@ static inline void address_space_stw_internal(AddressSpace *as,
>      hwaddr l = 2;
>      hwaddr addr1;
>      MemTxResult r;
> +    bool release_lock = false;
>
>      rcu_read_lock();
>      mr = address_space_translate(as, addr, &addr1, &l, true);
>      if (l < 2 || !memory_access_is_direct(mr, true)) {
> -        prepare_mmio_access(mr);
> +        release_lock |= prepare_mmio_access(mr);
>
>  #if defined(TARGET_WORDS_BIGENDIAN)
>          if (endian == DEVICE_LITTLE_ENDIAN) {
> @@ -3227,6 +3272,9 @@ static inline void address_space_stw_internal(AddressSpace *as,
>      if (result) {
>          *result = r;
>      }
> +    if (release_lock) {
> +        qemu_mutex_unlock_iothread();
> +    }
>      rcu_read_unlock();
>  }
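
For reference, the pattern each converted accessor follows can be condensed
as below: translate under RCU, take the BQL via prepare_mmio_access() only if
the MMIO region requires it, dispatch, then drop whatever was taken. This is
a simplified sketch of the hunks above, not a drop-in function: the
direct-RAM branch, endianness handling and error propagation are omitted, and
the function name is invented.

static uint32_t ldl_mmio_sketch(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs)
{
    hwaddr l = 4, addr1;
    uint64_t val = 0;
    MemoryRegion *mr;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* MMIO: takes the BQL only if mr->global_locking is set */
        release_lock |= prepare_mmio_access(mr);
        memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();   /* drop only what we took */
    }
    rcu_read_unlock();
    return val;
}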
--
Alex Bennée