From: Julien Grall
Subject: [Qemu-devel] [XEN][RFC PATCH V2 07/17] hvm-io: send invalidate map cache to each registered servers
Date: Wed, 22 Aug 2012 13:31:53 +0100
When a mapcache invalidation occurs, Xen needs to send an
IOREQ_TYPE_INVALIDATE request to each registered server and wait until all
outstanding I/O has completed.

We introduce a new function, hvm_wait_on_io, to wait until an I/O request
has completed.
Signed-off-by: Julien Grall <address@hidden>
---
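For reference, a minimal, self-contained sketch of the pattern this patch
implements: post the invalidate request to every registered ioreq server,
then block the vcpu until each server has acknowledged it. The types, the
IOREQ_TYPE_INVALIDATE value and the immediate "completion" below are
simplified stand-ins for illustration only, not the actual Xen code (which
walks hvm_domain.ioreq_server_list under ioreq_server_lock and waits on each
server's event channel, as the diff shows).

/* Sketch only: simplified stand-in types, not the real Xen structures. */
#include <stdio.h>

enum ioreq_state { STATE_IOREQ_NONE, STATE_IOREQ_READY,
                   STATE_IOREQ_INPROCESS, STATE_IORESP_READY };

struct ioreq {                        /* stand-in for ioreq_t */
    int type;
    enum ioreq_state state;
};

struct ioreq_server {                 /* stand-in for hvm_ioreq_server */
    struct ioreq slot;                /* this vcpu's slot in the server's shared page */
    struct ioreq_server *next;
};

#define IOREQ_TYPE_INVALIDATE 7       /* illustrative value, not the real constant */

/*
 * Wait until the slot returns to STATE_IOREQ_NONE.  In the patch this is
 * hvm_wait_on_io(), which blocks on the server's event channel; here the
 * "server" is simulated as answering immediately.
 */
static void wait_on_io(struct ioreq *p)
{
    while ( p->state != STATE_IOREQ_NONE )
        p->state = STATE_IOREQ_NONE;  /* simulated completion */
}

/*
 * Post the invalidate request to every registered server, then wait for
 * each one in turn: the vcpu must not resume until all emulators have
 * dropped their stale map cache entries.
 */
static void send_invalidate_to_all(struct ioreq_server *list)
{
    struct ioreq_server *s;

    for ( s = list; s; s = s->next )
    {
        s->slot.type = IOREQ_TYPE_INVALIDATE;
        s->slot.state = STATE_IOREQ_READY;   /* "notify" the server */
    }
    for ( s = list; s; s = s->next )
        wait_on_io(&s->slot);
}

int main(void)
{
    struct ioreq_server b = { .next = NULL };
    struct ioreq_server a = { .next = &b };

    send_invalidate_to_all(&a);
    puts("all servers completed the invalidate request");
    return 0;
}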
xen/arch/x86/hvm/hvm.c | 41 ++++++++++++++++++++++++++++++++---------
xen/arch/x86/hvm/io.c | 15 +++++++++++++--
2 files changed, 45 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 33ef0f2..fdb2515 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -316,16 +316,9 @@ void hvm_migrate_pirqs(struct vcpu *v)
spin_unlock(&d->event_lock);
}
-void hvm_do_resume(struct vcpu *v)
+static void hvm_wait_on_io(struct vcpu *v, ioreq_t *p)
{
- ioreq_t *p;
-
- pt_restore_timer(v);
-
- check_wakeup_from_wait();
-
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
- p = get_ioreq(v);
while ( p->state != STATE_IOREQ_NONE )
{
switch ( p->state )
@@ -335,7 +328,7 @@ void hvm_do_resume(struct vcpu *v)
break;
case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
case STATE_IOREQ_INPROCESS:
- wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
+ wait_on_xen_event_channel(p->vp_eport,
(p->state != STATE_IOREQ_READY) &&
(p->state != STATE_IOREQ_INPROCESS));
break;
@@ -345,6 +338,36 @@ void hvm_do_resume(struct vcpu *v)
return; /* bail */
}
}
+}
+
+void hvm_do_resume(struct vcpu *v)
+{
+ ioreq_t *p;
+ struct hvm_ioreq_server *s;
+ shared_iopage_t *page;
+
+ pt_restore_timer(v);
+
+ check_wakeup_from_wait();
+
+ p = get_ioreq(v);
+
+ if ( p->type == IOREQ_TYPE_INVALIDATE )
+ {
+ spin_lock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+ /* Wait all servers */
+ for ( s = v->domain->arch.hvm_domain.ioreq_server_list; s; s = s->next )
+ {
+ page = s->ioreq.va;
+ ASSERT((v == current) || spin_is_locked(&s->ioreq.lock));
+ ASSERT(s->ioreq.va != NULL);
+ v->arch.hvm_vcpu.ioreq = &s->ioreq;
+ hvm_wait_on_io(v, &page->vcpu_ioreq[v->vcpu_id]);
+ }
+ spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+ }
+ else
+ hvm_wait_on_io(v, p);
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index c20f4e8..b73a462 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -150,7 +150,8 @@ void send_timeoffset_req(unsigned long timeoff)
void send_invalidate_req(void)
{
struct vcpu *v = current;
- ioreq_t *p = get_ioreq(v);
+ ioreq_t p[1];
+ struct hvm_ioreq_server *s;
if ( p->state != STATE_IOREQ_NONE )
{
@@ -164,8 +165,18 @@ void send_invalidate_req(void)
p->size = 4;
p->dir = IOREQ_WRITE;
p->data = ~0UL; /* flush all */
+ p->count = 0;
+ p->addr = 0;
+
+ spin_lock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+ for ( s = v->domain->arch.hvm_domain.ioreq_server_list; s; s = s->next )
+ {
+ set_ioreq(v, &s->ioreq, p);
+ (void)hvm_send_assist_req(v);
+ }
+ spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
- (void)hvm_send_assist_req(v);
+ set_ioreq(v, &v->domain->arch.hvm_domain.ioreq, p);
}
int handle_mmio(void)
--
Julien Grall