On 02/21/2016 08:10 PM, David Kiarie wrote:
Add AMD IOMMU emulation to Qemu in addition to Intel IOMMU
The IOMMU does basic translation, error checking and has a
minimal IOTLB implementation
Hi,
Signed-off-by: David Kiarie <address@hidden>
---
hw/i386/Makefile.objs | 1 +
hw/i386/amd_iommu.c | 1432
+++++++++++++++++++++++++++++++++++++++++++++++++
hw/i386/amd_iommu.h | 395 ++++++++++++++
include/hw/pci/pci.h | 2 +
4 files changed, 1830 insertions(+)
create mode 100644 hw/i386/amd_iommu.c
create mode 100644 hw/i386/amd_iommu.h
diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
index b52d5b8..2f1a265 100644
--- a/hw/i386/Makefile.objs
+++ b/hw/i386/Makefile.objs
@@ -3,6 +3,7 @@ obj-y += multiboot.o
obj-y += pc.o pc_piix.o pc_q35.o
obj-y += pc_sysfw.o
obj-y += intel_iommu.o
+obj-y += amd_iommu.o
obj-$(CONFIG_XEN) += ../xenpv/ xen/
obj-y += kvmvapic.o
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
new file mode 100644
index 0000000..3dac043
--- /dev/null
+++ b/hw/i386/amd_iommu.c
@@ -0,0 +1,1432 @@
+/*
+ * QEMU emulation of AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <address@hidden>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License
along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Cache implementation inspired by hw/i386/intel_iommu.c
+ *
+ */
+#include "hw/i386/amd_iommu.h"
+
+/*#define DEBUG_AMD_IOMMU*/
+#ifdef DEBUG_AMD_IOMMU
+enum {
+ DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
+ DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
+};
+
+#define IOMMU_DBGBIT(x) (1 << DEBUG_##x)
+static int iommu_dbgflags = IOMMU_DBGBIT(MMIO);
+
+#define IOMMU_DPRINTF(what, fmt, ...) do { \
+ if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
+ fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
+ ## __VA_ARGS__); } \
+ } while (0)
+#else
+#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
+#endif
+
+typedef struct AMDIOMMUAddressSpace {
+ uint8_t bus_num; /* bus
number */
+ uint8_t devfn; /* device
function */
+ AMDIOMMUState *iommu_state; /* IOMMU - one per
machine */
+ MemoryRegion iommu; /* Device's iommu
region */
+ AddressSpace as; /* device's corresponding address
space */
+} AMDIOMMUAddressSpace;
+
+/* IOMMU cache entry */
+typedef struct IOMMUIOTLBEntry {
+ uint64_t gfn;
+ uint16_t domid;
+ uint64_t devid;
+ uint64_t perms;
+ uint64_t translated_addr;
+} IOMMUIOTLBEntry;
+
+/* configure MMIO registers at startup/reset */
+static void amd_iommu_set_quad(AMDIOMMUState *s, hwaddr addr,
uint64_t val,
+ uint64_t romask, uint64_t w1cmask)
+{
+ stq_le_p(&s->mmior[addr], val);
+ stq_le_p(&s->romask[addr], romask);
+ stq_le_p(&s->w1cmask[addr], w1cmask);
+}
+
+static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
+{
+ return lduw_le_p(&s->mmior[addr]);
+}
+
+static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
+{
+ return ldl_le_p(&s->mmior[addr]);
+}
+
+static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
+{
+ return ldq_le_p(&s->mmior[addr]);
+}
+
+/* internal write */
+static void amd_iommu_writeq_raw(AMDIOMMUState *s, uint64_t val,
hwaddr addr)
+{
+ stq_le_p(&s->mmior[addr], val);
+}
+
+/* external write */
+static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t
val)
+{
+ uint16_t romask = lduw_le_p(&s->romask[addr]);
+ uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
+ uint16_t oldval = lduw_le_p(&s->mmior[addr]);
+ stw_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
oldval));
+}
+
+static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t
val)
+{
+ uint32_t romask = ldl_le_p(&s->romask[addr]);
+ uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
+ uint32_t oldval = ldl_le_p(&s->mmior[addr]);
+ stl_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask &
oldval));
+}
+
+/* external 64-bit MMIO write, honouring the read-only and
+ * write-1-to-clear masks for the register
+ */
+static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
+{
+    uint64_t romask = ldq_le_p(&s->romask[addr]);
+    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
+    /* BUG FIX: oldval was declared uint32_t, truncating the preserved
+     * read-only bits of a 64-bit register to the low 32 bits
+     */
+    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
+    stq_le_p(&s->mmior[addr], (val & ~(val & w1cmask)) | (romask & oldval));
+}
+
+/* write an event to the guest-visible event log
+ *
+ * @evt: encoded event, IOMMU_EVENT_LEN bytes
+ */
+static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
+{
+    /* nothing to do if event logging is disabled or the log has already
+     * overflowed (BUG FIX: the overflow test used '|' instead of '&',
+     * making it true whenever any status bit was set)
+     */
+    if (!s->evtlog_enabled ||
+        (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] &
+         IOMMU_MMIO_STATUS_EVT_OVF)) {
+        return;
+    }
+
+    /* event log buffer full - flag the overflow and raise an interrupt
+     * (BUG FIX: a missing return meant we kept writing past the log)
+     */
+    if (s->evtlog_tail >= s->evtlog_len) {
+        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
+            IOMMU_MMIO_STATUS_EVT_OVF;
+        /* generate interrupt */
+        msi_notify(&s->dev, 0);
+        return;
+    }
+
+    /* BUG FIX: write relative to the log base address (s->evtlog), not
+     * the log length, and pass the event buffer itself, not '&evt'
+     * (the address of the pointer parameter)
+     */
+    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
+                         evt, IOMMU_EVENT_LEN)) {
+        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
+                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
+    }
+
+    s->evtlog_tail += IOMMU_EVENT_LEN;
+    *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |= IOMMU_MMIO_STATUS_COMP_INT;
+}
+
+/* log an error encountered page-walking
+ *
+ * @addr: virtual address in translation request
+ */
+static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
+ dma_addr_t addr, uint16_t info)
+{
+ IOMMU_DPRINTF(ELOG, "");
+
+ uint16_t evt[8];
+
+ info |= IOMMU_EVENT_IOPF_I;
+
+ /* encode information */
+ *(uint16_t *)&evt[0] = devid;
+ *(uint16_t *)&evt[3] = info;
+ *(uint64_t *)&evt[4] = cpu_to_le64(addr);
+
+ /* log a page fault */
+ amd_iommu_log_event(s, evt);
+
+ /* Abort the translation */
+ pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+}
+/*
+ * log a master abort accessing device table
+ * @devtab : address of device table entry
+ * @info : error flags
+ */
+static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t
devid,
+ dma_addr_t devtab, uint16_t
info)
+{
+
+ IOMMU_DPRINTF(ELOG, "");
+
+ uint16_t evt[8];
+
+ info |= IOMMU_EVENT_DEV_TAB_HW_ERROR;
+
+ /* encode information */
+ *(uint16_t *)&evt[0] = devid;
+ *(uint8_t *)&evt[3] = info;
+ *(uint64_t *)&evt[4] = cpu_to_le64(devtab);
+
+ amd_iommu_log_event(s, evt);
+
+ /* Abort the translation */
+ pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+
+}
+
+/* log a master abort encountered during a page-walk
+ * @addr : address that couldn't be accessed
+ */
+static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t
devid,
+ dma_addr_t addr, uint16_t info)
+{
+ IOMMU_DPRINTF(ELOG, "");
+
+ uint16_t evt[8];
+
+ info |= IOMMU_EVENT_PAGE_TAB_HW_ERROR;
+
+ /* encode information */
+ *(uint16_t *)&evt[0] = devid;
+ *(uint8_t *)&evt[3] = info;
+ *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
+
+ amd_iommu_log_event(s, evt);
+
+ /* Abort the translation */
+ pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+
+}
+
+/* log an event trying to access command buffer
+ * @addr : address that couldn't be accessed
+ */
+static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t
addr)
+{
+ IOMMU_DPRINTF(ELOG, "");
+
+ uint16_t evt[8];
+
+ /* encode information */
+ *(uint8_t *)&evt[3] = (uint8_t)IOMMU_EVENT_COMMAND_HW_ERROR;
+ *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
+
+ amd_iommu_log_event(s, evt);
+
+ /* Abort the translation */
+ pci_word_test_and_set_mask(s->dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+}
+
+/* log an illegal command event
+ * @addr : address of illegal command
+ */
+static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s,
uint16_t info,
+ dma_addr_t addr)
+{
+ IOMMU_DPRINTF(ELOG, "");
+
+ uint16_t evt[8];
+
+ /* encode information */
+ *(uint8_t *)&evt[3] = (uint8_t)IOMMU_EVENT_ILLEGAL_COMMAND_ERROR;
+ *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
Can you please use a macro instead of the literal 3?
+
+ amd_iommu_log_event(s, evt);
+}
+
+/* log an error accessing device table
+ *
+ * @devid : device owning the table entry
+ * @devtab : address of device table entry
+ * @info : error flags
+ */
+static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s,
uint16_t devid,
+ dma_addr_t addr,
uint16_t info)
+{
+ IOMMU_DPRINTF(ELOG, "");
+
+ uint16_t evt[8];
+
+ info |= IOMMU_EVENT_ILLEGAL_DEVTAB_ENTRY;
+
+ *(uint16_t *)&evt[0] = devid;
+ *(uint8_t *)&evt[3] = info;
+ *(uint64_t *)&evt[4] = (cpu_to_le64(addr) >> 3);
+
+ amd_iommu_log_event(s, evt);
+}
It seems that all the log functions do the same thing:
create an event, log it and optionally set PCI_STATUS_SIG_TARGET_ABORT.
I would consider uniting them in a single function. (not a must)
+
+/* GHashTable equality callback for uint64_t keys */
+static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+    const uint64_t *a = v1;
+    const uint64_t *b = v2;
+
+    return *a == *b;
+}
+
+/* GHashTable hash callback: truncate the uint64_t key to a guint */
+static guint amd_iommu_uint64_hash(gconstpointer v)
+{
+    const uint64_t *key = v;
+
+    return (guint)*key;
+}
+
+static IOMMUIOTLBEntry *amd_iommu_iotlb_lookup(AMDIOMMUState *s,
hwaddr addr,
+ uint64_t devid)
+{
+ uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
+ ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
+ return g_hash_table_lookup(s->iotlb, &key);
+}
+
+static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
+{
+ assert(s->iotlb);
+ g_hash_table_remove_all(s->iotlb);
+}
+
+static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key,
gpointer value,
+ gpointer user_data)
+{
+ IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
+ uint16_t devid = *(uint16_t *)user_data;
+ return entry->devid == devid;
+}
+
+static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
+ uint64_t devid)
+{
+ uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
+ ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
+ g_hash_table_remove(s->iotlb, &key);
+}
+
+/* extract the device id from an invalidation command
+ *
+ * NOTE(review): this reads only byte 2 of the command before masking;
+ * the AMD-Vi spec encodes DeviceID in bits 15:0 (bytes 0-1) of the
+ * command - confirm the intended field and the width of
+ * IOMMU_INVAL_DEV_ID_MASK
+ */
+static inline uint16_t devid_extract(uint8_t *cmd)
+{
+    return (uint16_t)cmd[2] & IOMMU_INVAL_DEV_ID_MASK;
+}
+
+static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
+{
+ uint16_t devid = devid_extract((uint8_t *)cmd);
+ /* if invalidation of more than one page requested */
+ if (IOMMU_INVAL_ALL(cmd[0])) {
+ g_hash_table_foreach_remove(s->iotlb,
amd_iommu_iotlb_remove_by_devid,
+ &devid);
+ } else {
+ hwaddr addr = (hwaddr)(cmd[1] & IOMMU_INVAL_ADDR_MASK);
+ amd_iommu_iotlb_remove_page(s, addr, devid);
+ }
+}
+
+/* insert a translation into the IOTLB cache, resetting the cache first
+ * if it has grown past IOMMU_IOTLB_MAX_SIZE entries
+ */
+static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
+                                   uint64_t gpa, uint64_t spa, uint64_t perms,
+                                   uint16_t domid)
+{
+    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
+    /* BUG FIX: sizeof(key) allocated only pointer-sized storage; the
+     * hash key is a uint64_t, so allocate sizeof(*key)
+     */
+    uint64_t *key = g_malloc(sizeof(*key));
+    uint64_t gfn = gpa >> IOMMU_PAGE_SHIFT_4K;
+
+    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
+                  " hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                  PCI_FUNC(devid), gpa, spa);
+
+    if (g_hash_table_size(s->iotlb) >= IOMMU_IOTLB_MAX_SIZE) {
+        IOMMU_DPRINTF(CACHE, "iotlb exceeds size limit - reset");
+        amd_iommu_iotlb_reset(s);
+    }
+
+    entry->gfn = gfn;
+    entry->domid = domid;
+    /* BUG FIX: devid was never stored, leaving the field uninitialized
+     * although amd_iommu_iotlb_remove_by_devid() compares against it
+     */
+    entry->devid = devid;
+    entry->perms = perms;
+    entry->translated_addr = spa;
+    *key = gfn | ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
+    g_hash_table_replace(s->iotlb, key, entry);
+}
+
+/* execute a completion wait command: optionally store 8 bytes of
+ * completion data at the requested address and raise the completion
+ * interrupt status bit
+ */
+static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+    /* BUG FIX: 'unsigned int' truncated the 64-bit store address */
+    uint64_t addr;
+
+    /* completion store */
+    if (cmd[0] & IOMMU_COM_COMPLETION_STORE_MASK) {
+        addr = le64_to_cpu(*(uint64_t *)cmd) & IOMMU_COM_STORE_ADDRESS_MASK;
+        if (dma_memory_write(&address_space_memory, addr, cmd + 8, 8)) {
+            /* BUG FIX: format string was the malformed "0%x"PRIx64 */
+            IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64,
+                          addr);
+        }
+    }
+
+    /* set completion interrupt - update the full 64-bit status register,
+     * consistent with every other status-register update (a plain byte
+     * store could drop high bits of the mask)
+     */
+    if (cmd[0] & IOMMU_COM_COMPLETION_INTR) {
+        *(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS] |=
+            IOMMU_MMIO_STATUS_COMP_INT;
+    }
+}
+
+/* get command type: the opcode lives in the upper bits of byte
+ * IOMMU_CMDBUF_ID_BYTE of the command
+ */
+static uint8_t opcode(uint8_t *cmd)
+{
+    return cmd[IOMMU_CMDBUF_ID_BYTE] >> IOMMU_CMDBUF_ID_RSHIFT;
+}
+
+/* Linux appears to set reserved bits in some commands, so just log the
+ * error without aborting */
I couldn't quite understand the comment
+static void iommu_inval_devtab_entry(AMDIOMMUState *s, uint8_t *cmd,
+ uint8_t type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ /* This command should invalidate internal caches of which there
isn't */
+ if (*(uint64_t *)&cmd[0] & IOMMU_CMD_INVAL_DEV_RSVD ||
+ *(uint64_t *)&cmd[1]) {
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ }
+#ifdef DEBUG_AMD_IOMMU
+ uint16_t devid = devid_extract(cmd);
+#endif
+ IOMMU_DPRINTF(COMMAND, "device table entry for devid: %02x:%02x.%x"
+ "invalidated", PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid));
+}
+
+static void iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd,
uint8_t type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ if (*(uint32_t *)&cmd[1] & IOMMU_COMPLETION_WAIT_RSVD) {
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ }
+ /* pretend to wait for command execution to complete */
+ IOMMU_DPRINTF(COMMAND, "completion wait requested with store
address 0x%"
+ PRIx64 " and store data 0x%"PRIx64, (cmd[0] &
+ IOMMU_COM_STORE_ADDRESS_MASK), *(uint64_t *)(cmd +
8));
+ amd_iommu_completion_wait(s, cmd);
+}
+
+static void iommu_complete_ppr(AMDIOMMUState *s, uint8_t *cmd,
uint8_t type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ if ((*(uint64_t *)&cmd[0] & IOMMU_COMPLETE_PPR_RQ_RSVD) ||
+ *(uint64_t *)&cmd[1] & 0xffff000000000000) {
Can you please document this mask?
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ }
+
+ IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
+}
+
+static void iommu_inval_all(AMDIOMMUState *s, uint8_t *cmd, uint8_t
type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_ALL_RSVD) ||
+ *(uint64_t *)&cmd[1]) {
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ }
+
+ amd_iommu_iotlb_reset(s);
+ IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache
requested");
+}
+
+static inline uint16_t domid_extract(uint64_t *cmd)
+{
+ return (uint16_t)cmd[0] & IOMMU_INVAL_PAGES_DOMID;
+}
+
+static gboolean amd_iommu_iotlb_remove_by_domid(gpointer key,
gpointer value,
+ gpointer user_data)
+{
+ IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
+ uint16_t domid = *(uint16_t *)user_data;
+ return entry->domid == domid;
+}
+
+/* we don't have devid - we can't remove pages by address */
+static void iommu_inval_pages(AMDIOMMUState *s, uint8_t *cmd,
uint8_t type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+ uint16_t domid = domid_extract((uint64_t *)cmd);
+
+ if (*(uint64_t *)&cmd[0] & IOMMU_INVAL_IOMMU_PAGES_RSVD ||
+ *(uint32_t *)&cmd[1] & 0x00000ff0) {
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ }
+
+ g_hash_table_foreach_remove(s->iotlb,
amd_iommu_iotlb_remove_by_domid,
+ &domid);
+
+ IOMMU_DPRINTF(COMMAND, "IOMMU pages for domain 0x%"PRIx16
"invalidated",
+ domid);
+}
+
+static void iommu_prefetch_pages(AMDIOMMUState *s, uint8_t *cmd,
uint8_t type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ if ((*(uint64_t *)&cmd[0] & IOMMU_PRF_IOMMU_PAGES_RSVD) ||
+ (*(uint32_t *)&cmd[1] & 0x00000fd4)) {
Here the same, maybe you can name the mask, so we can easier follow
the spec.
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ }
+
+ IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
+}
+
+static void iommu_inval_inttable(AMDIOMMUState *s, uint8_t *cmd,
uint8_t type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ if ((*(uint64_t *)&cmd[0] & IOMMU_INVAL_INTR_TABLE_RSVD) ||
+ *(uint64_t *)&cmd[1]) {
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ return;
+ }
+
+ IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
+}
+
+static void iommu_inval_iotlb(AMDIOMMUState *s, uint8_t *cmd,
uint8_t type)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ if (*(uint32_t *)&cmd[2] & IOMMU_INVAL_IOTLB_PAGES_RSVD) {
+ amd_iommu_log_illegalcom_error(s, type, s->cmdbuf +
s->cmdbuf_head);
+ return;
+ }
+
+ amd_iommu_invalidate_iotlb(s, (uint64_t *)cmd);
+ IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
+}
+
+/* not honouring reserved bits is regarded as an illegal command */
+static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint8_t type;
+ uint8_t cmd[IOMMU_COMMAND_SIZE];
+
+ memset(cmd, 0, IOMMU_COMMAND_SIZE);
+
+ if (dma_memory_read(&address_space_memory, s->cmdbuf +
s->cmdbuf_head, cmd,
+ IOMMU_COMMAND_SIZE)) {
+ IOMMU_DPRINTF(COMMAND, "error: fail to access memory at
0x%"PRIx64
+ " + 0x%"PRIu8, s->cmdbuf, s->cmdbuf_head);
+ amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
+ return;
+ }
+
+ type = opcode(cmd);
+
+ switch (type) {
+ case IOMMU_CMD_COMPLETION_WAIT:
+ iommu_completion_wait(s, cmd, type);
+ break;
+
+ case IOMMU_CMD_INVAL_DEVTAB_ENTRY:
+ iommu_inval_devtab_entry(s, cmd, type);
+ break;
+
+ case IOMMU_CMD_INVAL_IOMMU_PAGES:
+ iommu_inval_pages(s, cmd, type);
+ break;
+
+ case IOMMU_CMD_INVAL_IOTLB_PAGES:
+ iommu_inval_iotlb(s, cmd, type);
+ break;
+
+ case IOMMU_CMD_INVAL_INTR_TABLE:
+ iommu_inval_inttable(s, cmd, type);
+ break;
+
+ case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
+ iommu_prefetch_pages(s, cmd, type);
+ break;
+
+ case IOMMU_CMD_COMPLETE_PPR_REQUEST:
+ iommu_complete_ppr(s, cmd, type);
+ break;
+
+ case IOMMU_CMD_INVAL_IOMMU_ALL:
+ iommu_inval_all(s, cmd, type);
+ break;
+
+ default:
+ IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
+ /* log illegal command */
+ amd_iommu_log_illegalcom_error(s, type,
+ s->cmdbuf + s->cmdbuf_head);
+ break;
+ }
+
+}
+
+static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t *mmio_cmdbuf_head = (uint64_t *)(s->mmior +
+ IOMMU_MMIO_COMMAND_HEAD);
+
+ if (!s->cmdbuf_enabled) {
+ IOMMU_DPRINTF(COMMAND, "error: IOMMU trying to execute
commands with "
+ "command buffer disabled. IOMMU control value
0x%"PRIx64,
+ amd_iommu_readq(s, IOMMU_MMIO_CONTROL));
+ return;
+ }
+
+ while (s->cmdbuf_head != s->cmdbuf_tail) {
+ /* check if there is work to do. */
+ IOMMU_DPRINTF(COMMAND, "command buffer head at 0x%"PRIx32 "
command "
+ "buffer tail at 0x%"PRIx32" command buffer
base at 0x%"
+ PRIx64, s->cmdbuf_head, s->cmdbuf_tail,
s->cmdbuf);
+ amd_iommu_cmdbuf_exec(s);
+ s->cmdbuf_head += IOMMU_COMMAND_SIZE;
+ amd_iommu_writeq_raw(s, s->cmdbuf_head,
IOMMU_MMIO_COMMAND_HEAD);
+
+ /* wrap head pointer */
+ if (s->cmdbuf_head >= s->cmdbuf_len * IOMMU_COMMAND_SIZE) {
+ s->cmdbuf_head = 0;
+ }
+ }
+
+ *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
+}
+
+/* System Software might never read some of these fields, but implement
+ * them anyway */
+static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr,
unsigned size)
+{
+ AMDIOMMUState *s = opaque;
+
+ uint64_t val = -1;
The above might work, but it looks a little weird
+ if (addr + size > IOMMU_MMIO_SIZE) {
+ IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
+ ", got 0x%"PRIx64 " %d",
(uint64_t)IOMMU_MMIO_SIZE, addr,
+ size);
+ return (uint64_t)-1;
+ }
+
+ if (size == 2) {
+ val = amd_iommu_readw(s, addr);
+ } else if (size == 4) {
+ val = amd_iommu_readl(s, addr);
+ } else if (size == 8) {
+ val = amd_iommu_readq(s, addr);
+ }
+
+ switch (addr & ~0x07) {
+ case IOMMU_MMIO_DEVICE_TABLE:
+ IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_COMMAND_BASE:
+ IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_EVENT_BASE:
+ IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_CONTROL:
+ IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_EXCL_BASE:
+ IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_EXCL_LIMIT:
+ IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_COMMAND_HEAD:
+ IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_COMMAND_TAIL:
+ IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_EVENT_HEAD:
+ IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_EVENT_TAIL:
+ IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_STATUS:
+ IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ break;
+
+ case IOMMU_MMIO_EXT_FEATURES:
+ IOMMU_DPRINTF(MMU, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64 "value 0x%"PRIx64,
+ addr, size, addr & ~0x07, val);
+ break;
+
+ default:
+ IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
+ ", size %d offset 0x%"PRIx64, addr, size,
+ addr & ~0x07);
+ }
+ return val;
+}
+
+static void iommu_handle_control_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+ /*
+ * read whatever is already written in case
+     * software is writing in chunks of less than 8 bytes
+ */
+ unsigned long control = amd_iommu_readq(s, IOMMU_MMIO_CONTROL);
+ s->enabled = !!(control & IOMMU_MMIO_CONTROL_IOMMUEN);
+
+ s->ats_enabled = !!(control & IOMMU_MMIO_CONTROL_HTTUNEN);
+ s->evtlog_enabled = s->enabled && !!(control &
+ IOMMU_MMIO_CONTROL_EVENTLOGEN);
+
+ s->evtlog_intr = !!(control & IOMMU_MMIO_CONTROL_EVENTINTEN);
+ s->completion_wait_intr = !!(control &
IOMMU_MMIO_CONTROL_COMWAITINTEN);
+ s->cmdbuf_enabled = s->enabled && !!(control &
+ IOMMU_MMIO_CONTROL_CMDBUFLEN);
+
+ /* update the flags depending on the control register */
+ if (s->cmdbuf_enabled) {
+ (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
+ IOMMU_MMIO_STATUS_CMDBUF_RUN;
+ } else {
+ (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
+ ~IOMMU_MMIO_STATUS_CMDBUF_RUN;
+ }
+ if (s->evtlog_enabled) {
+ (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) |=
+ IOMMU_MMIO_STATUS_EVT_RUN;
+ } else {
+ (*(uint64_t *)&s->mmior[IOMMU_MMIO_STATUS]) &=
+ ~IOMMU_MMIO_STATUS_EVT_RUN;
+ }
+
+ IOMMU_DPRINTF(COMMAND, "MMIO_STATUS state 0x%"PRIx64, control);
+
+ amd_iommu_cmdbuf_run(s);
+}
+
+/* update the cached device table base address and length from the
+ * IOMMU_MMIO_DEVICE_TABLE register
+ */
+static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_DEVICE_TABLE);
+    s->devtab = (dma_addr_t)(val & IOMMU_MMIO_DEVTAB_BASE_MASK);
+
+    /* set device table length: (size field + 1) * entries per size unit.
+     * BUG FIX: missing parentheses made the original parse as
+     * size + (1 * unit) instead of (size + 1) * unit
+     */
+    s->devtab_len = ((val & IOMMU_MMIO_DEVTAB_SIZE_MASK) + 1) *
+                    (IOMMU_MMIO_DEVTAB_SIZE_UNIT /
+                     IOMMU_MMIO_DEVTAB_ENTRY_SIZE);
+}
+
+static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ s->cmdbuf_head = (dma_addr_t)amd_iommu_readq(s,
IOMMU_MMIO_COMMAND_HEAD)
+ & IOMMU_MMIO_CMDBUF_HEAD_MASK;
+ amd_iommu_cmdbuf_run(s);
+}
+
+static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ s->cmdbuf = (dma_addr_t)amd_iommu_readq(s, IOMMU_MMIO_COMMAND_BASE)
+ & IOMMU_MMIO_CMDBUF_BASE_MASK;
+ s->cmdbuf_len = 1UL << (s->mmior[IOMMU_MMIO_CMDBUF_SIZE_BYTE]
+ & IOMMU_MMIO_CMDBUF_SIZE_MASK);
+ s->cmdbuf_head = s->cmdbuf_tail = 0;
+
+}
+
+static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
+{
+ s->cmdbuf_tail = amd_iommu_readq(s, IOMMU_MMIO_COMMAND_TAIL)
+ & IOMMU_MMIO_CMDBUF_TAIL_MASK;
+ amd_iommu_cmdbuf_run(s);
+}
+
+static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EXCL_LIMIT);
+ s->excl_limit = (val & IOMMU_MMIO_EXCL_LIMIT_MASK) |
+ IOMMU_MMIO_EXCL_LIMIT_LOW;
+}
+
+static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_BASE);
+ s->evtlog = val & IOMMU_MMIO_EVTLOG_BASE_MASK;
+ s->evtlog_len = 1UL << (*(uint64_t
*)&s->mmior[IOMMU_MMIO_EVTLOG_SIZE_BYTE]
+ & IOMMU_MMIO_EVTLOG_SIZE_MASK);
+}
+
+static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_TAIL);
+ s->evtlog_tail = val & IOMMU_MMIO_EVTLOG_TAIL_MASK;
+}
+
+static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_EVENT_HEAD);
+ s->evtlog_head = val & IOMMU_MMIO_EVTLOG_HEAD_MASK;
+}
+
+static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_BASE);
+ s->ppr_log = val & IOMMU_MMIO_PPRLOG_BASE_MASK;
+ s->pprlog_len = 1UL << (*(uint64_t
*)&s->mmior[IOMMU_MMIO_PPRLOG_SIZE_BYTE]
+ & IOMMU_MMIO_PPRLOG_SIZE_MASK);
+}
+
+static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_HEAD);
+ s->pprlog_head = val & IOMMU_MMIO_PPRLOG_HEAD_MASK;
+}
+
+static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
+{
+ IOMMU_DPRINTF(COMMAND, "");
+
+ uint64_t val = amd_iommu_readq(s, IOMMU_MMIO_PPR_TAIL);
+ s->pprlog_tail = val & IOMMU_MMIO_PPRLOG_TAIL_MASK;
+}
+
+/* FIXME: something might go wrong if System Software writes in chunks
+ * of one byte but linux writes in chunks of 4 bytes so currently it
+ * works correctly with linux but will definitely be busted if software
+ * reads/writes 8 bytes
+ */
+static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t
val,
+ unsigned size)
+{
+
+ IOMMU_DPRINTF(COMMAND, "");
+
+ AMDIOMMUState *s = opaque;
+ unsigned long offset = addr & 0x07;
+
+ if (addr + size > IOMMU_MMIO_SIZE) {
+ IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
+ ", got 0x%"PRIx64 " %d",
(uint64_t)IOMMU_MMIO_SIZE, addr,
+ size);
+ return;
+ }
+
+ switch (addr & ~0x07) {
+ case IOMMU_MMIO_CONTROL:
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+
+ IOMMU_DPRINTF(COMMAND, "MMIO_CONTROL write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ iommu_handle_control_write(s);
+ break;
+
+ case IOMMU_MMIO_DEVICE_TABLE:
+ IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+
+ /* set device table address
+ * This also suffers from inability to tell whether software
+ * is done writing
+ */
+
+ if (offset || (size == 8)) {
+ iommu_handle_devtab_write(s);
+ }
+ break;
+
+ case IOMMU_MMIO_COMMAND_HEAD:
+ IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+
+ iommu_handle_cmdhead_write(s);
+ break;
+
+ case IOMMU_MMIO_COMMAND_BASE:
+ IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+
+        /* FIXME - make sure System Software has finished writing in case
+         * it writes in chunks of less than 8 bytes, in a robust way. As
+         * for now, this hack works for the linux driver
+         */
+ if (offset || (size == 8)) {
+ iommu_handle_cmdbase_write(s);
+ }
+ break;
+
+ case IOMMU_MMIO_COMMAND_TAIL:
+ IOMMU_DPRINTF(COMMAND, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_cmdtail_write(s);
+ break;
+
+ case IOMMU_MMIO_EVENT_BASE:
+ IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_evtbase_write(s);
+ break;
+
+ case IOMMU_MMIO_EVENT_HEAD:
+ IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_evthead_write(s);
+ break;
+
+ case IOMMU_MMIO_EVENT_TAIL:
+ IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_evttail_write(s);
+ break;
+
+ case IOMMU_MMIO_EXCL_LIMIT:
+ IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_excllim_write(s);
+ break;
+
+ /* PPR log base - unused for now */
+ case IOMMU_MMIO_PPR_BASE:
+ IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_pprbase_write(s);
+ break;
+ /* PPR log head - also unused for now */
+ case IOMMU_MMIO_PPR_HEAD:
+ IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_pprhead_write(s);
+ break;
+ /* PPR log tail - unused for now */
+ case IOMMU_MMIO_PPR_TAIL:
+ IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ if (size == 2) {
+ amd_iommu_writew(s, addr, val);
+ } else if (size == 4) {
+ amd_iommu_writel(s, addr, val);
+ } else if (size == 8) {
+ amd_iommu_writeq(s, addr, val);
+ }
+ iommu_handle_pprtail_write(s);
+ break;
+
+ /* ignore write to ext_features */
+ default:
+ IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+ addr, size, val, offset);
+ }
+
+}
+
+static inline uint64_t amd_iommu_get_perms(uint64_t entry)
+{
+ return (entry & (IOMMU_DEV_PERM_READ | IOMMU_DEV_PERM_WRITE)) >>
+ IOMMU_DEV_PERM_SHIFT;
+}
+
+AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int
devfn)
+{
+ AMDIOMMUState *s = opaque;
+ AMDIOMMUAddressSpace **iommu_as;
+ int bus_num = pci_bus_num(bus);
+
+ /* just in case */
This comment troubles me, do we need the assert?
+ assert(0 <= bus_num && bus_num <= PCI_BUS_MAX);
bus_num < PCI_BUS_MAX, right ?
+ assert(0 <= devfn && devfn <= PCI_DEVFN_MAX);
same with devfn I suppose.
+
+ iommu_as = s->address_spaces[bus_num];
+
+ /* allocate memory during the first run */
+ if (!iommu_as) {
Why lazy init? We can do that at AMDIOMMUState init, right?
+ iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) *
PCI_DEVFN_MAX);
+ s->address_spaces[bus_num] = iommu_as;
+ }
+
+ /* set up IOMMU region */
+ if (!iommu_as[devfn]) {
+ iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
same here
+ iommu_as[devfn]->bus_num = (uint8_t)bus_num;
+ iommu_as[devfn]->devfn = (uint8_t)devfn;
+ iommu_as[devfn]->iommu_state = s;
+
+ memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
+ &s->iommu_ops, "amd-iommu",
UINT64_MAX);
+ address_space_init(&iommu_as[devfn]->as,
&iommu_as[devfn]->iommu,
+ "amd-iommu");
+ }
+ return &iommu_as[devfn]->as;
+}
+
+/* validate a page table entry */
+static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid,
+ uint64_t *dte)
+{
+ if ((dte[0] & IOMMU_DTE_LOWER_QUAD_RESERVED)
+ || (dte[1] & IOMMU_DTE_MIDDLE_QUAD_RESERVED)
+ || (dte[2] & IOMMU_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
+ amd_iommu_log_illegaldevtab_error(s, devid,
+ s->devtab + devid *
IOMMU_DEVTAB_ENTRY_SIZE, 0);
+ return false;
+ }
+
+ return dte[0] & IOMMU_DEV_VALID && (dte[0] &
IOMMU_DEV_TRANSLATION_VALID)
+ && (dte[0] & IOMMU_DEV_PT_ROOT_MASK);
+}
+
+/* get a device table entry given the devid */
+static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t
*entry)
+{
+ uint32_t offset = devid * IOMMU_DEVTAB_ENTRY_SIZE;
+
+ IOMMU_DPRINTF(MMU, "Device Table at 0x%"PRIx64, s->devtab);
+
+ if (dma_memory_read(&address_space_memory, s->devtab + offset,
entry,
+ IOMMU_DEVTAB_ENTRY_SIZE)) {
+ IOMMU_DPRINTF(MMU, "error: fail to access Device Entry
devtab 0x%"PRIx64
+ "offset 0x%"PRIx32, s->devtab, offset);
+ /* log ever accessing dte */
+ amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
+ return false;
+ }
+
+ if (!amd_iommu_validate_dte(s, devid, entry)) {
+ IOMMU_DPRINTF(MMU,
+ "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
+ return false;
+ }
+
+ return true;
+}
+
+/* get pte translation mode */
+static inline uint8_t get_pte_translation_mode(uint64_t pte)
+{
+ return (pte >> IOMMU_DEV_MODE_RSHIFT) & IOMMU_DEV_MODE_MASK;
+}
+
+static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
+ IOMMUTLBEntry *ret, unsigned perms,
+ hwaddr addr)
+{
+ uint8_t level, oldlevel;
+ unsigned present;
+ uint64_t pte, pte_addr;
+ uint64_t pte_perms;
+ pte = dte[0];
+
+ level = get_pte_translation_mode(pte);
+
+ if (level >= 7 || level == 0) {
+ IOMMU_DPRINTF(MMU, "error: translation level 0x%"PRIu8 "
detected"
+ "while translating 0x%"PRIx64, level, addr);
+ return -1;
+ }
+
+ while (level > 0) {
+ pte_perms = amd_iommu_get_perms(pte);
+ present = pte & 1;
+ if (!present || perms != (perms & pte_perms)) {
+ amd_iommu_page_fault(as->iommu_state, as->devfn, addr,
perms);
+ IOMMU_DPRINTF(MMU, "error: page fault accessing virtual
addr 0x%"
+ PRIx64, addr);
+ return -1;
+ }
+
+ /* go to the next lower level */
+ pte_addr = pte & IOMMU_DEV_PT_ROOT_MASK;
+ /* add offset and load pte */
+ pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
+ pte = ldq_phys(&address_space_memory, pte_addr);
+ oldlevel = level;
+ level = get_pte_translation_mode(pte);
+
+ /* PT is corrupted or not there */
+ if (level != oldlevel - 1) {
+ return -1;
+ }
+ }
+
+ ret->iova = addr & IOMMU_PAGE_MASK_4K;
+ ret->translated_addr = (pte & IOMMU_DEV_PT_ROOT_MASK) &
IOMMU_PAGE_MASK_4K;
+ ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+ ret->perm = IOMMU_RW;
+ return 0;
+}
+
+/* TODO : Mark addresses as Accessed and Dirty */
If you don't mark addresses as dirty, can't this cause the sporadic
errors in arbitrary programs that Jan talked about?
+static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr
addr,
+ bool is_write, IOMMUTLBEntry *ret)
+{
+ AMDIOMMUState *s = as->iommu_state;
+ uint16_t devid = PCI_DEVID(as->bus_num, as->devfn);
+ IOMMUIOTLBEntry *iotlb_entry;
+ uint8_t err;
+ uint64_t entry[4];
+
+ /* try getting a cache entry first */
+ iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
+
+ if (iotlb_entry) {
+ IOMMU_DPRINTF(CACHE, "hit iotlb devid: %02x:%02x.%x gpa
0x%"PRIx64
+ " hpa 0x%"PRIx64, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
+ PCI_FUNC(devid), addr,
iotlb_entry->translated_addr);
+ ret->iova = addr & IOMMU_PAGE_MASK_4K;
+ ret->translated_addr = iotlb_entry->translated_addr;
+ ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+ ret->perm = iotlb_entry->perms;
+ return;
+ } else {
you return from the if clause so you don't need the else
+ if (!amd_iommu_get_dte(s, devid, entry)) {
is not an error if you did not find the device id?
+ goto out;
+ }
+
+ err = amd_iommu_page_walk(as, entry, ret,
+ is_write ? IOMMU_PERM_WRITE :
IOMMU_PERM_READ,
+ addr);
+ if (err) {
+ IOMMU_DPRINTF(MMU, "error: hardware error accessing page
tables"
+ " while translating addr 0x%"PRIx64, addr);
+ amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
+ goto out;
+ }
+
+ amd_iommu_update_iotlb(s, as->devfn, addr,
ret->translated_addr,
+ ret->perm, entry[1] &
IOMMU_DEV_DOMID_ID_MASK);
+ return;
+ }
+
+out:
+ ret->iova = addr;
+ ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
+ ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+ ret->perm = IOMMU_RW;
+ return;
you don't need the above return
+}
+
+static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr
addr,
+ bool is_write)
+{
+ IOMMU_DPRINTF(GENERAL, "");
+
+ AMDIOMMUAddressSpace *as = container_of(iommu,
AMDIOMMUAddressSpace, iommu);
+ AMDIOMMUState *s = as->iommu_state;
+
+ IOMMUTLBEntry ret = {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = 0,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
+
+ if (!s->enabled) {
+ /* IOMMU disabled - corresponds to iommu=off not
+ * failure to provide any parameter
+ */
+ ret.iova = addr & IOMMU_PAGE_MASK_4K;
+ ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
+ ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
+ ret.perm = IOMMU_RW;
+ return ret;
+ }
+
+ amd_iommu_do_translate(as, addr, is_write, &ret);
+ IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa
0x%"PRIx64,
+ as->bus_num, PCI_SLOT(as->devfn),
PCI_FUNC(as->devfn), addr,
+ ret.translated_addr);
+
+ return ret;
+}
+
/* Ops for the IOMMU MMIO register region: the memory core splits and
 * aligns guest accesses so the handlers see aligned, little-endian
 * accesses of 1-8 bytes. */
static const MemoryRegionOps mmio_mem_ops = {
    .read = amd_iommu_mmio_read,
    .write = amd_iommu_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};
+
+static void amd_iommu_init(AMDIOMMUState *s)
+{
+ printf("amd_iommu_init");
You should use the IOMMU_DPRINTF debug macro here instead of a bare printf.
+
+ amd_iommu_iotlb_reset(s);
+
+ s->iommu_ops.translate = amd_iommu_translate;
+
+ s->devtab_len = 0;
+ s->cmdbuf_len = 0;
+ s->cmdbuf_head = 0;
+ s->cmdbuf_tail = 0;
+ s->evtlog_head = 0;
+ s->evtlog_tail = 0;
+ s->excl_enabled = false;
+ s->excl_allow = false;
+ s->mmio_enabled = false;
+ s->enabled = false;
+ s->ats_enabled = false;
+ s->cmdbuf_enabled = false;
+
+ /* reset MMIO */
+ memset(s->mmior, 0, IOMMU_MMIO_SIZE);
+ amd_iommu_set_quad(s, IOMMU_MMIO_EXT_FEATURES, IOMMU_EXT_FEATURES,
+ 0xffffffffffffffef, 0);
+ amd_iommu_set_quad(s, IOMMU_MMIO_STATUS, 0, 0x98, 0x67);
+ /* reset device ident */
+ pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
+ pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
+ pci_config_set_prog_interface(s->dev.config, 00);
+ pci_config_set_class(s->dev.config, 0x0806);
+
+ /* reset IOMMU specific capabilities */
+ pci_set_long(s->dev.config + s->capab_offset,
IOMMU_CAPAB_FEATURES);
+ pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_BAR_LOW,
+ s->mmio.addr & ~(0xffff0000));
+ pci_set_long(s->dev.config + s->capab_offset +
IOMMU_CAPAB_BAR_HIGH,
+ (s->mmio.addr & ~(0xffff)) >> 16);
+ pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_RANGE,
+ 0xff000000);
+ pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
0);
+ pci_set_long(s->dev.config + s->capab_offset + IOMMU_CAPAB_MISC,
+ IOMMU_MAX_PH_ADDR | IOMMU_MAX_GVA_ADDR |
IOMMU_MAX_VA_ADDR);
Are all the capability fields really read-write? If not, you need to set
the wmask to indicate which fields are guest-writable.