From df4c27873347545cd956c22b4c3d188ea3ab5798 Mon Sep 17 00:00:00 2001 From: Zha Bin Date: Mon, 30 Dec 2019 13:59:34 +0800 Subject: [PATCH 1/8] virtio-mmio: add per-queue notify feature Standard virtio-mmio devices use a single notification register to signal the backend. This causes VM exits and slows down performance when virtio-mmio devices are passed through to guest virtual machines. We proposed to update the virtio-over-MMIO spec to add the per-queue notify feature VIRTIO_F_MMIO_NOTIFICATION, which allows the VMM to configure a notify location for each queue. Signed-off-by: Liu Jiang Signed-off-by: Zha Bin Signed-off-by: Chao Peng Signed-off-by: Jing Liu --- drivers/virtio/virtio_mmio.c | 22 +++++++++++++++++++++- drivers/virtio/virtio_ring.c | 2 ++ include/uapi/linux/virtio_config.h | 7 ++++++- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index e09edb5c5e0653..c0b80698c815f0 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -90,6 +90,9 @@ struct virtio_mmio_device { /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; + + unsigned short notify_base; + unsigned short notify_multiplier; }; struct virtio_mmio_vq_info { @@ -98,6 +101,9 @@ struct virtio_mmio_vq_info { /* the list node for the virtqueues list */ struct list_head node; + + /* Notify Address*/ + unsigned int priv; }; @@ -272,10 +278,14 @@ static void vm_reset(struct virtio_device *vdev) static bool vm_notify(struct virtqueue *vq) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); + struct virtio_mmio_vq_info *info = vq->priv; + if (info && info->priv != 0) + writel(vq->index, vm_dev->base + info->priv); + else /* We write the queue's selector into the notification register to * signal the other end */ - writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); return true; } @@ -433,6 +443,9 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, vq->priv = info; info->vq = vq; + /* If not set VIRTIO_F_MMIO_NOTIFICATION, info->priv is 0 */ + info->priv = vm_dev->notify_base + + vm_dev->notify_multiplier * vq->index; spin_lock_irqsave(&vm_dev->lock, flags); list_add(&info->node, &vm_dev->virtqueues); @@ -471,6 +484,13 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return irq; } + if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION)) { + u32 db = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + + vm_dev->notify_base = db & 0xffff; + vm_dev->notify_multiplier = (db >> 16) & 0xffff; + } + err = request_irq(irq, vm_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vm_dev); if (err) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 867c7ebd3f107a..af578f602ebd45 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2231,6 +2231,8 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_F_ORDER_PLATFORM: break; + case VIRTIO_F_MMIO_NOTIFICATION: + break; default: /* We don't understand this bit. */ __virtio_clear_bit(vdev, i); diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index ff8e7dc9d4dd22..1da4f82225870f 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -52,7 +52,7 @@ * rest are per-device feature bits.
*/ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 38 +#define VIRTIO_TRANSPORT_F_END 40 #ifndef VIRTIO_CONFIG_NO_LEGACY /* Do we get callbacks when the ring is completely used, even if we've @@ -88,4 +88,9 @@ * Does the device support Single Root I/O Virtualization? */ #define VIRTIO_F_SR_IOV 37 + +/* + * Note the feature is supported by virtio-mmio device + */ +#define VIRTIO_F_MMIO_NOTIFICATION 39 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ From f7da48aa57e723a67dccae55292b21afbd921102 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Sat, 18 Jan 2020 00:14:59 +0800 Subject: [PATCH 2/8] Refine feature bit finalizing and some renaming - Bit 39 is not a vring feature common to all transport layers, so handle it in the transport code, as is done for VIRTIO_F_SR_IOV. - priv is renamed to notify_addr, which is more explicit, and the vm_notify address handling is unified. Feel free to discuss if you have a different idea. Once tested, this can simply be squashed into the previous commit. Signed-off-by: Jing Liu --- drivers/virtio/virtio_mmio.c | 37 +++++++++++++++++++----------- drivers/virtio/virtio_ring.c | 2 -- include/uapi/linux/virtio_config.h | 2 +- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index c0b80698c815f0..14f59c98134a7b 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -102,8 +102,8 @@ struct virtio_mmio_vq_info { /* the list node for the virtqueues list */ struct list_head node; - /* Notify Address*/ - unsigned int priv; + /* Notify Address */ + unsigned int notify_addr; }; @@ -125,6 +125,12 @@ static u64 vm_get_features(struct virtio_device *vdev) return features; } +static void vm_transport_features(struct virtio_device *vdev) +{ + if (vdev->features & BIT_ULL(VIRTIO_F_MMIO_NOTIFICATION)) + __virtio_set_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION); +} + static int vm_finalize_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); @@ -132,6 +138,9 @@ static int vm_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); + /* Give virtio_mmio a chance to accept features.
*/ + vm_transport_features(vdev); + /* Make sure there is are no mixed devices */ if (vm_dev->version == 2 && !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { @@ -280,12 +289,11 @@ static bool vm_notify(struct virtqueue *vq) struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); struct virtio_mmio_vq_info *info = vq->priv; - if (info && info->priv != 0) - writel(vq->index, vm_dev->base + info->priv); - else - /* We write the queue's selector into the notification register to + /* We write the queue's selector into Notify Address to * signal the other end */ - writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + if (info) + writel(vq->index, vm_dev->base + info->notify_addr); + return true; } @@ -443,9 +451,12 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, vq->priv = info; info->vq = vq; - /* If not set VIRTIO_F_MMIO_NOTIFICATION, info->priv is 0 */ - info->priv = vm_dev->notify_base + - vm_dev->notify_multiplier * vq->index; + + if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION)) + info->notify_addr = vm_dev->notify_base + + vm_dev->notify_multiplier * vq->index; + else + info->notify_addr = VIRTIO_MMIO_QUEUE_NOTIFY; spin_lock_irqsave(&vm_dev->lock, flags); list_add(&info->node, &vm_dev->virtqueues); @@ -485,10 +496,10 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, } if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION)) { - u32 db = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + unsigned notify = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); - vm_dev->notify_base = db & 0xffff; - vm_dev->notify_multiplier = (db >> 16) & 0xffff; + vm_dev->notify_base = notify & 0xffff; + vm_dev->notify_multiplier = (notify >> 16) & 0xffff; } err = request_irq(irq, vm_interrupt, IRQF_SHARED, diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index af578f602ebd45..867c7ebd3f107a 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2231,8 +2231,6 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_F_ORDER_PLATFORM: break; - case VIRTIO_F_MMIO_NOTIFICATION: - break; default: /* We don't understand this bit. */ __virtio_clear_bit(vdev, i); diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 1da4f82225870f..0d153756350490 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -90,7 +90,7 @@ #define VIRTIO_F_SR_IOV 37 /* - * Note the feature is supported by virtio-mmio device + * This feature indicates the enhanced notification support on MMIO layer. */ #define VIRTIO_F_MMIO_NOTIFICATION 39 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ From 0ccb6e9e370af5cd92e95809374d0063f574e109 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Sun, 19 Jan 2020 21:46:49 +0800 Subject: [PATCH 3/8] virtio-mmio: Refactor common functionality Common functionality is refactored into virtio_mmio_common.h in order to prepare for MSI support in a later patch of this set.
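As an illustration of the intended use of the new header, later transport code can include virtio_mmio_common.h and recover the transport-private state from a generic virtio_device. The snippet below is only a sketch, not part of the patch, and the helper name is made up:

    #include "virtio_mmio_common.h"

    /* Sketch only: resolve the MMIO transport-private structure that the
     * new header shares, starting from a generic virtio_device. */
    static void __iomem *example_vm_base(struct virtio_device *vdev)
    {
            struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

            return vm_dev->base;
    }

This is the same pattern the MSI code in a later patch of this set relies on.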
Signed-off-by: Jing Liu --- drivers/virtio/virtio_mmio.c | 21 +------------------ drivers/virtio/virtio_mmio_common.h | 31 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 20 deletions(-) create mode 100644 drivers/virtio/virtio_mmio_common.h diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 14f59c98134a7b..fc2d7e60af3565 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -61,13 +61,12 @@ #include #include #include -#include #include #include -#include #include #include #include +#include "virtio_mmio_common.h" @@ -77,24 +76,6 @@ -#define to_virtio_mmio_device(_plat_dev) \ - container_of(_plat_dev, struct virtio_mmio_device, vdev) - -struct virtio_mmio_device { - struct virtio_device vdev; - struct platform_device *pdev; - - void __iomem *base; - unsigned long version; - - /* a list of queues so we can dispatch IRQs */ - spinlock_t lock; - struct list_head virtqueues; - - unsigned short notify_base; - unsigned short notify_multiplier; -}; - struct virtio_mmio_vq_info { /* the actual virtqueue */ struct virtqueue *vq; diff --git a/drivers/virtio/virtio_mmio_common.h b/drivers/virtio/virtio_mmio_common.h new file mode 100644 index 00000000000000..90cb304b7c5eb3 --- /dev/null +++ b/drivers/virtio/virtio_mmio_common.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _DRIVERS_VIRTIO_VIRTIO_MMIO_COMMON_H +#define _DRIVERS_VIRTIO_VIRTIO_MMIO_COMMON_H +/* + * Virtio MMIO driver - common functionality for all device versions + * + * This module allows virtio devices to be used over a memory-mapped device. + */ + +#include +#include + +#define to_virtio_mmio_device(_plat_dev) \ + container_of(_plat_dev, struct virtio_mmio_device, vdev) + +struct virtio_mmio_device { + struct virtio_device vdev; + struct platform_device *pdev; + + void __iomem *base; + unsigned long version; + + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; + + unsigned short notify_base; + unsigned short notify_multiplier; +}; + +#endif From e8d6123b8be85b2ab4ac5cb2476cf17456000827 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Mon, 20 Jan 2020 00:15:46 +0800 Subject: [PATCH 4/8] virtio-mmio: Create a generic MSI irq domain Create a generic irq domain for all architectures that support virtio-mmio. A device offering the VIRTIO_F_MMIO_MSI feature bit can use this irq domain. Signed-off-by: Jing Liu --- drivers/base/platform-msi.c | 4 +- drivers/virtio/Kconfig | 8 +++ drivers/virtio/virtio_mmio_msi.h | 93 ++++++++++++++++++++++++++++++++ include/linux/msi.h | 1 + 4 files changed, 104 insertions(+), 2 deletions(-) create mode 100644 drivers/virtio/virtio_mmio_msi.h diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 8da314b81eabbe..45752f16631d12 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -31,12 +31,11 @@ struct platform_msi_priv_data { /* The devid allocator */ static DEFINE_IDA(platform_msi_devid_ida); -#ifdef GENERIC_MSI_DOMAIN_OPS /* * Convert an msi_desc to a globaly unique identifier (per-device * devid + msi_desc position in the msi_list).
*/ -static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) +irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) { u32 devid; @@ -45,6 +44,7 @@ static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index; } +#ifdef GENERIC_MSI_DOMAIN_OPS static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { arg->desc = desc; diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 078615cf2afcd5..5f2446e06d737a 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -84,6 +84,14 @@ config VIRTIO_MMIO If unsure, say N. +config VIRTIO_MMIO_MSI + bool "Memory-mapped virtio device MSI" + depends on VIRTIO_MMIO && GENERIC_MSI_IRQ_DOMAIN && GENERIC_MSI_IRQ + ---help--- + This allows device drivers to enable MSI to improve performance. + + If unsure, say N. + config VIRTIO_MMIO_CMDLINE_DEVICES bool "Memory mapped virtio devices parameter parsing" depends on VIRTIO_MMIO diff --git a/drivers/virtio/virtio_mmio_msi.h b/drivers/virtio/virtio_mmio_msi.h new file mode 100644 index 00000000000000..27cb2af8235234 --- /dev/null +++ b/drivers/virtio/virtio_mmio_msi.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _DRIVERS_VIRTIO_VIRTIO_MMIO_MSI_H +#define _DRIVERS_VIRTIO_VIRTIO_MMIO_MSI_H + +#ifdef CONFIG_VIRTIO_MMIO_MSI + +#include +#include +#include +#include + +static irq_hw_number_t mmio_msi_hwirq; +static struct irq_domain *mmio_msi_domain; + +struct irq_domain *__weak arch_msi_root_irq_domain(void) +{ + return NULL; +} + +void __weak irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ +} + +static void mmio_msi_mask_irq(struct irq_data *data) +{ +} + +static void mmio_msi_unmask_irq(struct irq_data *data) +{ +} + +static struct irq_chip mmio_msi_controller = { + .name = "VIRTIO-MMIO-MSI", + .irq_mask = mmio_msi_mask_irq, + .irq_unmask = mmio_msi_unmask_irq, + .irq_ack = irq_chip_ack_parent, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static int mmio_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + memset(arg, 0, sizeof(*arg)); + return 0; +} + +static void mmio_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + mmio_msi_hwirq = platform_msi_calc_hwirq(desc); +} + +static irq_hw_number_t mmio_msi_get_hwirq(struct msi_domain_info *info, + msi_alloc_info_t *arg) +{ + return mmio_msi_hwirq; +} + +static struct msi_domain_ops mmio_msi_domain_ops = { + .msi_prepare = mmio_msi_prepare, + .set_desc = mmio_msi_set_desc, + .get_hwirq = mmio_msi_get_hwirq, +}; + +static struct msi_domain_info mmio_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_ACTIVATE_EARLY, + .ops = &mmio_msi_domain_ops, + .chip = &mmio_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static inline void mmio_msi_create_irq_domain(void) +{ + struct fwnode_handle *fn; + struct irq_domain *parent = arch_msi_root_irq_domain(); + + fn = irq_domain_alloc_named_fwnode("VIRTIO-MMIO-MSI"); + if (fn && parent) { + mmio_msi_domain = + platform_msi_create_irq_domain(fn, + &mmio_msi_domain_info, parent); + irq_domain_free_fwnode(fn); + } +} +#else +static inline void mmio_msi_create_irq_domain(void) {} +#endif + +#endif diff --git a/include/linux/msi.h b/include/linux/msi.h index 8ad679e9d9c04a..ee5f56629547c7 100644 --- a/include/linux/msi.h +++ 
b/include/linux/msi.h @@ -362,6 +362,7 @@ int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nvec); void *platform_msi_get_host_data(struct irq_domain *domain); +irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc); #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN From 4ff299b1014455010e68696f74b09f084047ac22 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Mon, 20 Jan 2020 22:52:37 +0800 Subject: [PATCH 5/8] virtio-mmio: Add MSI feature bit definition The VIRTIO_F_MMIO_MSI feature bit (40) indicates that the device supports MSI on the MMIO transport layer only. Signed-off-by: Jing Liu --- drivers/virtio/virtio_mmio.c | 7 +++---- include/uapi/linux/virtio_config.h | 7 ++++++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index fc2d7e60af3565..2500f9e59a79be 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -68,14 +68,10 @@ #include #include "virtio_mmio_common.h" - - /* The alignment to use between consumer and producer parts of vring. * Currently hardcoded to the page size. */ #define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE - - struct virtio_mmio_vq_info { /* the actual virtqueue */ struct virtqueue *vq; @@ -110,6 +106,9 @@ static void vm_transport_features(struct virtio_device *vdev) { if (vdev->features & BIT_ULL(VIRTIO_F_MMIO_NOTIFICATION)) __virtio_set_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION); + + if (vdev->features & BIT_ULL(VIRTIO_F_MMIO_MSI)) + __virtio_set_bit(vdev, VIRTIO_F_MMIO_MSI); } static int vm_finalize_features(struct virtio_device *vdev) diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 0d153756350490..5982b76b24a1e6 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -52,7 +52,7 @@ * rest are per-device feature bits. */ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 40 +#define VIRTIO_TRANSPORT_F_END 41 #ifndef VIRTIO_CONFIG_NO_LEGACY /* Do we get callbacks when the ring is completely used, even if we've @@ -93,4 +93,9 @@ * This feature indicates the enhanced notification support on MMIO layer. */ #define VIRTIO_F_MMIO_NOTIFICATION 39 + +/* + * This feature indicates the MSI support on MMIO layer. + */ +#define VIRTIO_F_MMIO_MSI 40 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ From 0c2698592cdc507410b4b41580b1e657e187118c Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Mon, 20 Jan 2020 23:27:59 +0800 Subject: [PATCH 6/8] virtio-mmio: Add MSI interrupt support Userspace VMMs (e.g. QEMU microvm, Firecracker) use virtio-over-MMIO devices as a lightweight machine model for the modern cloud. The standard virtio-over-MMIO transport layer supports only one legacy interrupt, which is much heavier than the virtio-over-PCI transport layer using MSI. The legacy interrupt has a long processing path and causes extra VM exits in the following cases, which considerably slows down performance: 1) reading the interrupt status register 2) updating the interrupt status register 3) writing the IOAPIC EOI register We proposed to add MSI support for virtio over MMIO via the new feature bit VIRTIO_F_MMIO_MSI, which improves interrupt performance for virtio multi-queue devices.
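To make the new register interface concrete, the per-vector programming sequence used by this patch can be sketched as follows (illustrative code only; the real implementation is mmio_write_msi_msg() in the diff below, and the helper name here is made up):

    /* Sketch only: program one MSI vector through the registers this
     * patch adds, then commit it with the CONFIGURE command. */
    static void example_program_vector(void __iomem *base, u32 vec,
                                       struct msi_msg *msg)
    {
            writel(vec, base + VIRTIO_MMIO_MSI_VEC_SEL);
            writel(msg->address_lo, base + VIRTIO_MMIO_MSI_ADDRESS_LOW);
            writel(msg->address_hi, base + VIRTIO_MMIO_MSI_ADDRESS_HIGH);
            writel(msg->data, base + VIRTIO_MMIO_MSI_DATA);
            writel(VIRTIO_MMIO_MSI_CMD_CONFIGURE,
                   base + VIRTIO_MMIO_MSI_COMMAND);
    }

The same select-then-command pattern is used for the mask and unmask commands in this patch and for the mapping commands added in a later patch.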
Signed-off-by: Liu Jiang Signed-off-by: Zha Bin Signed-off-by: Chao Peng Signed-off-by: Jing Liu --- drivers/virtio/virtio_mmio.c | 267 +++++++++++++++++++++++++--- drivers/virtio/virtio_mmio_common.h | 8 + drivers/virtio/virtio_mmio_msi.h | 81 +++++++++ include/uapi/linux/virtio_mmio.h | 27 +++ 4 files changed, 361 insertions(+), 22 deletions(-) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 2500f9e59a79be..85453b03e8918b 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -67,6 +67,7 @@ #include #include #include "virtio_mmio_common.h" +#include "virtio_mmio_msi.h" /* The alignment to use between consumer and producer parts of vring. * Currently hardcoded to the page size. */ @@ -81,9 +82,13 @@ struct virtio_mmio_vq_info { /* Notify Address */ unsigned int notify_addr; -}; + /* MSI vector (or none) */ + unsigned int msi_vector; +}; +static void vm_free_msi_irqs(struct virtio_device *vdev); +static int vm_request_msi_vectors(struct virtio_device *vdev, int nirqs); /* Configuration interface */ @@ -259,8 +264,6 @@ static void vm_reset(struct virtio_device *vdev) writel(0, vm_dev->base + VIRTIO_MMIO_STATUS); } - - /* Transport interface */ /* the notify function used when creating a virt queue */ @@ -305,8 +308,34 @@ static irqreturn_t vm_interrupt(int irq, void *opaque) return ret; } +static irqreturn_t vm_vring_interrupt(int irq, void *opaque) +{ + struct virtio_mmio_device *vm_dev = opaque; + struct virtio_mmio_vq_info *info; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + + spin_lock_irqsave(&vm_dev->lock, flags); + list_for_each_entry(info, &vm_dev->virtqueues, node) { + if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + spin_unlock_irqrestore(&vm_dev->lock, flags); + + return ret; +} +/* Handle a configuration change */ +static irqreturn_t vm_config_changed(int irq, void *opaque) +{ + struct virtio_mmio_device *vm_dev = opaque; + + virtio_config_changed(&vm_dev->vdev); + + return IRQ_HANDLED; +} + static void vm_del_vq(struct virtqueue *vq) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); @@ -314,6 +343,15 @@ static void vm_del_vq(struct virtqueue *vq) unsigned long flags; unsigned int index = vq->index; + if (vm_dev->msi_enabled && vm_dev->per_vq_vectors) { + if (info->msi_vector != VIRTIO_MMIO_MSI_NO_VECTOR) { + int irq = mmio_msi_irq_vector(&vq->vdev->dev, + info->msi_vector); + + free_irq(irq, vq); + } + } + spin_lock_irqsave(&vm_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vm_dev->lock, flags); @@ -332,20 +370,42 @@ static void vm_del_vq(struct virtqueue *vq) kfree(info); } -static void vm_del_vqs(struct virtio_device *vdev) +static void vm_free_irqs(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + if (vm_dev->msi_enabled) + vm_free_msi_irqs(vdev); + else + free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev); +} + +static void vm_del_vqs(struct virtio_device *vdev) +{ struct virtqueue *vq, *n; list_for_each_entry_safe(vq, n, &vdev->vqs, list) vm_del_vq(vq); - free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev); + vm_free_irqs(vdev); +} + +static inline void mmio_msi_set_enable(struct virtio_mmio_device *vm_dev, + int enable) +{ + u32 state; + + state = readl(vm_dev->base + VIRTIO_MMIO_MSI_STATE); + if (enable && (state & VIRTIO_MMIO_MSI_ENABLED_MASK)) + return; + + writel(VIRTIO_MMIO_MSI_CMD_ENABLE, + vm_dev->base + VIRTIO_MMIO_MSI_COMMAND); } static struct virtqueue *vm_setup_vq(struct 
virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name, bool ctx) + const char *name, bool ctx, u32 msi_vector) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); struct virtio_mmio_vq_info *info; @@ -438,6 +498,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, else info->notify_addr = VIRTIO_MMIO_QUEUE_NOTIFY; + info->msi_vector = msi_vector; + spin_lock_irqsave(&vm_dev->lock, flags); list_add(&info->node, &vm_dev->virtqueues); spin_unlock_irqrestore(&vm_dev->lock, flags); @@ -459,12 +521,11 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, return ERR_PTR(err); } -static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[], - const bool *ctx, - struct irq_affinity *desc) +static int vm_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + const bool *ctx) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); int irq = platform_get_irq(vm_dev->pdev, 0); @@ -475,17 +536,8 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return irq; } - if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION)) { - unsigned notify = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); - - vm_dev->notify_base = notify & 0xffff; - vm_dev->notify_multiplier = (notify >> 16) & 0xffff; - } - err = request_irq(irq, vm_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vm_dev); - if (err) - return err; for (i = 0; i < nvqs; ++i) { if (!names[i]) { @@ -494,14 +546,183 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, } vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], - ctx ? ctx[i] : false); + ctx ? ctx[i] : false, + VIRTIO_MMIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { vm_del_vqs(vdev); return PTR_ERR(vqs[i]); } } + return err; +} + +static int vm_find_vqs_msi(struct virtio_device *vdev, unsigned int nvqs, + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], bool per_vq_vectors, + const bool *ctx, struct irq_affinity *desc) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + int i, err, allocated_vectors, nvectors; + u32 msi_vec; + + if (per_vq_vectors) { + nvectors = 1; + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++nvectors; + } else { + nvectors = 2; + } + + vm_dev->per_vq_vectors = per_vq_vectors; + + /* Allocate nvqs irqs for queues and one irq for configuration */ + err = vm_request_msi_vectors(vdev, nvectors); + if (err != 0) + return err; + + allocated_vectors = vm_dev->msi_used_vectors; + for (i = 0; i < nvqs; i++) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } + if (!callbacks[i]) + msi_vec = VIRTIO_MMIO_MSI_NO_VECTOR; + else if (per_vq_vectors) + msi_vec = allocated_vectors++; + else + msi_vec = 1; + vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], + ctx ? 
ctx[i] : false, msi_vec); + if (IS_ERR(vqs[i])) { + err = PTR_ERR(vqs[i]); + goto error_find; + } + + if (!per_vq_vectors || + msi_vec == VIRTIO_MMIO_MSI_NO_VECTOR) + continue; + + /* allocate per-vq irq if available and necessary */ + snprintf(vm_dev->vm_vq_names[msi_vec], + sizeof(*vm_dev->vm_vq_names), + "%s-%s", + dev_name(&vm_dev->vdev.dev), names[i]); + err = request_irq(mmio_msi_irq_vector(&vqs[i]->vdev->dev, + msi_vec), + vring_interrupt, 0, + vm_dev->vm_vq_names[msi_vec], vqs[i]); + + if (err) + goto error_find; + } + return 0; + +error_find: + vm_del_vqs(vdev); + return err; +} + +static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + const bool *ctx, + struct irq_affinity *desc) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + int err; + + if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION)) { + unsigned notify = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + + vm_dev->notify_base = notify & 0xffff; + vm_dev->notify_multiplier = (notify >> 16) & 0xffff; + } + + if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_MSI)) { + err = vm_find_vqs_msi(vdev, nvqs, vqs, callbacks, + names, true, ctx, desc); + if (!err) + return 0; + } + + return vm_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx); +} + +static int vm_request_msi_vectors(struct virtio_device *vdev, int nirqs) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + unsigned int v; + int irq, err; + + if (vm_dev->msi_enabled) + return -EINVAL; + + vm_dev->vm_vq_names = kmalloc_array(nirqs, sizeof(*vm_dev->vm_vq_names), + GFP_KERNEL); + if (!vm_dev->vm_vq_names) + return -ENOMEM; + + mmio_get_msi_domain(vdev); + err = mmio_msi_domain_alloc_irqs(&vdev->dev, nirqs); + if (err) { + kfree(vm_dev->vm_vq_names); + vm_dev->vm_vq_names = NULL; + return err; + } + + mmio_msi_set_enable(vm_dev, 1); + vm_dev->msi_enabled = true; + + v = vm_dev->msi_used_vectors; + /* The first MSI vector is used for configuration change event. 
*/ + snprintf(vm_dev->vm_vq_names[v], sizeof(*vm_dev->vm_vq_names), + "%s-config", dev_name(&vdev->dev)); + irq = mmio_msi_irq_vector(&vdev->dev, v); + err = request_irq(irq, vm_config_changed, 0, vm_dev->vm_vq_names[v], + vm_dev); + if (err) + goto error_request_irq; + + ++vm_dev->msi_used_vectors; + + if (!vm_dev->per_vq_vectors) { + v = vm_dev->msi_used_vectors; + snprintf(vm_dev->vm_vq_names[v], sizeof(*vm_dev->vm_vq_names), + "%s-virtqueues", dev_name(&vm_dev->vdev.dev)); + err = request_irq(mmio_msi_irq_vector(&vdev->dev, v), + vm_vring_interrupt, 0, vm_dev->vm_vq_names[v], + vm_dev); + if (err) + goto error_request_irq; + ++vm_dev->msi_used_vectors; + } + + return 0; + +error_request_irq: + vm_free_msi_irqs(vdev); + + return err; +} + +static void vm_free_msi_irqs(struct virtio_device *vdev) +{ + int i; + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + mmio_msi_set_enable(vm_dev, 0); + for (i = 0; i < vm_dev->msi_used_vectors; i++) + free_irq(mmio_msi_irq_vector(&vdev->dev, i), vm_dev); + mmio_msi_domain_free_irqs(&vdev->dev); + kfree(vm_dev->vm_vq_names); + vm_dev->vm_vq_names = NULL; + vm_dev->msi_enabled = false; + vm_dev->msi_used_vectors = 0; } static const char *vm_bus_name(struct virtio_device *vdev) @@ -615,6 +836,8 @@ static int virtio_mmio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, vm_dev); + mmio_msi_create_irq_domain(); + rc = register_virtio_device(&vm_dev->vdev); if (rc) put_device(&vm_dev->vdev.dev); diff --git a/drivers/virtio/virtio_mmio_common.h b/drivers/virtio/virtio_mmio_common.h index 90cb304b7c5eb3..77b53e6d853dd7 100644 --- a/drivers/virtio/virtio_mmio_common.h +++ b/drivers/virtio/virtio_mmio_common.h @@ -26,6 +26,14 @@ struct virtio_mmio_device { unsigned short notify_base; unsigned short notify_multiplier; + + /* Name strings for interrupts. This size should be enough. */ + char (*vm_vq_names)[256]; + + /* used vectors */ + unsigned int msi_used_vectors; + bool per_vq_vectors; + bool msi_enabled; }; #endif diff --git a/drivers/virtio/virtio_mmio_msi.h b/drivers/virtio/virtio_mmio_msi.h index 27cb2af8235234..2f545ae90a1c4f 100644 --- a/drivers/virtio/virtio_mmio_msi.h +++ b/drivers/virtio/virtio_mmio_msi.h @@ -8,6 +8,7 @@ #include #include #include +#include "virtio_mmio_common.h" static irq_hw_number_t mmio_msi_hwirq; static struct irq_domain *mmio_msi_domain; @@ -21,12 +22,41 @@ void __weak irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { } +static void __iomem *vm_dev_base(struct msi_desc *desc) +{ + if (desc) { + struct device *dev = desc->dev; + struct virtio_device *vdev = dev_to_virtio(dev); + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + return vm_dev->base; + } + + return NULL; +} + +static void mmio_msi_set_mask_bit(struct irq_data *data, u32 flag) +{ + struct msi_desc *desc = irq_data_get_msi_desc(data); + void __iomem *base = vm_dev_base(desc); + unsigned int offset = data->irq - desc->irq; + + if (base) { + u32 op = flag ? 
VIRTIO_MMIO_MSI_CMD_MASK : + VIRTIO_MMIO_MSI_CMD_UNMASK; + writel(offset, base + VIRTIO_MMIO_MSI_VEC_SEL); + writel(op, base + VIRTIO_MMIO_MSI_COMMAND); + } +} + static void mmio_msi_mask_irq(struct irq_data *data) { + mmio_msi_set_mask_bit(data, 1); } static void mmio_msi_unmask_irq(struct irq_data *data) { + mmio_msi_set_mask_bit(data, 0); } static struct irq_chip mmio_msi_controller = { @@ -86,8 +116,59 @@ static inline void mmio_msi_create_irq_domain(void) irq_domain_free_fwnode(fn); } } + +static void mmio_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + void __iomem *base = vm_dev_base(desc); + + if (base) { + writel(desc->platform.msi_index, base + VIRTIO_MMIO_MSI_VEC_SEL); + writel(msg->address_lo, base + VIRTIO_MMIO_MSI_ADDRESS_LOW); + writel(msg->address_hi, base + VIRTIO_MMIO_MSI_ADDRESS_HIGH); + writel(msg->data, base + VIRTIO_MMIO_MSI_DATA); + writel(VIRTIO_MMIO_MSI_CMD_CONFIGURE, + base + VIRTIO_MMIO_MSI_COMMAND); + } +} + +static inline int mmio_msi_domain_alloc_irqs(struct device *dev, + unsigned int nvec) +{ + return platform_msi_domain_alloc_irqs(dev, nvec, + mmio_write_msi_msg); +} + +static inline void mmio_msi_domain_free_irqs(struct device *dev) +{ + return platform_msi_domain_free_irqs(dev); +} + +static inline void mmio_get_msi_domain(struct virtio_device *vdev) +{ + if (!vdev->dev.msi_domain) + vdev->dev.msi_domain = mmio_msi_domain; +} + +static inline int mmio_msi_irq_vector(struct device *dev, unsigned int nr) +{ + struct msi_desc *entry = first_msi_entry(dev); + + return entry->irq + nr; +} + #else static inline void mmio_msi_create_irq_domain(void) {} +static inline int mmio_msi_irq_vector(struct device *dev, unsigned int nr) +{ + return -EINVAL; +} +static inline void mmio_get_msi_domain(struct virtio_device *vdev) {} +static inline int mmio_msi_domain_alloc_irqs(struct device *dev, + unsigned int nvec) +{ + return -EINVAL; +} +static inline void mmio_msi_domain_free_irqs(struct device *dev) {} #endif #endif diff --git a/include/uapi/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h index c4b09689ab6447..29f000b006e649 100644 --- a/include/uapi/linux/virtio_mmio.h +++ b/include/uapi/linux/virtio_mmio.h @@ -122,6 +122,21 @@ #define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0 #define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4 +/* MSI max vector number that device supports - Read Only */ +#define VIRTIO_MMIO_MSI_VEC_NUM 0x0c0 +/* MSI state register - Read Only */ +#define VIRTIO_MMIO_MSI_STATE 0x0c4 +/* MSI command register - Write Only */ +#define VIRTIO_MMIO_MSI_COMMAND 0x0c8 +/* MSI vector selector - Write Only */ +#define VIRTIO_MMIO_MSI_VEC_SEL 0x0d0 +/* MSI low 32 bit address, 64 bits in two halves */ +#define VIRTIO_MMIO_MSI_ADDRESS_LOW 0x0d4 +/* MSI high 32 bit address, 64 bits in two halves */ +#define VIRTIO_MMIO_MSI_ADDRESS_HIGH 0x0d8 +/* MSI 32 bit data */ +#define VIRTIO_MMIO_MSI_DATA 0x0dc + /* Configuration atomicity value */ #define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc @@ -130,6 +145,18 @@ #define VIRTIO_MMIO_CONFIG 0x100 +/* MSI commands */ +#define VIRTIO_MMIO_MSI_CMD_ENABLE 0x1 +#define VIRTIO_MMIO_MSI_CMD_DISABLE 0x2 +#define VIRTIO_MMIO_MSI_CMD_CONFIGURE 0x3 +#define VIRTIO_MMIO_MSI_CMD_MASK 0x4 +#define VIRTIO_MMIO_MSI_CMD_UNMASK 0x5 + +/* MSI NO_VECTOR */ +#define VIRTIO_MMIO_MSI_NO_VECTOR 0xffffffff + +/* MSI state enabled state mask */ +#define VIRTIO_MMIO_MSI_ENABLED_MASK (1 << 31) /* * Interrupt flags (re: interrupt status & acknowledge registers) From e3613709d76f31421fa79e59d483b4eda86cb791 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: 
Tue, 21 Jan 2020 02:07:59 +0800 Subject: [PATCH 7/8] virtio-mmio: Enable MSI sharing mode The MSI sharing bit in the value read from the MsiState register indicates which MSI mode the device uses. When the bit is 0, the device uses non-sharing mode with a fixed vector-per-event mapping. When the bit is 1, the device uses sharing mode with dynamic mapping. On the driver side, when the device uses MSI sharing mode (intended for devices without a high interrupt rate), the configuration change event uses one vector and all queues share another single vector. Signed-off-by: Jing Liu --- drivers/virtio/virtio_mmio.c | 49 ++++++++++++++++++++--------- drivers/virtio/virtio_mmio_common.h | 2 +- include/uapi/linux/virtio_mmio.h | 4 +++ 3 files changed, 40 insertions(+), 15 deletions(-) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 85453b03e8918b..257ab8a7f496e8 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -343,7 +343,7 @@ static void vm_del_vq(struct virtqueue *vq) unsigned long flags; unsigned int index = vq->index; - if (vm_dev->msi_enabled && vm_dev->per_vq_vectors) { + if (vm_dev->msi_enabled && !vm_dev->msi_share) { if (info->msi_vector != VIRTIO_MMIO_MSI_NO_VECTOR) { int irq = mmio_msi_irq_vector(&vq->vdev->dev, info->msi_vector); @@ -390,17 +390,16 @@ static void vm_del_vqs(struct virtio_device *vdev) vm_free_irqs(vdev); } -static inline void mmio_msi_set_enable(struct virtio_mmio_device *vm_dev, - int enable) +static void mmio_msi_config_vector(struct virtio_mmio_device *vm_dev, u32 vec) { - u32 state; - - state = readl(vm_dev->base + VIRTIO_MMIO_MSI_STATE); - if (enable && (state & VIRTIO_MMIO_MSI_ENABLED_MASK)) - return; + writel(vec, vm_dev->base + VIRTIO_MMIO_MSI_VEC_SEL); + writel(VIRTIO_MMIO_MSI_CMD_MAP_CONFIG, vm_dev->base + VIRTIO_MMIO_MSI_COMMAND); +} - writel(VIRTIO_MMIO_MSI_CMD_ENABLE, - vm_dev->base + VIRTIO_MMIO_MSI_COMMAND); +static void mmio_msi_queue_vector(struct virtio_mmio_device *vm_dev, u32 vec) +{ + writel(vec, vm_dev->base + VIRTIO_MMIO_MSI_VEC_SEL); + writel(VIRTIO_MMIO_MSI_CMD_MAP_QUEUE, vm_dev->base + VIRTIO_MMIO_MSI_COMMAND); } static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, @@ -500,6 +499,10 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, info->msi_vector = msi_vector; + /* Set queue event and vector mapping for MSI share mode. */ + if (vm_dev->msi_share && msi_vector != VIRTIO_MMIO_MSI_NO_VECTOR) + mmio_msi_queue_vector(vm_dev, msi_vector); + spin_lock_irqsave(&vm_dev->lock, flags); list_add(&info->node, &vm_dev->virtqueues); spin_unlock_irqrestore(&vm_dev->lock, flags); @@ -565,6 +568,13 @@ static int vm_find_vqs_msi(struct virtio_device *vdev, unsigned int nvqs, struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); int i, err, allocated_vectors, nvectors; u32 msi_vec; + u32 max_vec_num = readl(vm_dev->base + VIRTIO_MMIO_MSI_VEC_NUM); + + /* For MSI non-sharing, the max vector number MUST be greater than nvqs. + * Otherwise, go back to legacy interrupt.
+ */ + if (per_vq_vectors && max_vec_num < (nvqs + 1)) + return -EINVAL; if (per_vq_vectors) { nvectors = 1; @@ -575,7 +585,7 @@ static int vm_find_vqs_msi(struct virtio_device *vdev, unsigned int nvqs, nvectors = 2; } - vm_dev->per_vq_vectors = per_vq_vectors; + vm_dev->msi_share = !per_vq_vectors; /* Allocate nvqs irqs for queues and one irq for configuration */ err = vm_request_msi_vectors(vdev, nvectors); @@ -644,8 +654,15 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, } if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_MSI)) { - err = vm_find_vqs_msi(vdev, nvqs, vqs, callbacks, - names, true, ctx, desc); + bool dyn_mapping = !!(readl(vm_dev->base + VIRTIO_MMIO_MSI_STATE) & + VIRTIO_MMIO_MSI_SHARING_MASK); + + if (!dyn_mapping) + err = vm_find_vqs_msi(vdev, nvqs, vqs, callbacks, + names, true, ctx, desc); + else + err = vm_find_vqs_msi(vdev, nvqs, vqs, callbacks, + names, false, ctx, desc); if (!err) return 0; } @@ -688,9 +705,13 @@ static int vm_request_msi_vectors(struct virtio_device *vdev, int nirqs) if (err) goto error_request_irq; + /* Set the configuration event mapping. */ + if (vm_dev->msi_share) + mmio_msi_config_vector(vm_dev, v); + ++vm_dev->msi_used_vectors; - if (!vm_dev->per_vq_vectors) { + if (vm_dev->msi_share) { v = vm_dev->msi_used_vectors; snprintf(vm_dev->vm_vq_names[v], sizeof(*vm_dev->vm_vq_names), "%s-virtqueues", dev_name(&vm_dev->vdev.dev)); diff --git a/drivers/virtio/virtio_mmio_common.h b/drivers/virtio/virtio_mmio_common.h index 77b53e6d853dd7..ccf6320a998ac5 100644 --- a/drivers/virtio/virtio_mmio_common.h +++ b/drivers/virtio/virtio_mmio_common.h @@ -32,7 +32,7 @@ struct virtio_mmio_device { /* used vectors */ unsigned int msi_used_vectors; - bool per_vq_vectors; + bool msi_share; bool msi_enabled; }; diff --git a/include/uapi/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h index 29f000b006e649..777cb0e6209649 100644 --- a/include/uapi/linux/virtio_mmio.h +++ b/include/uapi/linux/virtio_mmio.h @@ -151,12 +151,16 @@ #define VIRTIO_MMIO_MSI_CMD_CONFIGURE 0x3 #define VIRTIO_MMIO_MSI_CMD_MASK 0x4 #define VIRTIO_MMIO_MSI_CMD_UNMASK 0x5 +#define VIRTIO_MMIO_MSI_CMD_MAP_CONFIG 0x6 +#define VIRTIO_MMIO_MSI_CMD_MAP_QUEUE 0x7 /* MSI NO_VECTOR */ #define VIRTIO_MMIO_MSI_NO_VECTOR 0xffffffff /* MSI state enabled state mask */ #define VIRTIO_MMIO_MSI_ENABLED_MASK (1 << 31) +/* MSI state MSI sharing mask */ +#define VIRTIO_MMIO_MSI_SHARING_MASK (1 << 30) /* * Interrupt flags (re: interrupt status & acknowledge registers) From 2ac726d977d887590b54042b783f491b0ca9dd90 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Tue, 21 Jan 2020 02:26:55 +0800 Subject: [PATCH 8/8] x86: virtio-mmio: support virtio-mmio with MSI for x86 Virtio-mmio supports a generic MSI irq domain for all archs. This patch adds the x86 architecture support. Signed-off-by: Jing Liu --- arch/x86/kernel/apic/msi.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 7f7533462474a4..6462f2e43f7993 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -23,7 +23,11 @@ static struct irq_domain *msi_default_domain; -static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +/* + * x86 PCI-MSI/HPET/DMAR related method. + * Also can be used as arch specific method for virtio-mmio MSI. 
+ */ +void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { struct irq_cfg *cfg = irqd_cfg(data); @@ -47,6 +51,11 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) MSI_DATA_VECTOR(cfg->vector); } +struct irq_domain *arch_msi_root_irq_domain(void) +{ + return x86_vector_domain; +} + /* * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, * which implement the MSI or MSI-X Capability Structure.
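For another architecture to opt in, it provides non-weak versions of the two hooks that the generic irq-domain patch earlier in this series declares with __weak defaults. A minimal sketch follows; the bodies are placeholders and are not part of this series:

    /* Sketch only: the two arch hooks another architecture would provide
     * to enable the generic virtio-mmio MSI domain. */
    struct irq_domain *arch_msi_root_irq_domain(void)
    {
            /* return the arch's root vector irq domain, as x86 returns
             * x86_vector_domain above (placeholder: NULL) */
            return NULL;
    }

    void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
    {
            /* fill msg->address_hi, msg->address_lo and msg->data from
             * the arch interrupt-controller data, as the x86 version
             * does with its irq_cfg */
    }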