diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 7f7533462474a4..6462f2e43f7993 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -23,7 +23,11 @@
 
 static struct irq_domain *msi_default_domain;
 
-static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+/*
+ * x86 PCI-MSI/HPET/DMAR related method.
+ * Can also be used as the arch-specific method for virtio-mmio MSI.
+ */
+void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
 {
 	struct irq_cfg *cfg = irqd_cfg(data);
 
@@ -47,6 +51,11 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
 		MSI_DATA_VECTOR(cfg->vector);
 }
 
+struct irq_domain *arch_msi_root_irq_domain(void)
+{
+	return x86_vector_domain;
+}
+
 /*
  * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
  * which implement the MSI or MSI-X Capability Structure.
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 8da314b81eabbe..45752f16631d12 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -31,12 +31,11 @@ struct platform_msi_priv_data {
 /* The devid allocator */
 static DEFINE_IDA(platform_msi_devid_ida);
 
-#ifdef GENERIC_MSI_DOMAIN_OPS
 /*
  * Convert an msi_desc to a globaly unique identifier (per-device
  * devid + msi_desc position in the msi_list).
  */
-static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
 {
 	u32 devid;
 
@@ -45,6 +44,7 @@ static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
 	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
 }
 
+#ifdef GENERIC_MSI_DOMAIN_OPS
 static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
 {
 	arg->desc = desc;
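Un-staticizing platform_msi_calc_hwirq() lets the virtio-mmio MSI code below reuse the platform-MSI hwirq encoding: the IDA-assigned device id occupies the upper bits, and the msi_desc's position in the device's msi_list the lower ones. A worked illustration (not part of the patch), using DEV_ID_SHIFT == 21 as defined in platform-msi.c:

	/* Illustrative values only: devid 3, msi_index 5.
	 * hwirq = (3 << (32 - 21)) | 5 = (3 << 11) | 5 = 0x1805,
	 * leaving room for 1 << 11 = 2048 MSIs per device.
	 */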
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 078615cf2afcd5..5f2446e06d737a 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -84,6 +84,14 @@ config VIRTIO_MMIO
 
 	  If unsure, say N.
 
+config VIRTIO_MMIO_MSI
+	bool "Memory-mapped virtio device MSI"
+	depends on VIRTIO_MMIO && GENERIC_MSI_IRQ_DOMAIN && GENERIC_MSI_IRQ
+	---help---
+	  This allows virtio-mmio device drivers to enable MSI to improve performance.
+
+	  If unsure, say N.
+
 config VIRTIO_MMIO_CMDLINE_DEVICES
 	bool "Memory mapped virtio devices parameter parsing"
 	depends on VIRTIO_MMIO
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index e09edb5c5e0653..257ab8a7f496e8 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -61,46 +61,34 @@
 #include <linux/io.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/virtio.h>
 #include <linux/virtio_config.h>
 #include <uapi/linux/virtio_mmio.h>
 #include <linux/virtio_ring.h>
-
-
+#include "virtio_mmio_common.h"
+#include "virtio_mmio_msi.h"
 
 /* The alignment to use between consumer and producer parts of vring.
  * Currently hardcoded to the page size. */
 #define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE
 
-
-
-#define to_virtio_mmio_device(_plat_dev) \
-	container_of(_plat_dev, struct virtio_mmio_device, vdev)
-
-struct virtio_mmio_device {
-	struct virtio_device vdev;
-	struct platform_device *pdev;
-
-	void __iomem *base;
-	unsigned long version;
-
-	/* a list of queues so we can dispatch IRQs */
-	spinlock_t lock;
-	struct list_head virtqueues;
-};
-
 struct virtio_mmio_vq_info {
 	/* the actual virtqueue */
 	struct virtqueue *vq;
 
 	/* the list node for the virtqueues list */
 	struct list_head node;
-};
+
+	/* Notify Address */
+	unsigned int notify_addr;
+
+	/* MSI vector (or none) */
+	unsigned int msi_vector;
+};
+
+static void vm_free_msi_irqs(struct virtio_device *vdev);
+static int vm_request_msi_vectors(struct virtio_device *vdev, int nirqs);
 
 
 /* Configuration interface */
@@ -119,6 +107,15 @@ static u64 vm_get_features(struct virtio_device *vdev)
 	return features;
 }
 
+static void vm_transport_features(struct virtio_device *vdev)
+{
+	if (vdev->features & BIT_ULL(VIRTIO_F_MMIO_NOTIFICATION))
+		__virtio_set_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION);
+
+	if (vdev->features & BIT_ULL(VIRTIO_F_MMIO_MSI))
+		__virtio_set_bit(vdev, VIRTIO_F_MMIO_MSI);
+}
+
 static int vm_finalize_features(struct virtio_device *vdev)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
@@ -126,6 +123,9 @@ static int vm_finalize_features(struct virtio_device *vdev)
 	/* Give virtio_ring a chance to accept features. */
 	vring_transport_features(vdev);
 
+	/* Give virtio_mmio a chance to accept features. */
+	vm_transport_features(vdev);
+
 	/* Make sure there is are no mixed devices */
 	if (vm_dev->version == 2 &&
 			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
@@ -264,18 +264,19 @@ static void vm_reset(struct virtio_device *vdev)
 	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
 }
 
-
-
 /* Transport interface */
 
 /* the notify function used when creating a virt queue */
 static bool vm_notify(struct virtqueue *vq)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+	struct virtio_mmio_vq_info *info = vq->priv;
 
-	/* We write the queue's selector into the notification register to
+	/* We write the queue's selector into the notify address to
 	 * signal the other end */
-	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+	if (info)
+		writel(vq->index, vm_dev->base + info->notify_addr);
+
 	return true;
 }
 
@@ -307,7 +308,33 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
 
 	return ret;
 }
 
+static irqreturn_t vm_vring_interrupt(int irq, void *opaque)
+{
+	struct virtio_mmio_device *vm_dev = opaque;
+	struct virtio_mmio_vq_info *info;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vm_dev->lock, flags);
+	list_for_each_entry(info, &vm_dev->virtqueues, node) {
+		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&vm_dev->lock, flags);
+
+	return ret;
+}
+
+/* Handle a configuration change */
+static irqreturn_t vm_config_changed(int irq, void *opaque)
+{
+	struct virtio_mmio_device *vm_dev = opaque;
+
+	virtio_config_changed(&vm_dev->vdev);
+
+	return IRQ_HANDLED;
+}
 
 static void vm_del_vq(struct virtqueue *vq)
 {
@@ -316,6 +343,15 @@ static void vm_del_vq(struct virtqueue *vq)
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
 	struct virtio_mmio_vq_info *info = vq->priv;
 	unsigned long flags;
 	unsigned int index = vq->index;
 
+	if (vm_dev->msi_enabled && !vm_dev->msi_share) {
+		if (info->msi_vector != VIRTIO_MMIO_MSI_NO_VECTOR) {
+			int irq = mmio_msi_irq_vector(&vq->vdev->dev,
+					info->msi_vector);
+
+			free_irq(irq, vq);
+		}
+	}
+
 	spin_lock_irqsave(&vm_dev->lock, flags);
 	list_del(&info->node);
 	spin_unlock_irqrestore(&vm_dev->lock, flags);
@@ -334,20 +370,41 @@ static void vm_del_vq(struct virtqueue *vq)
 	kfree(info);
 }
 
-static void vm_del_vqs(struct virtio_device *vdev)
+static void vm_free_irqs(struct virtio_device *vdev)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+	if (vm_dev->msi_enabled)
+		vm_free_msi_irqs(vdev);
+	else
+		free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
+}
+
+static void vm_del_vqs(struct virtio_device *vdev)
+{
 	struct virtqueue *vq, *n;
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
 		vm_del_vq(vq);
 
-	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
+	vm_free_irqs(vdev);
+}
+
+static void mmio_msi_config_vector(struct virtio_mmio_device *vm_dev, u32 vec)
+{
+	writel(vec, vm_dev->base + VIRTIO_MMIO_MSI_VEC_SEL);
+	writel(VIRTIO_MMIO_MSI_CMD_MAP_CONFIG,
+	       vm_dev->base + VIRTIO_MMIO_MSI_COMMAND);
+}
+
+static void mmio_msi_queue_vector(struct virtio_mmio_device *vm_dev, u32 vec)
+{
+	writel(vec, vm_dev->base + VIRTIO_MMIO_MSI_VEC_SEL);
+	writel(VIRTIO_MMIO_MSI_CMD_MAP_QUEUE,
+	       vm_dev->base + VIRTIO_MMIO_MSI_COMMAND);
 }
 
 static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 				  void (*callback)(struct virtqueue *vq),
-				  const char *name, bool ctx)
+				  const char *name, bool ctx, u32 msi_vector)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 	struct virtio_mmio_vq_info *info;
@@ -434,6 +491,18 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 	vq->priv = info;
 	info->vq = vq;
 
+	if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION))
+		info->notify_addr = vm_dev->notify_base +
+				vm_dev->notify_multiplier * vq->index;
+	else
+		info->notify_addr = VIRTIO_MMIO_QUEUE_NOTIFY;
+
+	info->msi_vector = msi_vector;
+
+	/* Set queue event and vector mapping for MSI sharing mode. */
+	if (vm_dev->msi_share && msi_vector != VIRTIO_MMIO_MSI_NO_VECTOR)
+		mmio_msi_queue_vector(vm_dev, msi_vector);
+
 	spin_lock_irqsave(&vm_dev->lock, flags);
 	list_add(&info->node, &vm_dev->virtqueues);
 	spin_unlock_irqrestore(&vm_dev->lock, flags);
@@ -455,12 +524,11 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 	return ERR_PTR(err);
 }
 
-static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-		       struct virtqueue *vqs[],
-		       vq_callback_t *callbacks[],
-		       const char * const names[],
-		       const bool *ctx,
-		       struct irq_affinity *desc)
+static int vm_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
+			    struct virtqueue *vqs[],
+			    vq_callback_t *callbacks[],
+			    const char * const names[],
+			    const bool *ctx)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 	int irq = platform_get_irq(vm_dev->pdev, 0);
@@ -473,8 +541,6 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 
 	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vm_dev);
-	if (err)
-		return err;
 
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
@@ -483,14 +549,201 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		}
 
 		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
-				     ctx ? ctx[i] : false);
+				     ctx ? ctx[i] : false,
+				     VIRTIO_MMIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			vm_del_vqs(vdev);
 			return PTR_ERR(vqs[i]);
 		}
 	}
 
+	return err;
+}
+
+static int vm_find_vqs_msi(struct virtio_device *vdev, unsigned int nvqs,
+		struct virtqueue *vqs[], vq_callback_t *callbacks[],
+		const char * const names[], bool per_vq_vectors,
+		const bool *ctx, struct irq_affinity *desc)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	int i, err, allocated_vectors, nvectors;
+	u32 msi_vec;
+	u32 max_vec_num = readl(vm_dev->base + VIRTIO_MMIO_MSI_VEC_NUM);
+
+	/* For MSI non-sharing, the max vector number MUST be greater
+	 * than nvqs. Otherwise, fall back to the legacy interrupt.
+	 */
+	if (per_vq_vectors && max_vec_num < (nvqs + 1))
+		return -EINVAL;
+
+	if (per_vq_vectors) {
+		nvectors = 1;
+		for (i = 0; i < nvqs; ++i)
+			if (callbacks[i])
+				++nvectors;
+	} else {
+		nvectors = 2;
+	}
+
+	vm_dev->msi_share = !per_vq_vectors;
+
+	/* Allocate nvqs irqs for queues and one irq for configuration */
+	err = vm_request_msi_vectors(vdev, nvectors);
+	if (err != 0)
+		return err;
+
+	allocated_vectors = vm_dev->msi_used_vectors;
+	for (i = 0; i < nvqs; i++) {
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+		if (!callbacks[i])
+			msi_vec = VIRTIO_MMIO_MSI_NO_VECTOR;
+		else if (per_vq_vectors)
+			msi_vec = allocated_vectors++;
+		else
+			msi_vec = 1;
+		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
+				     ctx ? ctx[i] : false, msi_vec);
+		if (IS_ERR(vqs[i])) {
+			err = PTR_ERR(vqs[i]);
+			goto error_find;
+		}
+
+		if (!per_vq_vectors ||
+				msi_vec == VIRTIO_MMIO_MSI_NO_VECTOR)
+			continue;
+
+		/* allocate per-vq irq if available and necessary */
+		snprintf(vm_dev->vm_vq_names[msi_vec],
+			 sizeof(*vm_dev->vm_vq_names),
+			 "%s-%s",
+			 dev_name(&vm_dev->vdev.dev), names[i]);
+		err = request_irq(mmio_msi_irq_vector(&vqs[i]->vdev->dev,
+						      msi_vec),
+				  vring_interrupt, 0,
+				  vm_dev->vm_vq_names[msi_vec], vqs[i]);
+
+		if (err)
+			goto error_find;
+	}
+
+	return 0;
+
+error_find:
+	vm_del_vqs(vdev);
+	return err;
+}
+
+static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+		       struct virtqueue *vqs[],
+		       vq_callback_t *callbacks[],
+		       const char * const names[],
+		       const bool *ctx,
+		       struct irq_affinity *desc)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	int err;
+
+	if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_NOTIFICATION)) {
+		unsigned int notify = readl(vm_dev->base +
+				VIRTIO_MMIO_QUEUE_NOTIFY);
+
+		vm_dev->notify_base = notify & 0xffff;
+		vm_dev->notify_multiplier = (notify >> 16) & 0xffff;
+	}
+
+	if (__virtio_test_bit(vdev, VIRTIO_F_MMIO_MSI)) {
+		bool dyn_mapping = !!(readl(vm_dev->base +
+					VIRTIO_MMIO_MSI_STATE) &
+				VIRTIO_MMIO_MSI_SHARING_MASK);
+
+		if (!dyn_mapping)
+			err = vm_find_vqs_msi(vdev, nvqs, vqs, callbacks,
+					names, true, ctx, desc);
+		else
+			err = vm_find_vqs_msi(vdev, nvqs, vqs, callbacks,
+					names, false, ctx, desc);
+		if (!err)
+			return 0;
+	}
+
+	return vm_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
+}
+
+static int vm_request_msi_vectors(struct virtio_device *vdev, int nirqs)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	unsigned int v;
+	int irq, err;
+
+	if (vm_dev->msi_enabled)
+		return -EINVAL;
+
+	vm_dev->vm_vq_names = kmalloc_array(nirqs, sizeof(*vm_dev->vm_vq_names),
+					    GFP_KERNEL);
+	if (!vm_dev->vm_vq_names)
+		return -ENOMEM;
+
+	mmio_get_msi_domain(vdev);
+	err = mmio_msi_domain_alloc_irqs(&vdev->dev, nirqs);
+	if (err) {
+		kfree(vm_dev->vm_vq_names);
+		vm_dev->vm_vq_names = NULL;
+		return err;
+	}
+
+	mmio_msi_set_enable(vm_dev, 1);
+	vm_dev->msi_enabled = true;
+
+	v = vm_dev->msi_used_vectors;
+	/* The first MSI vector is used for the configuration change event. */
+	snprintf(vm_dev->vm_vq_names[v], sizeof(*vm_dev->vm_vq_names),
+		 "%s-config", dev_name(&vdev->dev));
+	irq = mmio_msi_irq_vector(&vdev->dev, v);
+	err = request_irq(irq, vm_config_changed, 0, vm_dev->vm_vq_names[v],
+			  vm_dev);
+	if (err)
+		goto error_request_irq;
+
+	/* Set the configuration event mapping. */
+	if (vm_dev->msi_share)
+		mmio_msi_config_vector(vm_dev, v);
+
+	++vm_dev->msi_used_vectors;
+
+	if (vm_dev->msi_share) {
+		v = vm_dev->msi_used_vectors;
+		snprintf(vm_dev->vm_vq_names[v], sizeof(*vm_dev->vm_vq_names),
+			 "%s-virtqueues", dev_name(&vm_dev->vdev.dev));
+		err = request_irq(mmio_msi_irq_vector(&vdev->dev, v),
+				  vm_vring_interrupt, 0, vm_dev->vm_vq_names[v],
+				  vm_dev);
+		if (err)
+			goto error_request_irq;
+		++vm_dev->msi_used_vectors;
+	}
+
+	return 0;
+
+error_request_irq:
+	vm_free_msi_irqs(vdev);
+
+	return err;
+}
+
+static void vm_free_msi_irqs(struct virtio_device *vdev)
+{
+	int i;
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+	mmio_msi_set_enable(vm_dev, 0);
+	for (i = 0; i < vm_dev->msi_used_vectors; i++)
+		free_irq(mmio_msi_irq_vector(&vdev->dev, i), vm_dev);
+	mmio_msi_domain_free_irqs(&vdev->dev);
+	kfree(vm_dev->vm_vq_names);
+	vm_dev->vm_vq_names = NULL;
+	vm_dev->msi_enabled = false;
+	vm_dev->msi_used_vectors = 0;
 }
 
 static const char *vm_bus_name(struct virtio_device *vdev)
@@ -604,6 +857,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, vm_dev);
 
+	mmio_msi_create_irq_domain();
+
 	rc = register_virtio_device(&vm_dev->vdev);
 	if (rc)
 		put_device(&vm_dev->vdev.dev);
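With vm_find_vqs() split as above, existing virtio drivers gain MSI support transparently through the ordinary find-vqs path; nothing changes on the device-driver side. A minimal sketch of a consumer, assuming hypothetical callbacks example_rx_done/example_tx_done that are not part of this patch:

static int example_init_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
	static const char * const names[] = { "rx", "tx" };

	/* Routed to vm_find_vqs(): per-vq or shared MSI vectors are chosen
	 * from VIRTIO_MMIO_MSI_STATE, with a fallback to the legacy shared
	 * interrupt if MSI setup fails. */
	return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}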
diff --git a/drivers/virtio/virtio_mmio_common.h b/drivers/virtio/virtio_mmio_common.h
new file mode 100644
index 00000000000000..ccf6320a998ac5
--- /dev/null
+++ b/drivers/virtio/virtio_mmio_common.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _DRIVERS_VIRTIO_VIRTIO_MMIO_COMMON_H
+#define _DRIVERS_VIRTIO_VIRTIO_MMIO_COMMON_H
+/*
+ * Virtio MMIO driver - common functionality for all device versions
+ *
+ * This module allows virtio devices to be used over a memory-mapped device.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/virtio.h>
+
+#define to_virtio_mmio_device(_plat_dev) \
+	container_of(_plat_dev, struct virtio_mmio_device, vdev)
+
+struct virtio_mmio_device {
+	struct virtio_device vdev;
+	struct platform_device *pdev;
+
+	void __iomem *base;
+	unsigned long version;
+
+	/* a list of queues so we can dispatch IRQs */
+	spinlock_t lock;
+	struct list_head virtqueues;
+
+	unsigned short notify_base;
+	unsigned short notify_multiplier;
+
+	/* Name strings for interrupts; 256 bytes covers "<device>-<queue>". */
+	char (*vm_vq_names)[256];
+
+	/* used vectors */
+	unsigned int msi_used_vectors;
+	bool msi_share;
+	bool msi_enabled;
+};
+
+#endif
diff --git a/drivers/virtio/virtio_mmio_msi.h b/drivers/virtio/virtio_mmio_msi.h
new file mode 100644
index 00000000000000..2f545ae90a1c4f
--- /dev/null
+++ b/drivers/virtio/virtio_mmio_msi.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _DRIVERS_VIRTIO_VIRTIO_MMIO_MSI_H
+#define _DRIVERS_VIRTIO_VIRTIO_MMIO_MSI_H
+
+#ifdef CONFIG_VIRTIO_MMIO_MSI
+
+#include <linux/msi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+#include "virtio_mmio_common.h"
+
+static irq_hw_number_t mmio_msi_hwirq;
+static struct irq_domain *mmio_msi_domain;
+
+struct irq_domain *__weak arch_msi_root_irq_domain(void)
+{
+	return NULL;
+}
+
+void __weak irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+}
+
+static void __iomem *vm_dev_base(struct msi_desc *desc)
+{
+	if (desc) {
+		struct device *dev = desc->dev;
+		struct virtio_device *vdev = dev_to_virtio(dev);
+		struct virtio_mmio_device *vm_dev =
+				to_virtio_mmio_device(vdev);
+
+		return vm_dev->base;
+	}
+
+	return NULL;
+}
+
+static void mmio_msi_set_mask_bit(struct irq_data *data, u32 flag)
+{
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
+	void __iomem *base = vm_dev_base(desc);
+	unsigned int offset = data->irq - desc->irq;
+
+	if (base) {
+		u32 op = flag ? VIRTIO_MMIO_MSI_CMD_MASK :
+				VIRTIO_MMIO_MSI_CMD_UNMASK;
+		writel(offset, base + VIRTIO_MMIO_MSI_VEC_SEL);
+		writel(op, base + VIRTIO_MMIO_MSI_COMMAND);
+	}
+}
+
+static void mmio_msi_mask_irq(struct irq_data *data)
+{
+	mmio_msi_set_mask_bit(data, 1);
+}
+
+static void mmio_msi_unmask_irq(struct irq_data *data)
+{
+	mmio_msi_set_mask_bit(data, 0);
+}
+
+static struct irq_chip mmio_msi_controller = {
+	.name			= "VIRTIO-MMIO-MSI",
+	.irq_mask		= mmio_msi_mask_irq,
+	.irq_unmask		= mmio_msi_unmask_irq,
+	.irq_ack		= irq_chip_ack_parent,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_compose_msi_msg	= irq_msi_compose_msg,
+	.flags			= IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int mmio_msi_prepare(struct irq_domain *domain, struct device *dev,
+			    int nvec, msi_alloc_info_t *arg)
+{
+	memset(arg, 0, sizeof(*arg));
+	return 0;
+}
+
+static void mmio_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+	mmio_msi_hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static irq_hw_number_t mmio_msi_get_hwirq(struct msi_domain_info *info,
+					  msi_alloc_info_t *arg)
+{
+	return mmio_msi_hwirq;
+}
+
+static struct msi_domain_ops mmio_msi_domain_ops = {
+	.msi_prepare	= mmio_msi_prepare,
+	.set_desc	= mmio_msi_set_desc,
+	.get_hwirq	= mmio_msi_get_hwirq,
+};
+
+static struct msi_domain_info mmio_msi_domain_info = {
+	.flags		= MSI_FLAG_USE_DEF_DOM_OPS |
+			  MSI_FLAG_USE_DEF_CHIP_OPS |
+			  MSI_FLAG_ACTIVATE_EARLY,
+	.ops		= &mmio_msi_domain_ops,
+	.chip		= &mmio_msi_controller,
+	.handler	= handle_edge_irq,
+	.handler_name	= "edge",
+};
+
+static inline void mmio_msi_create_irq_domain(void)
+{
+	struct fwnode_handle *fn;
+	struct irq_domain *parent = arch_msi_root_irq_domain();
+
+	fn = irq_domain_alloc_named_fwnode("VIRTIO-MMIO-MSI");
+	if (fn && parent) {
+		mmio_msi_domain =
+			platform_msi_create_irq_domain(fn,
+				&mmio_msi_domain_info, parent);
+		irq_domain_free_fwnode(fn);
+	}
+}
+
+static void mmio_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	void __iomem *base = vm_dev_base(desc);
+
+	if (base) {
+		writel(desc->platform.msi_index,
+		       base + VIRTIO_MMIO_MSI_VEC_SEL);
+		writel(msg->address_lo, base + VIRTIO_MMIO_MSI_ADDRESS_LOW);
+		writel(msg->address_hi, base + VIRTIO_MMIO_MSI_ADDRESS_HIGH);
+		writel(msg->data, base + VIRTIO_MMIO_MSI_DATA);
+		writel(VIRTIO_MMIO_MSI_CMD_CONFIGURE,
+		       base + VIRTIO_MMIO_MSI_COMMAND);
+	}
+}
+
+static inline int mmio_msi_domain_alloc_irqs(struct device *dev,
+					     unsigned int nvec)
+{
+	return platform_msi_domain_alloc_irqs(dev, nvec,
+			mmio_write_msi_msg);
+}
+
+static inline void mmio_msi_domain_free_irqs(struct device *dev)
+{
+	return platform_msi_domain_free_irqs(dev);
+}
+
+static inline void mmio_get_msi_domain(struct virtio_device *vdev)
+{
+	if (!vdev->dev.msi_domain)
+		vdev->dev.msi_domain = mmio_msi_domain;
+}
+
+static inline int mmio_msi_irq_vector(struct device *dev, unsigned int nr)
+{
+	struct msi_desc *entry = first_msi_entry(dev);
+
+	return entry->irq + nr;
+}
+
+#else
+static inline void mmio_msi_create_irq_domain(void) {}
+static inline int mmio_msi_irq_vector(struct device *dev, unsigned int nr)
+{
+	return -EINVAL;
+}
+static inline void mmio_get_msi_domain(struct virtio_device *vdev) {}
+static inline int mmio_msi_domain_alloc_irqs(struct device *dev,
+					     unsigned int nvec)
+{
+	return -EINVAL;
+}
+static inline void mmio_msi_domain_free_irqs(struct device *dev) {}
+#endif
+
+#endif
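Note that virtio_mmio.c above calls mmio_msi_set_enable(), which is not among the helpers in this header as shown; for the driver to build, it needs a definition in the CONFIG_VIRTIO_MMIO_MSI branch (and an empty stub in the #else branch). A minimal sketch, assuming the helper simply issues the ENABLE/DISABLE commands defined in virtio_mmio.h:

static inline void mmio_msi_set_enable(struct virtio_mmio_device *vm_dev,
					int enable)
{
	/* Sketch, not from the original posting: toggle device-side MSI. */
	u32 cmd = enable ? VIRTIO_MMIO_MSI_CMD_ENABLE :
			   VIRTIO_MMIO_MSI_CMD_DISABLE;

	writel(cmd, vm_dev->base + VIRTIO_MMIO_MSI_COMMAND);
}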
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ad679e9d9c04a..ee5f56629547c7 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -362,6 +362,7 @@ int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 			      unsigned int nvec);
 void *platform_msi_get_host_data(struct irq_domain *domain);
+irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc);
 #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
 
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index ff8e7dc9d4dd22..5982b76b24a1e6 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -52,7 +52,7 @@
  * rest are per-device feature bits. */
 #define VIRTIO_TRANSPORT_F_START	28
-#define VIRTIO_TRANSPORT_F_END		38
+#define VIRTIO_TRANSPORT_F_END		41
 
 #ifndef VIRTIO_CONFIG_NO_LEGACY
 /* Do we get callbacks when the ring is completely used, even if we've
@@ -88,4 +88,14 @@
  * Does the device support Single Root I/O Virtualization?
  */
 #define VIRTIO_F_SR_IOV			37
+
+/*
+ * This feature indicates enhanced notification support on the MMIO layer.
+ */
+#define VIRTIO_F_MMIO_NOTIFICATION	39
+
+/*
+ * This feature indicates MSI support on the MMIO layer.
+ */
+#define VIRTIO_F_MMIO_MSI		40
 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
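Because the new bits sit above 31, they land in the second 32-bit feature word that the MMIO transport exchanges through its DEVICE_FEATURES_SEL/DRIVER_FEATURES_SEL selectors. An illustrative computation (not part of the patch):

	/* word = bit / 32, offset = bit % 32:
	 * VIRTIO_F_MMIO_NOTIFICATION (39) -> word 1, bit 7
	 * VIRTIO_F_MMIO_MSI          (40) -> word 1, bit 8
	 */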
diff --git a/include/uapi/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h
index c4b09689ab6447..777cb0e6209649 100644
--- a/include/uapi/linux/virtio_mmio.h
+++ b/include/uapi/linux/virtio_mmio.h
@@ -122,6 +122,21 @@
 #define VIRTIO_MMIO_QUEUE_USED_LOW	0x0a0
 #define VIRTIO_MMIO_QUEUE_USED_HIGH	0x0a4
 
+/* MSI max vector number that device supports - Read Only */
+#define VIRTIO_MMIO_MSI_VEC_NUM		0x0c0
+/* MSI state register - Read Only */
+#define VIRTIO_MMIO_MSI_STATE		0x0c4
+/* MSI command register - Write Only */
+#define VIRTIO_MMIO_MSI_COMMAND		0x0c8
+/* MSI vector selector - Write Only */
+#define VIRTIO_MMIO_MSI_VEC_SEL		0x0d0
+/* MSI low 32 bit address, 64 bits in two halves */
+#define VIRTIO_MMIO_MSI_ADDRESS_LOW	0x0d4
+/* MSI high 32 bit address, 64 bits in two halves */
+#define VIRTIO_MMIO_MSI_ADDRESS_HIGH	0x0d8
+/* MSI 32 bit data */
+#define VIRTIO_MMIO_MSI_DATA		0x0dc
+
 /* Configuration atomicity value */
 #define VIRTIO_MMIO_CONFIG_GENERATION	0x0fc
@@ -130,6 +145,22 @@
 
 #define VIRTIO_MMIO_CONFIG		0x100
 
+/* MSI commands */
+#define VIRTIO_MMIO_MSI_CMD_ENABLE	0x1
+#define VIRTIO_MMIO_MSI_CMD_DISABLE	0x2
+#define VIRTIO_MMIO_MSI_CMD_CONFIGURE	0x3
+#define VIRTIO_MMIO_MSI_CMD_MASK	0x4
+#define VIRTIO_MMIO_MSI_CMD_UNMASK	0x5
+#define VIRTIO_MMIO_MSI_CMD_MAP_CONFIG	0x6
+#define VIRTIO_MMIO_MSI_CMD_MAP_QUEUE	0x7
+
+/* MSI NO_VECTOR */
+#define VIRTIO_MMIO_MSI_NO_VECTOR	0xffffffff
+
+/* MSI enabled state mask */
+#define VIRTIO_MMIO_MSI_ENABLED_MASK	(1 << 31)
+/* MSI sharing state mask */
+#define VIRTIO_MMIO_MSI_SHARING_MASK	(1 << 30)
 
 /*
  * Interrupt flags (re: interrupt status & acknowledge registers)
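Taken together, these registers define a select/latch/command protocol: the driver writes a vector index to VEC_SEL (after latching address/data where needed), then issues one of the commands above. A hedged sketch of how a device model might decode command writes; struct example_dev and all of its fields are hypothetical, not defined by this patch:

static void example_msi_command(struct example_dev *dev, u32 cmd)
{
	u32 vec = dev->msi_vec_sel;	/* last VIRTIO_MMIO_MSI_VEC_SEL write */

	switch (cmd) {
	case VIRTIO_MMIO_MSI_CMD_ENABLE:
		dev->msi_state |= VIRTIO_MMIO_MSI_ENABLED_MASK;
		break;
	case VIRTIO_MMIO_MSI_CMD_DISABLE:
		dev->msi_state &= ~VIRTIO_MMIO_MSI_ENABLED_MASK;
		break;
	case VIRTIO_MMIO_MSI_CMD_CONFIGURE:
		/* latch the ADDRESS_LOW/HIGH and DATA registers */
		dev->vec[vec].addr = dev->msi_addr;
		dev->vec[vec].data = dev->msi_data;
		break;
	case VIRTIO_MMIO_MSI_CMD_MASK:
		dev->vec[vec].masked = true;
		break;
	case VIRTIO_MMIO_MSI_CMD_UNMASK:
		dev->vec[vec].masked = false;
		break;
	case VIRTIO_MMIO_MSI_CMD_MAP_CONFIG:
		dev->config_vector = vec;
		break;
	case VIRTIO_MMIO_MSI_CMD_MAP_QUEUE:
		/* presumably applies to the queue selected via QUEUE_SEL */
		dev->queue_vector[dev->queue_sel] = vec;
		break;
	}
}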