Diffstat (limited to 'drivers/irqchip/irq-gic-v3-its.c')
| -rw-r--r-- | drivers/irqchip/irq-gic-v3-its.c | 1084 |
1 file changed, 759 insertions(+), 325 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index ba39668c3e08..ada585bfa451 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -11,13 +11,15 @@ #include <linux/cpu.h> #include <linux/crash_dump.h> #include <linux/delay.h> -#include <linux/dma-iommu.h> #include <linux/efi.h> +#include <linux/genalloc.h> #include <linux/interrupt.h> +#include <linux/iommu.h> #include <linux/iopoll.h> #include <linux/irqdomain.h> #include <linux/list.h> #include <linux/log2.h> +#include <linux/mem_encrypt.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/msi.h> @@ -27,6 +29,7 @@ #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/percpu.h> +#include <linux/set_memory.h> #include <linux/slab.h> #include <linux/syscore_ops.h> @@ -38,13 +41,18 @@ #include <asm/exception.h> #include "irq-gic-common.h" +#include "irq-gic-its-msi-parent.h" +#include <linux/irqchip/irq-msi-lib.h> #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) +#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) +#define ITS_FLAGS_WORKAROUND_HISILICON_162100801 (1ULL << 4) -#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) -#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) +#define RD_LOCAL_LPI_ENABLED BIT(0) +#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) +#define RD_LOCAL_MEMRESERVE_DONE BIT(2) static u32 lpi_id_bits; @@ -57,7 +65,8 @@ static u32 lpi_id_bits; #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) -#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI +static u8 __ro_after_init lpi_prop_prio; +static struct its_node *find_4_1_its(void); /* * Collection structure - just an ID, and a redistributor address to @@ -117,6 +126,8 @@ struct its_node { int vlpi_redist_offset; }; +static DEFINE_PER_CPU(struct its_node *, local_4_1_its); + #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) @@ -161,6 +172,7 @@ struct its_device { struct its_node *its; struct event_lpi_map event_map; void *itt; + u32 itt_sz; u32 nr_ites; u32 device_id; bool shared; @@ -196,6 +208,89 @@ static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +static gfp_t gfp_flags_quirk; + +static struct page *its_alloc_pages_node(int node, gfp_t gfp, + unsigned int order) +{ + struct page *page; + int ret = 0; + + page = alloc_pages_node(node, gfp | gfp_flags_quirk, order); + + if (!page) + return NULL; + + ret = set_memory_decrypted((unsigned long)page_address(page), + 1 << order); + /* + * If set_memory_decrypted() fails then we don't know what state the + * page is in, so we can't free it. Instead we leak it. + * set_memory_decrypted() will already have WARNed. + */ + if (ret) + return NULL; + + return page; +} + +static struct page *its_alloc_pages(gfp_t gfp, unsigned int order) +{ + return its_alloc_pages_node(NUMA_NO_NODE, gfp, order); +} + +static void its_free_pages(void *addr, unsigned int order) +{ + /* + * If the memory cannot be encrypted again then we must leak the pages. + * set_memory_encrypted() will already have WARNed. 
+ */ + if (set_memory_encrypted((unsigned long)addr, 1 << order)) + return; + free_pages((unsigned long)addr, order); +} + +static struct gen_pool *itt_pool; + +static void *itt_alloc_pool(int node, int size) +{ + unsigned long addr; + struct page *page; + + if (size >= PAGE_SIZE) { + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size)); + + return page ? page_address(page) : NULL; + } + + do { + addr = gen_pool_alloc(itt_pool, size); + if (addr) + break; + + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + break; + + gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node); + } while (!addr); + + return (void *)addr; +} + +static void itt_free_pool(void *addr, int size) +{ + if (!addr) + return; + + if (size >= PAGE_SIZE) { + its_free_pages(addr, get_order(size)); + return; + } + + gen_pool_free(itt_pool, (unsigned long)addr, size); +} + /* * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we * always have vSGIs mapped. @@ -205,6 +300,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); } +static bool rdists_support_shareable(void) +{ + return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE); +} + static u16 get_its_list(struct its_vm *vm) { struct its_node *its; @@ -267,13 +367,23 @@ static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); } +static struct irq_chip its_vpe_irq_chip; + static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) { - struct its_vlpi_map *map = get_vlpi_map(d); + struct its_vpe *vpe = NULL; int cpu; - if (map) { - cpu = vpe_to_cpuid_lock(map->vpe, flags); + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + if (map) + vpe = map->vpe; + } + + if (vpe) { + cpu = vpe_to_cpuid_lock(vpe, flags); } else { /* Physical LPIs are already locked via the irq_desc lock */ struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -287,10 +397,18 @@ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) { - struct its_vlpi_map *map = get_vlpi_map(d); + struct its_vpe *vpe = NULL; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + if (map) + vpe = map->vpe; + } - if (map) - vpe_to_cpuid_unlock(map->vpe, flags); + if (vpe) + vpe_to_cpuid_unlock(vpe, flags); } static struct its_collection *valid_col(struct its_collection *col) @@ -595,7 +713,6 @@ static struct its_collection *its_build_mapd_cmd(struct its_node *its, u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); - itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); its_encode_cmd(cmd, GITS_CMD_MAPD); its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); @@ -742,7 +859,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its, its_fixup_cmd(cmd); - return NULL; + return desc->its_invall_cmd.col; } static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, @@ -761,6 +878,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe); unsigned long vpt_addr, vconf_addr; u64 target; bool alloc; @@ -770,9 
+888,14 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, its_encode_valid(cmd, desc->its_vmapp_cmd.valid); if (!desc->its_vmapp_cmd.valid) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); if (is_v4_1(its)) { - alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); its_encode_alloc(cmd, alloc); + /* + * Unmapping a VPE is self-synchronizing on GICv4.1, + * no need to issue a VSYNC. + */ + vpe = NULL; } goto out; @@ -785,13 +908,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, its_encode_vpt_addr(cmd, vpt_addr); its_encode_vpt_size(cmd, LPI_NRBITS - 1); + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + if (!is_v4_1(its)) goto out; vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); - alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); - its_encode_alloc(cmd, alloc); /* @@ -807,7 +930,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, out: its_fixup_cmd(cmd); - return valid_vpe(its, desc->its_vmapp_cmd.vpe); + return vpe; } static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, @@ -1286,7 +1409,6 @@ static void its_send_vmovp(struct its_vpe *vpe) { struct its_cmd_desc desc = {}; struct its_node *its; - unsigned long flags; int col_id = vpe->col_idx; desc.its_vmovp_cmd.vpe = vpe; @@ -1306,8 +1428,7 @@ static void its_send_vmovp(struct its_vpe *vpe) * * Wall <-- Head. */ - raw_spin_lock_irqsave(&vmovp_lock, flags); - + guard(raw_spinlock)(&vmovp_lock); desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); @@ -1322,8 +1443,6 @@ static void its_send_vmovp(struct its_vpe *vpe) desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); } - - raw_spin_unlock_irqrestore(&vmovp_lock, flags); } static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) @@ -1427,14 +1546,29 @@ static void wait_for_syncr(void __iomem *rdbase) cpu_relax(); } -static void direct_lpi_inv(struct irq_data *d) +static void __direct_lpi_inv(struct irq_data *d, u64 val) { - struct its_vlpi_map *map = get_vlpi_map(d); void __iomem *rdbase; unsigned long flags; - u64 val; int cpu; + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + u64 val; + if (map) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -1447,15 +1581,7 @@ static void direct_lpi_inv(struct irq_data *d) val = d->hwirq; } - /* Target the redistributor this LPI is currently routed to */ - cpu = irq_to_cpuid_lock(d, &flags); - raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); - rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; - gic_write_lpir(val, rdbase + GICR_INVLPIR); - - wait_for_syncr(rdbase); - raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); - irq_to_cpuid_unlock(d, flags); + __direct_lpi_inv(d, val); } static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) @@ -1570,13 +1696,15 @@ static int its_select_cpu(struct irq_data *d, const struct cpumask *aff_mask) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); - 
cpumask_var_t tmpmask; + static DEFINE_RAW_SPINLOCK(tmpmask_lock); + static struct cpumask __tmpmask; + struct cpumask *tmpmask; + unsigned long flags; int cpu, node; - - if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) - return -ENOMEM; - node = its_dev->its->numa_node; + tmpmask = &__tmpmask; + + raw_spin_lock_irqsave(&tmpmask_lock, flags); if (!irqd_affinity_is_managed(d)) { /* First try the NUMA node */ @@ -1620,7 +1748,7 @@ static int its_select_cpu(struct irq_data *d, cpu = cpumask_pick_least_loaded(d, tmpmask); } else { - cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); + cpumask_copy(tmpmask, aff_mask); /* If we cannot cross sockets, limit the search to that node */ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && @@ -1630,7 +1758,7 @@ static int its_select_cpu(struct irq_data *d, cpu = cpumask_pick_least_loaded(d, tmpmask); } out: - free_cpumask_var(tmpmask); + raw_spin_unlock_irqrestore(&tmpmask_lock, flags); pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); return cpu; @@ -1686,17 +1814,10 @@ static u64 its_irq_get_msi_base(struct its_device *its_dev) static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); - struct its_node *its; - u64 addr; - - its = its_dev->its; - addr = its->get_msi_base(its_dev); - msg->address_lo = lower_32_bits(addr); - msg->address_hi = upper_32_bits(addr); - msg->data = its_get_event_id(d); - - iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); + msg->data = its_get_event_id(d); + msi_msg_set_addr(irq_data_get_msi_desc(d), msg, + its_dev->its->get_msi_base(its_dev)); } static int its_irq_set_irqchip_state(struct irq_data *d, @@ -1751,12 +1872,10 @@ static bool gic_requires_eager_mapping(void) static void its_map_vm(struct its_node *its, struct its_vm *vm) { - unsigned long flags; - if (gic_requires_eager_mapping()) return; - raw_spin_lock_irqsave(&vmovp_lock, flags); + guard(raw_spinlock_irqsave)(&vm->vmapp_lock); /* * If the VM wasn't mapped yet, iterate over the vpes and get @@ -1769,65 +1888,53 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm) for (i = 0; i < vm->nr_vpes; i++) { struct its_vpe *vpe = vm->vpes[i]; - struct irq_data *d = irq_get_irq_data(vpe->irq); - /* Map the VPE to the first possible CPU */ - vpe->col_idx = cpumask_first(cpu_online_mask); - its_send_vmapp(its, vpe, true); + scoped_guard(raw_spinlock, &vpe->vpe_lock) + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); - irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); } } - - raw_spin_unlock_irqrestore(&vmovp_lock, flags); } static void its_unmap_vm(struct its_node *its, struct its_vm *vm) { - unsigned long flags; - /* Not using the ITS list? Everything is always mapped. 
*/ if (gic_requires_eager_mapping()) return; - raw_spin_lock_irqsave(&vmovp_lock, flags); + guard(raw_spinlock_irqsave)(&vm->vmapp_lock); if (!--vm->vlpi_count[its->list_nr]) { int i; - for (i = 0; i < vm->nr_vpes; i++) + for (i = 0; i < vm->nr_vpes; i++) { + guard(raw_spinlock)(&vm->vpes[i]->vpe_lock); its_send_vmapp(its, vm->vpes[i], false); + } } - - raw_spin_unlock_irqrestore(&vmovp_lock, flags); } static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; if (!info->map) return -EINVAL; - raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm) { struct its_vlpi_map *maps; maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), GFP_ATOMIC); - if (!maps) { - ret = -ENOMEM; - goto out; - } + if (!maps) + return -ENOMEM; its_dev->event_map.vm = info->map->vm; its_dev->event_map.vlpi_maps = maps; } else if (its_dev->event_map.vm != info->map->vm) { - ret = -EINVAL; - goto out; + return -EINVAL; } /* Get our private copy of the mapping information */ @@ -1859,46 +1966,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) its_dev->event_map.nr_vlpis++; } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_vlpi_map *map; - int ret = 0; - - raw_spin_lock(&its_dev->event_map.vlpi_lock); map = get_vlpi_map(d); - if (!its_dev->event_map.vm || !map) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !map) + return -EINVAL; /* Copy our mapping information to the incoming request */ *info->map = *map; -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_unmap(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; - raw_spin_lock(&its_dev->event_map.vlpi_lock); - - if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; /* Drop the virtual mapping */ its_send_discard(its_dev, event); @@ -1906,7 +1999,7 @@ static int its_vlpi_unmap(struct irq_data *d) /* and restore the physical one */ irqd_clr_forwarded_to_vcpu(d); its_send_mapti(its_dev, d->hwirq, event); - lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + lpi_update_config(d, 0xff, (lpi_prop_prio | LPI_PROP_ENABLED | LPI_PROP_GROUP1)); @@ -1922,9 +2015,7 @@ static int its_vlpi_unmap(struct irq_data *d) kfree(its_dev->event_map.vlpi_maps); } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) @@ -1952,6 +2043,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) if (!is_v4(its_dev->its)) return -EINVAL; + guard(raw_spinlock)(&its_dev->event_map.vlpi_lock); + /* Unmap request? 
*/ if (!info) return its_vlpi_unmap(d); @@ -2140,7 +2233,7 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) if (err) goto out; - bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); + bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC); if (!bitmap) goto out; @@ -2156,13 +2249,13 @@ out: static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) { WARN_ON(free_lpi_range(base, nr_ids)); - kfree(bitmap); + bitmap_free(bitmap); } static void gic_reset_prop_table(void *va) { - /* Priority 0xa0, Group-1, disabled */ - memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + /* Regular IRQ priority, Group-1, disabled */ + memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); /* Make sure the GIC will observe the written configuration */ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); @@ -2172,7 +2265,8 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) { struct page *prop_page; - prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + prop_page = its_alloc_pages(gfp_flags, + get_order(LPI_PROPBASE_SZ)); if (!prop_page) return NULL; @@ -2183,8 +2277,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) static void its_free_prop_table(struct page *prop_page) { - free_pages((unsigned long)page_address(prop_page), - get_order(LPI_PROPBASE_SZ)); + its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ)); } static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) @@ -2306,7 +2399,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, order = get_order(GITS_BASER_PAGES_MAX * psz); } - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); if (!page) return -ENOMEM; @@ -2319,7 +2412,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, /* 52bit PA is supported only when PageSize=64K */ if (psz != SZ_64K) { pr_err("ITS: no 52bit PA support when psz=%d\n", psz); - free_pages((unsigned long)base, order); + its_free_pages(base, order); return -ENXIO; } @@ -2350,6 +2443,9 @@ retry_baser: break; } + if (!shr) + gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); + its_write_baser(its, baser, val); tmp = baser->val; @@ -2362,10 +2458,9 @@ retry_baser: * non-cacheable as well. */ shr = tmp & GITS_BASER_SHAREABILITY_MASK; - if (!shr) { + if (!shr) cache = GITS_BASER_nC; - gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); - } + goto retry_baser; } @@ -2373,7 +2468,7 @@ retry_baser: pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", &its->phys_base, its_base_type_string[type], val, tmp); - free_pages((unsigned long)base, order); + its_free_pages(base, order); return -ENXIO; } @@ -2434,8 +2529,8 @@ static bool its_parse_indirect_baser(struct its_node *its, * feature is not supported by hardware. 
*/ new_order = max_t(u32, get_order(esz << ids), new_order); - if (new_order >= MAX_ORDER) { - new_order = MAX_ORDER - 1; + if (new_order > MAX_PAGE_ORDER) { + new_order = MAX_PAGE_ORDER; ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", &its->phys_base, its_base_type_string[type], @@ -2512,8 +2607,7 @@ static void its_free_tables(struct its_node *its) for (i = 0; i < GITS_BASER_NR_REGS; i++) { if (its->tables[i].base) { - free_pages((unsigned long)its->tables[i].base, - its->tables[i].order); + its_free_pages(its->tables[i].base, its->tables[i].order); its->tables[i].base = NULL; } } @@ -2577,6 +2671,11 @@ static int its_alloc_tables(struct its_node *its) /* erratum 24313: ignore memory access type */ cache = GITS_BASER_nCnB; + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) { + cache = GITS_BASER_nC; + shr = 0; + } + for (i = 0; i < GITS_BASER_NR_REGS; i++) { struct its_baser *baser = its->tables + i; u64 val = its_read_baser(its, baser); @@ -2674,12 +2773,15 @@ static u64 inherit_vpe_l1_table_from_its(void) break; } val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); - val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, - FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); - val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, - FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + if (rdists_support_shareable()) { + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + } val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + *this_cpu_ptr(&local_4_1_its) = its; return val; } @@ -2717,6 +2819,7 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu); return val; } @@ -2772,7 +2875,7 @@ static bool allocate_vpe_l2_table(int cpu, u32 id) /* Allocate memory for 2nd level table */ if (!table[idx]) { - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); if (!page) return false; @@ -2891,7 +2994,7 @@ static int allocate_vpe_l1_table(void) pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", np, npg, psz, epp, esz); - page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); + page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); if (!page) return -ENOMEM; @@ -2900,8 +3003,10 @@ static int allocate_vpe_l1_table(void) WARN_ON(!IS_ALIGNED(pa, psz)); val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); - val |= GICR_VPROPBASER_RaWb; - val |= GICR_VPROPBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } val |= GICR_VPROPBASER_4_1_Z; val |= GICR_VPROPBASER_4_1_VALID; @@ -2935,8 +3040,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) { struct page *pend_page; - pend_page = alloc_pages(gfp_flags | __GFP_ZERO, - get_order(LPI_PENDBASE_SZ)); + pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ)); if (!pend_page) return NULL; @@ -2948,7 +3052,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) static void its_free_pending_table(struct page *pt) { - free_pages((unsigned 
long)page_address(pt), get_order(LPI_PENDBASE_SZ)); + its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ)); } /* @@ -3007,18 +3111,12 @@ static int __init allocate_lpi_tables(void) return 0; } -static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +static u64 read_vpend_dirty_clear(void __iomem *vlpi_base) { u32 count = 1000000; /* 1s! */ bool clean; u64 val; - val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - val &= ~clr; - val |= set; - gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); - do { val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); clean = !(val & GICR_VPENDBASER_Dirty); @@ -3029,10 +3127,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) } } while (!clean && count); - if (unlikely(val & GICR_VPENDBASER_Dirty)) { + if (unlikely(!clean)) pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + + return val; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u64 val; + + /* Make sure we wait until the RD is done with the initial scan */ + val = read_vpend_dirty_clear(vlpi_base); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + val = read_vpend_dirty_clear(vlpi_base); + if (unlikely(val & GICR_VPENDBASER_Dirty)) val |= GICR_VPENDBASER_PendingLast; - } return val; } @@ -3044,7 +3158,7 @@ static void its_cpu_init_lpis(void) phys_addr_t paddr; u64 val, tmp; - if (gic_data_rdist()->lpi_enabled) + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) return; val = readl_relaxed(rbase + GICR_CTLR); @@ -3063,15 +3177,13 @@ static void its_cpu_init_lpis(void) paddr &= GENMASK_ULL(51, 16); WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); - its_free_pending_table(gic_data_rdist()->pend_page); - gic_data_rdist()->pend_page = NULL; + gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; goto out; } pend_page = gic_data_rdist()->pend_page; paddr = page_to_phys(pend_page); - WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); /* set PROPBASE */ val = (gic_rdists->prop_table_pa | @@ -3082,6 +3194,9 @@ static void its_cpu_init_lpis(void) gicr_write_propbaser(val, rbase + GICR_PROPBASER); tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + if (!rdists_support_shareable()) + tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { /* @@ -3106,6 +3221,9 @@ static void its_cpu_init_lpis(void) gicr_write_pendbaser(val, rbase + GICR_PENDBASER); tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + if (!rdists_support_shareable()) + tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { /* * The HW reports non-shareable, we must remove the @@ -3122,6 +3240,7 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); +out: if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); @@ -3157,11 +3276,11 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen the above */ dsb(sy); -out: - gic_data_rdist()->lpi_enabled = true; + gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", smp_processor_id(), - gic_data_rdist()->pend_page ? "allocated" : "reserved", + gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ? 
+ "reserved" : "allocated", &paddr); } @@ -3268,8 +3387,8 @@ static bool its_alloc_table_entry(struct its_node *its, /* Allocate memory for 2nd level table */ if (!table[idx]) { - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, - get_order(baser->psz)); + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); if (!page) return false; @@ -3364,15 +3483,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, if (WARN_ON(!is_power_of_2(nvecs))) nvecs = roundup_pow_of_two(nvecs); - dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* * Even if the device wants a single LPI, the ITT must be * sized as a power of two (and you need at least one bit...). */ nr_ites = max(2, nvecs); sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); - sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; - itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + sz = max(sz, ITS_ITT_ALIGN); + + itt = itt_alloc_pool(its->numa_node, sz); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (alloc_lpis) { lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); if (lpi_map) @@ -3384,10 +3506,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, lpi_base = 0; } - if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { kfree(dev); - kfree(itt); - kfree(lpi_map); + itt_free_pool(itt, sz); + bitmap_free(lpi_map); kfree(col_map); return NULL; } @@ -3396,6 +3518,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev->its = its; dev->itt = itt; + dev->itt_sz = sz; dev->nr_ites = nr_ites; dev->event_map.lpi_map = lpi_map; dev->event_map.col_map = col_map; @@ -3423,7 +3546,7 @@ static void its_free_device(struct its_device *its_dev) list_del(&its_dev->entry); raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); kfree(its_dev->event_map.col_map); - kfree(its_dev->itt); + itt_free_pool(its_dev->itt, its_dev->itt_sz); kfree(its_dev); } @@ -3502,8 +3625,33 @@ out: return err; } +static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) +{ + struct its_device *its_dev = info->scratchpad[0].ptr; + + guard(mutex)(&its_dev->its->dev_alloc_lock); + + /* If the device is shared, keep everything around */ + if (its_dev->shared) + return; + + /* LPIs should have been already unmapped at this stage */ + if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis))) + return; + + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + + /* Unmap device/itt, and get rid of the tracking */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); +} + static struct msi_domain_ops its_msi_domain_ops = { .msi_prepare = its_msi_prepare, + .msi_teardown = its_msi_teardown, }; static int its_irq_gic_domain_alloc(struct irq_domain *domain, @@ -3559,6 +3707,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irqd = irq_get_irq_data(virq + i); irqd_set_single_target(irqd); irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); pr_debug("ID:%d pID:%d vID:%d\n", (int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i), virq + i); @@ -3603,7 +3752,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct its_device *its_dev = irq_data_get_irq_chip_data(d); - struct its_node *its = its_dev->its; 
int i; bitmap_release_region(its_dev->event_map.lpi_map, @@ -3617,30 +3765,11 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, irq_domain_reset_irq_data(data); } - mutex_lock(&its->dev_alloc_lock); - - /* - * If all interrupts have been freed, start mopping the - * floor. This is conditioned on the device not being shared. - */ - if (!its_dev->shared && - bitmap_empty(its_dev->event_map.lpi_map, - its_dev->event_map.nr_lpis)) { - its_lpi_free(its_dev->event_map.lpi_map, - its_dev->event_map.lpi_base, - its_dev->event_map.nr_lpis); - - /* Unmap device/itt */ - its_send_mapd(its_dev, 0); - its_free_device(its_dev); - } - - mutex_unlock(&its->dev_alloc_lock); - irq_domain_free_irqs_parent(domain, virq, nr_irqs); } static const struct irq_domain_ops its_domain_ops = { + .select = msi_lib_irq_domain_select, .alloc = its_irq_domain_alloc, .free = its_irq_domain_free, .activate = its_irq_domain_activate, @@ -3760,15 +3889,48 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); } +static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe) +{ + void __iomem *rdbase; + u64 val; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); + wait_for_syncr(rdbase); +} + static int its_vpe_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - int from, cpu = cpumask_first(mask_val); + unsigned int from, cpu = nr_cpu_ids; + struct cpumask *table_mask; + struct its_node *its; unsigned long flags; /* + * Check if we're racing against a VPE being destroyed, for + * which we don't want to allow a VMOVP. + */ + if (!atomic_read(&vpe->vmapp_count)) { + if (gic_requires_eager_mapping()) + return -EINVAL; + + /* + * If we lazily map the VPEs, this isn't an error and + * we can exit cleanly. + */ + cpu = cpumask_first(mask_val); + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + return IRQ_SET_MASK_OK_DONE; + } + + /* * Changing affinity is mega expensive, so let's be as lazy as * we can and only do it if we really have to. Also, if mapped * into the proxy device, we need to move the doorbell @@ -3780,28 +3942,51 @@ static int its_vpe_set_affinity(struct irq_data *d, * protect us, and that we must ensure nobody samples vpe->col_idx * during the update, hence the lock below which must also be * taken on any vLPI handling path that evaluates vpe->col_idx. + * + * Finally, we must protect ourselves against concurrent updates of + * the mapping state on this VM should the ITS list be in use (see + * the shortcut in its_send_vmovp() otherewise). */ - from = vpe_to_cpuid_lock(vpe, &flags); - if (from == cpu) - goto out; + if (its_list_map) + raw_spin_lock(&vpe->its_vm->vmapp_lock); - vpe->col_idx = cpu; + from = vpe_to_cpuid_lock(vpe, &flags); + table_mask = gic_data_rdist_cpu(from)->vpe_table_mask; /* - * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD - * is sharing its VPE table with the current one. + * If we are offered another CPU in the same GICv4.1 ITS + * affinity, pick this one. Otherwise, any CPU will do. 
*/ - if (gic_data_rdist_cpu(cpu)->vpe_table_mask && - cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + if (table_mask) + cpu = cpumask_any_and(mask_val, table_mask); + if (cpu < nr_cpu_ids) { + if (cpumask_test_cpu(from, mask_val) && + cpumask_test_cpu(from, table_mask)) + cpu = from; + } else { + cpu = cpumask_first(mask_val); + } + + if (from == cpu) goto out; + vpe->col_idx = cpu; + its_send_vmovp(vpe); + + its = find_4_1_its(); + if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801) + its_vpe_4_1_invall_locked(cpu, vpe); + its_vpe_db_proxy_move(vpe, from, cpu); out: irq_data_update_effective_affinity(d, cpumask_of(cpu)); vpe_to_cpuid_unlock(vpe, flags); + if (its_list_map) + raw_spin_unlock(&vpe->its_vm->vmapp_lock); + return IRQ_SET_MASK_OK_DONE; } @@ -3828,14 +4013,18 @@ static void its_vpe_schedule(struct its_vpe *vpe) val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & GENMASK_ULL(51, 12); val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; - val |= GICR_VPROPBASER_RaWb; - val |= GICR_VPROPBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); - val |= GICR_VPENDBASER_RaWaWb; - val |= GICR_VPENDBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + } /* * There is no good way of finding out if the pending table is * empty as we can race against the doorbell interrupt very @@ -3866,6 +4055,8 @@ static void its_vpe_invall(struct its_vpe *vpe) { struct its_node *its; + guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock); + list_for_each_entry(its, &its_nodes, entry) { if (!is_v4(its)) continue; @@ -3926,18 +4117,10 @@ static void its_vpe_send_inv(struct irq_data *d) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - if (gic_rdists->has_direct_lpi) { - void __iomem *rdbase; - - /* Target the redistributor this VPE is currently known on */ - raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); - rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; - gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); - wait_for_syncr(rdbase); - raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); - } else { + if (gic_rdists->has_direct_lpi) + __direct_lpi_inv(d, d->parent_data->hwirq); + else its_vpe_send_cmd(vpe, its_send_inv); - } } static void its_vpe_mask_irq(struct irq_data *d) @@ -4006,7 +4189,7 @@ static struct irq_chip its_vpe_irq_chip = { static struct its_node *find_4_1_its(void) { - static struct its_node *its = NULL; + struct its_node *its = *this_cpu_ptr(&local_4_1_its); if (!its) { list_for_each_entry(its, &its_nodes, entry) { @@ -4102,22 +4285,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, static void its_vpe_4_1_invall(struct its_vpe *vpe) { - void __iomem *rdbase; unsigned long flags; - u64 val; int cpu; - val = GICR_INVALLR_V; - val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); - /* Target the redistributor this vPE is currently known on */ cpu = vpe_to_cpuid_lock(vpe, &flags); - raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); - rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; - gic_write_lpir(val, rdbase + GICR_INVALLR); - - wait_for_syncr(rdbase); - raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + its_vpe_4_1_invall_locked(cpu, vpe); vpe_to_cpuid_unlock(vpe, flags); } @@ -4375,12 +4548,12 @@ 
static const struct irq_domain_ops its_sgi_domain_ops = { static int its_vpe_id_alloc(void) { - return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); + return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL); } static void its_vpe_id_free(u16 id) { - ida_simple_remove(&its_vpeid_ida, id); + ida_free(&its_vpeid_ida, id); } static int its_vpe_init(struct its_vpe *vpe) @@ -4409,9 +4582,8 @@ static int its_vpe_init(struct its_vpe *vpe) raw_spin_lock_init(&vpe->vpe_lock); vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; - if (gic_rdists->has_rvpeid) - atomic_set(&vpe->vmapp_count, 0); - else + atomic_set(&vpe->vmapp_count, 0); + if (!gic_rdists->has_rvpeid) vpe->vpe_proxy_event = -1; return 0; @@ -4460,8 +4632,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct page *vprop_page; int base, nr_ids, i, err = 0; - BUG_ON(!vm); - bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) return -ENOMEM; @@ -4481,6 +4651,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->db_lpi_base = base; vm->nr_db_lpis = nr_ids; vm->vprop_page = vprop_page; + raw_spin_lock_init(&vm->vmapp_lock); if (gic_rdists->has_rvpeid) irqchip = &its_vpe_4_1_irq_chip; @@ -4497,15 +4668,11 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq irq_domain_set_hwirq_and_chip(domain, virq + i, i, irqchip, vm->vpes[i]); set_bit(i, bitmap); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i)); } - if (err) { - if (i > 0) - its_vpe_irq_domain_free(domain, virq, i - 1); - - its_lpi_free(bitmap, base, nr_ids); - its_free_prop_table(vprop_page); - } + if (err) + its_vpe_irq_domain_free(domain, virq, i); return err; } @@ -4516,6 +4683,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, struct its_vpe *vpe = irq_data_get_irq_chip_data(d); struct its_node *its; + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + /* * If we use the list map, we issue VMAPP on demand... 
Unless * we're on a GICv4.1 and we eagerly map the VPE on all ITSs @@ -4524,9 +4695,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, if (!gic_requires_eager_mapping()) return 0; - /* Map the VPE to the first possible CPU */ - vpe->col_idx = cpumask_first(cpu_online_mask); - list_for_each_entry(its, &its_nodes, entry) { if (!is_v4(its)) continue; @@ -4535,8 +4703,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, its_send_vinvall(its, vpe); } - irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); - return 0; } @@ -4677,7 +4843,7 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) } /* the pre-ITS breaks isolation, so disable MSI remapping */ - its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; return true; } return false; @@ -4695,6 +4861,47 @@ static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) return true; } +static bool __maybe_unused its_enable_rk3588001(void *data) +{ + struct its_node *its = data; + + if (!of_machine_is_compatible("rockchip,rk3588") && + !of_machine_is_compatible("rockchip,rk3588s")) + return false; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + + return true; +} + +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801; + return true; +} + +static bool __maybe_unused its_enable_rk3568002(void *data) +{ + if (!of_machine_is_compatible("rockchip,rk3566") && + !of_machine_is_compatible("rockchip,rk3568")) + return false; + + gfp_flags_quirk |= GFP_DMA32; + + return true; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -4741,6 +4948,35 @@ static const struct gic_quirk its_quirks[] = { .init = its_enable_quirk_hip07_161600802, }, #endif +#ifdef CONFIG_HISILICON_ERRATUM_162100801 + { + .desc = "ITS: Hip09 erratum 162100801", + .iidr = 0x00051736, + .mask = 0xffffffff, + .init = its_enable_quirk_hip09_162100801, + }, +#endif +#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001 + { + .desc = "ITS: Rockchip erratum RK3588001", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3588001, + }, +#endif + { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, +#ifdef CONFIG_ROCKCHIP_ERRATUM_3568002 + { + .desc = "ITS: Rockchip erratum RK3568002", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3568002, + }, +#endif { } }; @@ -4750,9 +4986,13 @@ static void its_enable_quirks(struct its_node *its) u32 iidr = readl_relaxed(its->base + GITS_IIDR); gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); } -static int its_save_disable(void) +static int its_save_disable(void *data) { struct its_node *its; int err = 0; @@ -4788,7 +5028,7 @@ err: return err; } -static void its_restore_enable(void) +static void its_restore_enable(void *data) { struct its_node *its; int ret; @@ -4848,33 +5088,69 @@ static void its_restore_enable(void) raw_spin_unlock(&its_lock); } -static struct syscore_ops its_syscore_ops = { +static const struct syscore_ops its_syscore_ops = { .suspend = its_save_disable, .resume = 
its_restore_enable, }; -static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +static struct syscore its_syscore = { + .ops = &its_syscore_ops, +}; + +static void __init __iomem *its_map_one(struct resource *res, int *err) { - struct irq_domain *inner_domain; + void __iomem *its_base; + u32 val; + + its_base = ioremap(res->start, SZ_64K); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + *err = -ENOMEM; + return NULL; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + *err = -ENODEV; + goto out_unmap; + } + + *err = its_force_quiescent(its_base); + if (*err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + return its_base; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static int its_init_domain(struct its_node *its) +{ + struct irq_domain_info dom_info = { + .fwnode = its->fwnode_handle, + .ops = &its_domain_ops, + .domain_flags = its->msi_domain_flags, + .parent = its_parent, + }; struct msi_domain_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; - inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); - if (!inner_domain) { - kfree(info); - return -ENOMEM; - } - - inner_domain->parent = its_parent; - irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); - inner_domain->flags |= its->msi_domain_flags; info->ops = &its_msi_domain_ops; info->data = its; - inner_domain->host_data = info; + dom_info.host_data = info; + if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) { + kfree(info); + return -ENOMEM; + } return 0; } @@ -4917,8 +5193,7 @@ static int its_init_vpe_domain(void) return 0; } -static int __init its_compute_its_list_map(struct resource *res, - void __iomem *its_base) +static int __init its_compute_its_list_map(struct its_node *its) { int its_number; u32 ctlr; @@ -4932,15 +5207,15 @@ static int __init its_compute_its_list_map(struct resource *res, its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); if (its_number >= GICv4_ITS_LIST_MAX) { pr_err("ITS@%pa: No ITSList entry available!\n", - &res->start); + &its->phys_base); return -EINVAL; } - ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr &= ~GITS_CTLR_ITS_NUMBER; ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; - writel_relaxed(ctlr, its_base + GITS_CTLR); - ctlr = readl_relaxed(its_base + GITS_CTLR); + writel_relaxed(ctlr, its->base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { its_number = ctlr & GITS_CTLR_ITS_NUMBER; its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; @@ -4948,103 +5223,61 @@ static int __init its_compute_its_list_map(struct resource *res, if (test_and_set_bit(its_number, &its_list_map)) { pr_err("ITS@%pa: Duplicate ITSList entry %d\n", - &res->start, its_number); + &its->phys_base, its_number); return -EINVAL; } return its_number; } -static int __init its_probe_one(struct resource *res, - struct fwnode_handle *handle, int numa_node) +static int __init its_probe_one(struct its_node *its) { - struct its_node *its; - void __iomem *its_base; - u32 val, ctlr; - u64 baser, tmp, typer; + u64 baser, tmp; struct page *page; + u32 ctlr; int err; - its_base = ioremap(res->start, SZ_64K); - if (!its_base) { - pr_warn("ITS@%pa: Unable to map ITS 
registers\n", &res->start); - return -ENOMEM; - } - - val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; - if (val != 0x30 && val != 0x40) { - pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); - err = -ENODEV; - goto out_unmap; - } - - err = its_force_quiescent(its_base); - if (err) { - pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); - goto out_unmap; - } - - pr_info("ITS %pR\n", res); - - its = kzalloc(sizeof(*its), GFP_KERNEL); - if (!its) { - err = -ENOMEM; - goto out_unmap; - } + its_enable_quirks(its); - raw_spin_lock_init(&its->lock); - mutex_init(&its->dev_alloc_lock); - INIT_LIST_HEAD(&its->entry); - INIT_LIST_HEAD(&its->its_device_list); - typer = gic_read_typer(its_base + GITS_TYPER); - its->typer = typer; - its->base = its_base; - its->phys_base = res->start; if (is_v4(its)) { - if (!(typer & GITS_TYPER_VMOVP)) { - err = its_compute_its_list_map(res, its_base); + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); if (err < 0) - goto out_free_its; + goto out; its->list_nr = err; pr_info("ITS@%pa: Using ITS number %d\n", - &res->start, err); + &its->phys_base, err); } else { - pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); } if (is_v4_1(its)) { - u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); - its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); if (!its->sgir_base) { err = -ENOMEM; - goto out_free_its; + goto out; } - its->mpidr = readl_relaxed(its_base + GITS_MPIDR); + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", - &res->start, its->mpidr, svpet); + &its->phys_base, its->mpidr, svpet); } } - its->numa_node = numa_node; - - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, - get_order(ITS_CMD_QUEUE_SZ)); + page = its_alloc_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); if (!page) { err = -ENOMEM; goto out_unmap_sgir; } its->cmd_base = (void *)page_address(page); its->cmd_write = its->cmd_base; - its->fwnode_handle = handle; - its->get_msi_base = its_irq_get_msi_base; - its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; - - its_enable_quirks(its); err = its_alloc_tables(its); if (err) @@ -5063,6 +5296,9 @@ static int __init its_probe_one(struct resource *res, gits_write_cbaser(baser, its->base + GITS_CBASER); tmp = gits_read_cbaser(its->base + GITS_CBASER); + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GITS_CBASER_SHAREABILITY_MASK; + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { /* @@ -5086,7 +5322,7 @@ static int __init its_probe_one(struct resource *res, ctlr |= GITS_CTLR_ImDe; writel_relaxed(ctlr, its->base + GITS_CTLR); - err = its_init_domain(handle, its); + err = its_init_domain(its); if (err) goto out_free_tables; @@ -5099,15 +5335,12 @@ static int __init its_probe_one(struct resource *res, out_free_tables: its_free_tables(its); out_free_cmd: - free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); + its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); out_unmap_sgir: if (its->sgir_base) iounmap(its->sgir_base); -out_free_its: - kfree(its); -out_unmap: - iounmap(its_base); - pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); return 
err; } @@ -5138,7 +5371,7 @@ static int redist_disable_lpis(void) * * If running with preallocated tables, there is nothing to do. */ - if (gic_data_rdist()->lpi_enabled || + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) return 0; @@ -5200,18 +5433,146 @@ int its_cpu_init(void) return 0; } +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +static int its_cpu_memreserve_lpi(unsigned int cpu) +{ + struct page *pend_page; + int ret = 0; + + /* This gets to run exactly once per CPU */ + if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE) + return 0; + + pend_page = gic_data_rdist()->pend_page; + if (WARN_ON(!pend_page)) { + ret = -ENOMEM; + goto out; + } + /* + * If the pending table was pre-programmed, free the memory we + * preemptively allocated. Otherwise, reserve that memory for + * later kexecs. + */ + if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) { + its_free_pending_table(pend_page); + gic_data_rdist()->pend_page = NULL; + } else { + phys_addr_t paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + } + +out: + /* Last CPU being brought up gets to issue the cleanup */ + if (!IS_ENABLED(CONFIG_SMP) || + cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask)) + schedule_work(&rdist_memreserve_cpuhp_cleanup_work); + + gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE; + return ret; +} + +/* Mark all the BASER registers as invalid before they get reprogrammed */ +static int __init its_reset_one(struct resource *res) +{ + void __iomem *its_base; + int err, i; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) + gits_write_baser(0, its_base + GITS_BASER + (i << 3)); + + iounmap(its_base); + return 0; +} + static const struct of_device_id its_device_id[] = { { .compatible = "arm,gic-v3-its", }, {}, }; +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + static int __init its_of_probe(struct device_node *node) { struct device_node *np; struct resource res; + int err; + + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np) || + !of_property_read_bool(np, "msi-controller") || + of_address_to_resource(np, 0, &res)) + continue; + + err = its_reset_one(&res); + if (err) + return err; + } for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + if (!of_device_is_available(np)) continue; if (!of_property_read_bool(np, "msi-controller")) { @@ -5225,7 +5586,16 @@ static int __init its_of_probe(struct device_node *node) continue; } - its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } } return 0; } @@ -5337,6 +5707,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, { struct acpi_madt_generic_translator *its_entry; struct fwnode_handle *dom_handle; + struct its_node *its; struct resource res; int err; @@ -5361,30 +5732,88 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, goto dom_err; } - err = its_probe_one(&res, dom_handle, - acpi_get_its_numa_node(its_entry->translation_id)); + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + if (acpi_get_madt_revision() >= 7 && + (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT)) + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + + err = its_probe_one(its); if (!err) return 0; +node_err: iort_deregister_domain_token(its_entry->translation_id); dom_err: irq_domain_free_fwnode(dom_handle); return err; } +static int __init its_acpi_reset(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct resource res; + + its_entry = (struct acpi_madt_generic_translator *)header; + res = (struct resource) { + .start = its_entry->base_address, + .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }; + + return its_reset_one(&res); +} + static void __init its_acpi_probe(void) { acpi_table_parse_srat_its(); - acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, - gic_acpi_parse_madt_its, 0); + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + its_acpi_reset, 0) > 0) + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); acpi_its_srat_maps_free(); } #else static void __init its_acpi_probe(void) { } #endif +int __init its_lpi_memreserve_init(void) +{ + int state; + + if (!efi_enabled(EFI_CONFIG_TABLES)) + return 0; + + if (list_empty(&its_nodes)) + return 0; + + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; + state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "irqchip/arm/gicv3/memreserve:online", + its_cpu_memreserve_lpi, + NULL); + if (state < 0) + return state; + + gic_rdists->cpuhp_memreserve_state = state; + + return 0; +} + int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, - struct irq_domain *parent_domain) + struct irq_domain *parent_domain, u8 irq_prio) { struct device_node *of_node; struct its_node *its; @@ -5392,8 +5821,13 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, bool has_v4_1 = false; int err; + itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1); + if (!itt_pool) + return -ENOMEM; + gic_rdists = rdists; + lpi_prop_prio = irq_prio; its_parent = parent_domain; of_node = to_of_node(handle); if (of_node) @@ -5434,7 +5868,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, } } - register_syscore_ops(&its_syscore_ops); + register_syscore(&its_syscore); return 0; } |
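A recurring pattern in the hunks above is the replacement of explicit raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs (and their goto-based unlock paths) with scope-based guards from <linux/cleanup.h>, e.g. in its_send_vmovp(), its_map_vm()/its_unmap_vm() and its_irq_set_vcpu_affinity(). The sketch below only illustrates that conversion and is not code from the driver; the lock, counter and function names are made up.

```c
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);
static int my_count;

/* Before: every early-exit path must reach the unlock by hand. */
static int bump_old(bool allowed)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&my_lock, flags);
	if (!allowed) {
		ret = -EPERM;
		goto out;
	}
	my_count++;
out:
	raw_spin_unlock_irqrestore(&my_lock, flags);
	return ret;
}

/* After: the guard releases the lock automatically when the scope exits. */
static int bump_new(bool allowed)
{
	guard(raw_spinlock_irqsave)(&my_lock);

	if (!allowed)
		return -EPERM;

	my_count++;
	return 0;
}
```

This is also why its_vlpi_map()/its_vlpi_get()/its_vlpi_unmap() could drop their out: labels: the caller, its_irq_set_vcpu_affinity(), now holds the vlpi_lock via guard() for the whole call.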

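The other new building block is the its_alloc_pages_node()/its_free_pages() pair, which funnels the ITS table, command-queue and pending/property-table allocations through set_memory_decrypted()/set_memory_encrypted(), presumably so the tables remain accessible to the ITS when the kernel runs with memory encryption; the patch's own comments note that pages left in an unknown state are deliberately leaked. A minimal standalone sketch of that pattern (the shared_* names are illustrative, not the driver's):

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

static struct page *shared_alloc_pages_node(int node, gfp_t gfp, unsigned int order)
{
	struct page *page = alloc_pages_node(node, gfp, order);

	if (!page)
		return NULL;

	/* Share the pages with the device/host side. */
	if (set_memory_decrypted((unsigned long)page_address(page), 1 << order))
		return NULL;	/* encryption state unknown: leak on purpose */

	return page;
}

static void shared_free_pages(void *addr, unsigned int order)
{
	/* If the pages cannot be made private again, they must not be reused. */
	if (set_memory_encrypted((unsigned long)addr, 1 << order))
		return;		/* leak on purpose */

	free_pages((unsigned long)addr, order);
}
```

On systems without memory encryption both set_memory_* calls return 0 without touching the mapping, so the wrappers behave exactly like plain alloc_pages_node()/free_pages().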