Diffstat (limited to 'drivers/soc/fsl/qbman')
-rw-r--r--  drivers/soc/fsl/qbman/Kconfig            |  2
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c        | 27
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.c         | 12
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.h         |  4
-rw-r--r--  drivers/soc/fsl/qbman/qman.c             | 32
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c        | 73
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c      |  5
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_stash.c  |  6
8 files changed, 58 insertions(+), 103 deletions(-)
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index bdecb86bb656..27774ec6ff90 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig FSL_DPAA
 	bool "QorIQ DPAA1 framework support"
-	depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT)
+	depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT)
 	select GENERIC_ALLOCATOR
 	help
 	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index cb24a08be084..b0f26f6f731e 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -144,17 +144,6 @@ static int bm_set_memory(u64 ba, u32 size)
 static dma_addr_t fbpr_a;
 static size_t fbpr_sz;
 
-static int bman_fbpr(struct reserved_mem *rmem)
-{
-	fbpr_a = rmem->base;
-	fbpr_sz = rmem->size;
-
-	WARN_ON(!(fbpr_a && fbpr_sz));
-
-	return 0;
-}
-RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
-
 static irqreturn_t bman_isr(int irq, void *ptr)
 {
 	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
@@ -242,17 +231,11 @@ static int fsl_bman_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	/*
-	 * If FBPR memory wasn't defined using the qbman compatible string
-	 * try using the of_reserved_mem_device method
-	 */
-	if (!fbpr_a) {
-		ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
-		if (ret) {
-			dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
-				ret);
-			return -ENODEV;
-		}
+	ret = qbman_init_private_mem(dev, 0, "fsl,bman-fbpr", &fbpr_a, &fbpr_sz);
+	if (ret) {
+		dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+			ret);
+		return -ENODEV;
 	}
 
 	dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 33751450047e..e1d7b79cc450 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -34,8 +34,8 @@
 /*
  * Initialize a devices private memory region
  */
-int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
-				size_t *size)
+int qbman_init_private_mem(struct device *dev, int idx, const char *compat,
+			   dma_addr_t *addr, size_t *size)
 {
 	struct device_node *mem_node;
 	struct reserved_mem *rmem;
@@ -44,8 +44,12 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
 
 	mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
 	if (!mem_node) {
-		dev_err(dev, "No memory-region found for index %d\n", idx);
-		return -ENODEV;
+		mem_node = of_find_compatible_node(NULL, NULL, compat);
+		if (!mem_node) {
+			dev_err(dev, "No memory-region found for index %d or compatible '%s'\n",
+				idx, compat);
+			return -ENODEV;
+		}
 	}
 
 	rmem = of_reserved_mem_lookup(mem_node);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index ae8afa552b1e..16485bde9636 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -101,8 +101,8 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
 #define DPAA_GENALLOC_OFF 0x80000000
 
 /* Initialize the devices private memory region */
-int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
-				size_t *size);
+int qbman_init_private_mem(struct device *dev, int idx, const char *compat,
+			   dma_addr_t *addr, size_t *size);
 
 /* memremap() attributes for different platforms */
 #ifdef CONFIG_PPC
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 739e4eee6b75..9be240999f87 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -991,7 +991,7 @@ struct qman_portal {
 	/* linked-list of CSCN handlers. */
 	struct list_head cgr_cbs;
 	/* list lock */
-	spinlock_t cgr_lock;
+	raw_spinlock_t cgr_lock;
 	struct work_struct congestion_work;
 	struct work_struct mr_work;
 	char irqname[MAX_IRQNAME];
@@ -1270,7 +1270,7 @@ static int qman_create_portal(struct qman_portal *portal,
 	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
 	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
 	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
-	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+	portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL);
 	if (!portal->cgrs)
 		goto fail_cgrs;
 	/* initial snapshot is no-depletion */
@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
 	/* if the given mask is NULL, assume all CGRs can be seen */
 	qman_cgrs_fill(&portal->cgrs[0]);
 	INIT_LIST_HEAD(&portal->cgr_cbs);
-	spin_lock_init(&portal->cgr_lock);
+	raw_spin_lock_init(&portal->cgr_lock);
 	INIT_WORK(&portal->congestion_work, qm_congestion_task);
 	INIT_WORK(&portal->mr_work, qm_mr_process_task);
 	portal->bits = 0;
@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
 	union qm_mc_result *mcr;
 	struct qman_cgr *cgr;
 
-	spin_lock(&p->cgr_lock);
+	/*
+	 * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
+	 */
+	raw_spin_lock_irq(&p->cgr_lock);
 	qm_mc_start(&p->p);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
-		spin_unlock(&p->cgr_lock);
+		raw_spin_unlock_irq(&p->cgr_lock);
 		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
 		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
 		return;
@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
 	list_for_each_entry(cgr, &p->cgr_cbs, node)
 		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
 			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
-	spin_unlock(&p->cgr_lock);
+	raw_spin_unlock_irq(&p->cgr_lock);
 	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
 }
 
@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	preempt_enable();
 
 	cgr->chan = p->config->channel;
-	spin_lock(&p->cgr_lock);
+	raw_spin_lock_irq(&p->cgr_lock);
 
 	if (opts) {
 		struct qm_mcc_initcgr local_opts = *opts;
@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
 		cgr->cb(p, cgr, 1);
 out:
-	spin_unlock(&p->cgr_lock);
+	raw_spin_unlock_irq(&p->cgr_lock);
 	put_affine_portal();
 	return ret;
 }
@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
 		return -EINVAL;
 
 	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
-	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
 	list_del(&cgr->node);
 	/*
 	 * If there are no other CGR objects for this CGRID in the list,
@@ -2537,17 +2540,12 @@ int qman_delete_cgr(struct qman_cgr *cgr)
 	/* add back to the list */
 	list_add(&cgr->node, &p->cgr_cbs);
 release_lock:
-	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
 	put_affine_portal();
 	return ret;
 }
 EXPORT_SYMBOL(qman_delete_cgr);
 
-struct cgr_comp {
-	struct qman_cgr *cgr;
-	struct completion completion;
-};
-
 static void qman_delete_cgr_smp_call(void *p)
 {
 	qman_delete_cgr((struct qman_cgr *)p);
@@ -2577,9 +2575,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
 	if (!p)
 		return -EINVAL;
 
-	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
 	ret = qm_modify_cgr(cgr, 0, opts);
-	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
 	put_affine_portal();
 	return ret;
 }
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 157659fd033a..aa5348f4902f 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -468,28 +468,6 @@ static int zero_priv_mem(phys_addr_t addr, size_t sz)
 
 	return 0;
 }
-
-static int qman_fqd(struct reserved_mem *rmem)
-{
-	fqd_a = rmem->base;
-	fqd_sz = rmem->size;
-
-	WARN_ON(!(fqd_a && fqd_sz));
-	return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-
-static int qman_pfdr(struct reserved_mem *rmem)
-{
-	pfdr_a = rmem->base;
-	pfdr_sz = rmem->size;
-
-	WARN_ON(!(pfdr_a && pfdr_sz));
-
-	return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
-
 #endif
 
 unsigned int qm_get_fqid_maxcnt(void)
@@ -796,39 +774,32 @@ static int fsl_qman_probe(struct platform_device *pdev)
 		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
 	}
 
-	if (fqd_a) {
+	/*
+	 * Order of memory regions is assumed as FQD followed by PFDR
+	 * in order to ensure allocations from the correct regions the
+	 * driver initializes then allocates each piece in order
+	 */
+	ret = qbman_init_private_mem(dev, 0, "fsl,qman-fqd", &fqd_a, &fqd_sz);
+	if (ret) {
+		dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+			ret);
+		return -ENODEV;
+	}
 #ifdef CONFIG_PPC
-		/*
-		 * For PPC backward DT compatibility
-		 * FQD memory MUST be zero'd by software
-		 */
-		zero_priv_mem(fqd_a, fqd_sz);
-#else
-		WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+	/*
+	 * For PPC backward DT compatibility
+	 * FQD memory MUST be zero'd by software
+	 */
+	zero_priv_mem(fqd_a, fqd_sz);
 #endif
-	} else {
-		/*
-		 * Order of memory regions is assumed as FQD followed by PFDR
-		 * in order to ensure allocations from the correct regions the
-		 * driver initializes then allocates each piece in order
-		 */
-		ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
-		if (ret) {
-			dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
-				ret);
-			return -ENODEV;
-		}
-	}
 	dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
 
-	if (!pfdr_a) {
-		/* Setup PFDR memory */
-		ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
-		if (ret) {
-			dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
-				ret);
-			return -ENODEV;
-		}
+	/* Setup PFDR memory */
+	ret = qbman_init_private_mem(dev, 1, "fsl,qman-pfdr", &pfdr_a, &pfdr_sz);
+	if (ret) {
+		dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+			ret);
+		return -ENODEV;
 	}
 	dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
 
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index e23b60618c1a..456ef5d5c199 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -48,9 +48,10 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
 	struct device *dev = pcfg->dev;
 	int ret;
 
-	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
-	if (!pcfg->iommu_domain) {
+	pcfg->iommu_domain = iommu_paging_domain_alloc(dev);
+	if (IS_ERR(pcfg->iommu_domain)) {
 		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+		pcfg->iommu_domain = NULL;
 		goto no_iommu;
 	}
 	ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index b7e8e5ec884c..f4d3c2146f4f 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -108,14 +108,12 @@ static int on_all_cpus(int (*fn)(void))
 			.fn = fn,
 			.started = ATOMIC_INIT(0)
 		};
-		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
-			"hotpotato%d", cpu);
+		struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
+							   cpu, "hotpotato%d");
 		int ret;
 
 		if (IS_ERR(k))
 			return -ENOMEM;
-		kthread_bind(k, cpu);
-		wake_up_process(k);
 		/*
 		 * If we call kthread_stop() before the "wake up" has had an
 		 * effect, then the thread may exit with -EINTR without ever
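
For readers following the reserved-memory rework above, a minimal, hedged sketch (not the driver's exact code) of the lookup-with-fallback pattern that the new qbman_init_private_mem() signature enables: prefer the "memory-region" phandle, then fall back to a node matched only by its compatible string. The function name example_lookup_private_mem() is illustrative.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/types.h>

static int example_lookup_private_mem(struct device *dev, int idx,
				      const char *compat,
				      dma_addr_t *addr, size_t *size)
{
	struct device_node *mem_node;
	struct reserved_mem *rmem;

	/* Preferred path: the region referenced by the "memory-region" phandle. */
	mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
	if (!mem_node) {
		/* Legacy path: reservations named only by their compatible string. */
		mem_node = of_find_compatible_node(NULL, NULL, compat);
		if (!mem_node) {
			dev_err(dev, "no reserved memory for index %d or '%s'\n",
				idx, compat);
			return -ENODEV;
		}
	}

	rmem = of_reserved_mem_lookup(mem_node);
	of_node_put(mem_node);
	if (!rmem)
		return -ENODEV;

	*addr = rmem->base;
	*size = rmem->size;
	return 0;
}

This mirrors the dpaa_sys.c hunk: the compatible-string fallback replaces the removed RESERVEDMEM_OF_DECLARE() early-boot hooks while keeping old device trees working.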
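
A second hedged sketch covers the error-handling convention picked up in qman_portal.c: iommu_domain_alloc() returned NULL on failure, whereas iommu_paging_domain_alloc() returns an ERR_PTR(), so callers must test with IS_ERR(). The helper name example_alloc_portal_domain() is illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

static struct iommu_domain *example_alloc_portal_domain(struct device *dev)
{
	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);

	/* With the new allocator, failure is signalled by an ERR_PTR, not NULL. */
	if (IS_ERR(domain)) {
		dev_err(dev, "allocating IOMMU domain failed: %pe\n", domain);
		return NULL;	/* the portal code treats NULL as "run without IOMMU" */
	}
	return domain;
}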