Diffstat (limited to 'drivers'): 595 files changed, 9551 insertions, 5035 deletions
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index fc6c416f8724..d5999eb41c00 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, { "CAV900D", APD_ADDR(vulcan_spi_desc) }, - { "HISI0A21", APD_ADDR(hip07_i2c_desc) }, - { "HISI0A22", APD_ADDR(hip08_i2c_desc) }, + { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, + { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index e51a1e98e62f..f88caf5aab76 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = { }; struct lpss_private_data { + struct acpi_device *adev; void __iomem *mmio_base; resource_size_t mmio_size; unsigned int fixed_clk_rate; @@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = { static void byt_pwm_setup(struct lpss_private_data *pdata) { + struct acpi_device *adev = pdata->adev; + + /* Only call pwm_add_table for the first PWM controller */ + if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + return; + if (!acpi_dev_present("INT33FD", NULL, -1)) pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); } @@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = { static void bsw_pwm_setup(struct lpss_private_data *pdata) { + struct acpi_device *adev = pdata->adev; + + /* Only call pwm_add_table for the first PWM controller */ + if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + return; + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); } @@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, goto err_out; } + pdata->adev = adev; pdata->dev_desc = dev_desc; if (dev_desc->setup) diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index 8c4e0a18460a..bf22c29d2517 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -86,7 +86,12 @@ void __init acpi_watchdog_init(void) found = false; resource_list_for_each_entry(rentry, &resource_list) { - if (resource_contains(rentry->res, &res)) { + if (rentry->res->flags == res.flags && + resource_overlaps(rentry->res, &res)) { + if (res.start < rentry->res->start) + rentry->res->start = res.start; + if (res.end > rentry->res->end) + rentry->res->end = res.end; found = true; break; } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index ddb01e9fa5b2..62068a5e814f 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false; module_param(ec_freeze_events, bool, 0644); MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); +static bool ec_no_wakeup __read_mostly; +module_param(ec_no_wakeup, bool, 0644); +MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle"); + struct acpi_ec_query_handler { struct list_head node; acpi_ec_query_func func; @@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec) spin_unlock_irqrestore(&ec->lock, flags); __acpi_ec_flush_event(ec); } + +void acpi_ec_flush_work(void) +{ + if (first_ec) + __acpi_ec_flush_event(first_ec); + + flush_scheduled_work(); +} #endif /* CONFIG_PM_SLEEP */ static bool acpi_ec_guard_event(struct acpi_ec *ec) @@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev) return 0; } +static int acpi_ec_suspend_noirq(struct device *dev) +{ + 
struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); + + /* + * The SCI handler doesn't run at this point, so the GPE can be + * masked at the low level without side effects. + */ + if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && + ec->reference_count >= 1) + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); + + return 0; +} + +static int acpi_ec_resume_noirq(struct device *dev) +{ + struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); + + if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && + ec->reference_count >= 1) + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); + + return 0; +} + static int acpi_ec_resume(struct device *dev) { struct acpi_ec *ec = @@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev) #endif static const struct dev_pm_ops acpi_ec_pm = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq) SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) }; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 9531d3276f65..58dd7ab3c653 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -193,6 +193,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, void *data); void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); +#ifdef CONFIG_PM_SLEEP +void acpi_ec_flush_work(void); +#endif + /*-------------------------------------------------------------------------- Suspend/Resume diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index edb0c79f7c64..917f1cc0fda4 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -443,7 +443,7 @@ int __init acpi_numa_init(void) * So go over all cpu entries in SRAT to get apicid to node mapping. */ - /* SRAT: Static Resource Affinity Table */ + /* SRAT: System Resource Affinity Table */ if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { struct acpi_subtable_proc srat_proc[3]; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index be17664736b2..fa8243c5c062 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -777,11 +777,11 @@ static void acpi_freeze_sync(void) /* * Process all pending events in case there are any wakeup ones. * - * The EC driver uses the system workqueue, so that one needs to be - * flushed too. + * The EC driver uses the system workqueue and an additional special + * one, so those need to be flushed too. */ + acpi_ec_flush_work(); acpi_os_wait_events_complete(); - flush_scheduled_work(); s2idle_wakeup = false; } diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 4ac3e06b41d8..98aa8c808a33 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -17,6 +17,16 @@ #include <linux/serial_core.h> /* + * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as + * occasionally getting stuck as 1. To avoid the potential for a hang, check + * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART + * implementations, so only do so if an affected platform is detected in + * parse_spcr(). + */ +bool qdf2400_e44_present; +EXPORT_SYMBOL(qdf2400_e44_present); + +/* * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. * Detect them by examining the OEM fields in the SPCR header, similiar to PCI * quirk detection in pci_mcfg.c. @@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon) goto done; } - if (qdf2400_erratum_44_present(&table->header)) - uart = "qdf2400_e44"; + /* + * If the E44 erratum is required, then we need to tell the pl011 + * driver to implement the work-around. 
+ * + * The global variable is used by the probe function when it + * creates the UARTs, whether or not they're used as a console. + * + * If the user specifies "traditional" earlycon, the qdf2400_e44 + * console name matches the EARLYCON_DECLARE() statement, and + * SPCR is not used. Parameter "earlycon" is false. + * + * If the user specifies "SPCR" earlycon, then we need to update + * the console name so that it also says "qdf2400_e44". Parameter + * "earlycon" is true. + * + * For consistency, if we change the console name, then we do it + * for everyone, not just earlycon. + */ + if (qdf2400_erratum_44_present(&table->header)) { + qdf2400_e44_present = true; + if (earlycon) + uart = "qdf2400_e44"; + } + if (xgene_8250_erratum_present(table)) iotype = "mmio32"; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 948fc86980a1..363fc5330c21 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -215,7 +215,7 @@ config SATA_FSL config SATA_GEMINI tristate "Gemini SATA bridge support" - depends on PATA_FTIDE010 + depends on ARCH_GEMINI || COMPILE_TEST default ARCH_GEMINI help This enabled support for the FTIDE010 to SATA bridge @@ -613,7 +613,7 @@ config PATA_FTIDE010 tristate "Faraday Technology FTIDE010 PATA support" depends on OF depends on ARM - default ARCH_GEMINI + depends on SATA_GEMINI help This option enables support for the Faraday FTIDE010 PATA controller found in the Cortina Gemini SoCs. diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8453f9a4682f..fa7dd4394c02 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, retry: ata_tf_init(dev, &tf); if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && - !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) { + !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) { tf.command = ATA_CMD_READ_LOG_DMA_EXT; tf.protocol = ATA_PROT_DMA; dma = true; @@ -2102,8 +2102,8 @@ retry: buf, sectors * ATA_SECT_SIZE, 0); if (err_mask && dma) { - dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG; - ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n"); + dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; + ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n"); goto retry; } diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index b70bcf6d2914..3dbd05532c09 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1434,7 +1434,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, /** * ata_eh_done - EH action complete -* @ap: target ATA port + * @link: ATA link for which EH actions are complete * @dev: target ATA dev for per-dev action (can be NULL) * @action: action just completed * @@ -1576,7 +1576,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) /** * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT - * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to + * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to * @cmd: scsi command for which the sense code should be set * * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK @@ -4175,7 +4175,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) struct ata_link *link; struct ata_device *dev; unsigned long flags; - int rc = 0; /* are we resuming? 
*/ spin_lock_irqsave(ap->lock, flags); @@ -4202,7 +4201,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) ata_acpi_set_state(ap, ap->pm_mesg); if (ap->ops->port_resume) - rc = ap->ops->port_resume(ap); + ap->ops->port_resume(ap); /* tell ACPI that we're resuming */ ata_acpi_on_resume(ap); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index d462c5a3a7ef..44ba292f2cd7 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) { if (!sata_pmp_attached(ap)) { - if (likely(devno < ata_link_max_devices(&ap->link))) + if (likely(devno >= 0 && + devno < ata_link_max_devices(&ap->link))) return &ap->link.device[devno]; } else { - if (likely(devno < ap->nr_pmp_links)) + if (likely(devno >= 0 && + devno < ap->nr_pmp_links)) return &ap->pmp_link[devno].device[0]; } diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index ee9844758736..537d11869069 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = { .compatible = "renesas,sata-r8a7795", .data = (void *)RCAR_GEN2_SATA }, + { + .compatible = "renesas,rcar-gen2-sata", + .data = (void *)RCAR_GEN2_SATA + }, + { + .compatible = "renesas,rcar-gen3-sata", + .data = (void *)RCAR_GEN2_SATA + }, { }, }; MODULE_DEVICE_TABLE(of, sata_rcar_match); diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 2ae24c28e70c..1c152aed6b82 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de { if (dev && dev->dma_mem) return dev->dma_mem; - return dma_coherent_default_memory; + return NULL; } static inline dma_addr_t dma_get_device_base(struct device *dev, @@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev, } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); -/** - * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area - * - * @dev: device from which we allocate memory - * @size: size of requested memory area - * @dma_handle: This will be filled with the correct dma handle - * @ret: This pointer will be filled with the virtual address - * to allocated area. - * - * This function should be only called from per-arch dma_alloc_coherent() - * to support allocation from per-device coherent memory pools. - * - * Returns 0 if dma_alloc_coherent should continue with allocating from - * generic memory areas, or !0 if dma_alloc_coherent should return @ret. - */ -int dma_alloc_from_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle, void **ret) +static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, + ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); int order = get_order(size); unsigned long flags; int pageno; int dma_memory_map; + void *ret; - if (!mem) - return 0; - - *ret = NULL; spin_lock_irqsave(&mem->spinlock, flags); if (unlikely(size > (mem->size << PAGE_SHIFT))) @@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, goto err; /* - * Memory was found in the per-device area. + * Memory was found in the coherent area. 
*/ - *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); - *ret = mem->virt_base + (pageno << PAGE_SHIFT); + *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + ret = mem->virt_base + (pageno << PAGE_SHIFT); dma_memory_map = (mem->flags & DMA_MEMORY_MAP); spin_unlock_irqrestore(&mem->spinlock, flags); if (dma_memory_map) - memset(*ret, 0, size); + memset(ret, 0, size); else - memset_io(*ret, 0, size); + memset_io(ret, 0, size); - return 1; + return ret; err: spin_unlock_irqrestore(&mem->spinlock, flags); + return NULL; +} + +/** + * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool + * @dev: device from which we allocate memory + * @size: size of requested memory area + * @dma_handle: This will be filled with the correct dma handle + * @ret: This pointer will be filled with the virtual address + * to allocated area. + * + * This function should be only called from per-arch dma_alloc_coherent() + * to support allocation from per-device coherent memory pools. + * + * Returns 0 if dma_alloc_coherent should continue with allocating from + * generic memory areas, or !0 if dma_alloc_coherent should return @ret. + */ +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + if (!mem) + return 0; + + *ret = __dma_alloc_from_coherent(mem, size, dma_handle); + if (*ret) + return 1; + /* * In the case where the allocation can not be satisfied from the * per-device area, try to fall back to generic memory if the @@ -225,25 +235,20 @@ err: */ return mem->flags & DMA_MEMORY_EXCLUSIVE; } -EXPORT_SYMBOL(dma_alloc_from_coherent); +EXPORT_SYMBOL(dma_alloc_from_dev_coherent); -/** - * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool - * @dev: device from which the memory was allocated - * @order: the order of pages allocated - * @vaddr: virtual address of allocated pages - * - * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, releases that memory. - * - * Returns 1 if we correctly released the memory, or 0 if - * dma_release_coherent() should proceed with releasing memory from - * generic pools. 
- */ -int dma_release_from_coherent(struct device *dev, int order, void *vaddr) +void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + if (!dma_coherent_default_memory) + return NULL; + + return __dma_alloc_from_coherent(dma_coherent_default_memory, size, + dma_handle); +} +static int __dma_release_from_coherent(struct dma_coherent_mem *mem, + int order, void *vaddr) +{ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; @@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) } return 0; } -EXPORT_SYMBOL(dma_release_from_coherent); /** - * dma_mmap_from_coherent() - try to mmap the memory allocated from - * per-device coherent memory pool to userspace + * dma_release_from_dev_coherent() - free memory to device coherent memory pool * @dev: device from which the memory was allocated - * @vma: vm_area for the userspace memory - * @vaddr: cpu address returned by dma_alloc_from_coherent - * @size: size of the memory buffer allocated by dma_alloc_from_coherent - * @ret: result from remap_pfn_range() + * @order: the order of pages allocated + * @vaddr: virtual address of allocated pages * * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, maps that memory to the provided vma. + * coherent memory pool and if so, releases that memory. * - * Returns 1 if we correctly mapped the memory, or 0 if the caller should - * proceed with mapping memory from generic pools. + * Returns 1 if we correctly released the memory, or 0 if the caller should + * proceed with releasing memory from generic pools. */ -int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, - void *vaddr, size_t size, int *ret) +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) { struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + return __dma_release_from_coherent(mem, order, vaddr); +} +EXPORT_SYMBOL(dma_release_from_dev_coherent); + +int dma_release_from_global_coherent(int order, void *vaddr) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_release_from_coherent(dma_coherent_default_memory, order, + vaddr); +} + +static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, + struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) +{ if (mem && vaddr >= mem->virt_base && vaddr + size <= (mem->virt_base + (mem->size << PAGE_SHIFT))) { unsigned long off = vma->vm_pgoff; @@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, } return 0; } -EXPORT_SYMBOL(dma_mmap_from_coherent); + +/** + * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool + * @dev: device from which the memory was allocated + * @vma: vm_area for the userspace memory + * @vaddr: cpu address returned by dma_alloc_from_dev_coherent + * @size: size of the memory buffer allocated + * @ret: result from remap_pfn_range() + * + * This checks whether the memory was allocated from the per-device + * coherent memory pool and if so, maps that memory to the provided vma. + * + * Returns 1 if we correctly mapped the memory, or 0 if the caller should + * proceed with mapping memory from generic pools. 
+ */ +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *vaddr, size_t size, int *ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); +} +EXPORT_SYMBOL(dma_mmap_from_dev_coherent); + +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, + size_t size, int *ret) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, + vaddr, size, ret); +} /* * Support for reserved memory regions defined in device tree diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 5096755d185e..b555ff9dd8fc 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < count && user_count <= (count - off)) { diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index b9f907eedbf7..bfbe1e154128 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -30,7 +30,6 @@ #include <linux/syscore_ops.h> #include <linux/reboot.h> #include <linux/security.h> -#include <linux/swait.h> #include <generated/utsrelease.h> @@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void) * state of the firmware loading. */ struct fw_state { - struct swait_queue_head wq; + struct completion completion; enum fw_status status; }; static void fw_state_init(struct fw_state *fw_st) { - init_swait_queue_head(&fw_st->wq); + init_completion(&fw_st->completion); fw_st->status = FW_STATUS_UNKNOWN; } @@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout) { long ret; - ret = swait_event_interruptible_timeout(fw_st->wq, - __fw_state_is_done(READ_ONCE(fw_st->status)), - timeout); + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) return -ENOENT; if (!ret) @@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st, WRITE_ONCE(fw_st->status, status); if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) - swake_up(&fw_st->wq); + complete_all(&fw_st->completion); } #define fw_state_start(fw_st) \ __fw_state_set(fw_st, FW_STATUS_LOADING) #define fw_state_done(fw_st) \ __fw_state_set(fw_st, FW_STATUS_DONE) +#define fw_state_aborted(fw_st) \ + __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_wait(fw_st) \ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) -#ifndef CONFIG_FW_LOADER_USER_HELPER - -#define fw_state_is_aborted(fw_st) false - -#else /* CONFIG_FW_LOADER_USER_HELPER */ - static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) { return fw_st->status == status; } +#define fw_state_is_aborted(fw_st) \ + __fw_state_check(fw_st, FW_STATUS_ABORTED) + +#ifdef CONFIG_FW_LOADER_USER_HELPER + #define fw_state_aborted(fw_st) \ __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_is_done(fw_st) \ __fw_state_check(fw_st, FW_STATUS_DONE) #define fw_state_is_loading(fw_st) \ __fw_state_check(fw_st, FW_STATUS_LOADING) -#define fw_state_is_aborted(fw_st) \ - __fw_state_check(fw_st, FW_STATUS_ABORTED) #define fw_state_wait_timeout(fw_st, timeout) \ __fw_state_wait_common(fw_st, timeout) @@ -1200,6 +1196,28 @@ 
_request_firmware_prepare(struct firmware **firmware_p, const char *name, return 1; /* need to load */ } +/* + * Batched requests need only one wake, we need to do this step last due to the + * fallback mechanism. The buf is protected with kref_get(), and it won't be + * released until the last user calls release_firmware(). + * + * Failed batched requests are possible as well, in such cases we just share + * the struct firmware_buf and won't release it until all requests are woken + * and have gone through this same path. + */ +static void fw_abort_batch_reqs(struct firmware *fw) +{ + struct firmware_buf *buf; + + /* Loaded directly? */ + if (!fw || !fw->priv) + return; + + buf = fw->priv; + if (!fw_state_is_aborted(&buf->fw_st)) + fw_state_aborted(&buf->fw_st); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, out: if (ret < 0) { + fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 87a0a29f6e7e..5bdf923294a5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -908,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) continue; } sk_set_memalloc(sock->sk); - sock->sk->sk_sndtimeo = nbd->tag_set.timeout; + if (nbd->tag_set.timeout) + sock->sk->sk_sndtimeo = nbd->tag_set.timeout; atomic_inc(&config->recv_threads); refcount_inc(&nbd->config_refs); old = nsock->sock; @@ -922,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) mutex_unlock(&nsock->tx_lock); sockfd_put(old); + clear_bit(NBD_DISCONNECTED, &config->runtime_flags); + /* We take the tx_mutex in an error path in the recv_work, so we * need to queue_work outside of the tx_mutex. 
*/ @@ -978,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd) int i, ret; for (i = 0; i < config->num_connections; i++) { + struct nbd_sock *nsock = config->socks[i]; + iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + mutex_lock(&nsock->tx_lock); ret = sock_xmit(nbd, i, 1, &from, 0, NULL); if (ret <= 0) dev_err(disk_to_dev(nbd->disk), "Send disconnect failed %d\n", ret); + mutex_unlock(&nsock->tx_lock); } } @@ -991,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd) struct nbd_config *config = nbd->config; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); - if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED, - &config->runtime_flags)) - send_disconnects(nbd); + set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); + send_disconnects(nbd); return 0; } @@ -1074,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd) return -ENOMEM; } sk_set_memalloc(config->socks[i]->sock->sk); - config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout; + if (nbd->tag_set.timeout) + config->socks[i]->sock->sk->sk_sndtimeo = + nbd->tag_set.timeout; atomic_inc(&config->recv_threads); refcount_inc(&nbd->config_refs); INIT_WORK(&args->work, recv_work); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 6b16ead1da58..ad9749463d4f 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -875,6 +875,56 @@ static void print_version(void) printk(KERN_INFO "%s", version); } +struct vdc_check_port_data { + int dev_no; + char *type; +}; + +static int vdc_device_probed(struct device *dev, void *arg) +{ + struct vio_dev *vdev = to_vio_dev(dev); + struct vdc_check_port_data *port_data; + + port_data = (struct vdc_check_port_data *)arg; + + if ((vdev->dev_no == port_data->dev_no) && + (!(strcmp((char *)&vdev->type, port_data->type))) && + dev_get_drvdata(dev)) { + /* This device has already been configured + * by vdc_port_probe() + */ + return 1; + } else { + return 0; + } +} + +/* Determine whether the VIO device is part of an mpgroup + * by locating all the virtual-device-port nodes associated + * with the parent virtual-device node for the VIO device + * and checking whether any of these nodes are vdc-ports + * which have already been configured. + * + * Returns true if this device is part of an mpgroup and has + * already been probed. + */ +static bool vdc_port_mpgroup_check(struct vio_dev *vdev) +{ + struct vdc_check_port_data port_data; + struct device *dev; + + port_data.dev_no = vdev->dev_no; + port_data.type = (char *)&vdev->type; + + dev = device_find_child(vdev->dev.parent, &port_data, + vdc_device_probed); + + if (dev) + return true; + + return false; +} + static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; @@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto err_out_release_mdesc; } + /* Check if this device is part of an mpgroup */ + if (vdc_port_mpgroup_check(vdev)) { + printk(KERN_WARNING + "VIO: Ignoring extra vdisk port %s", + dev_name(&vdev->dev)); + goto err_out_release_mdesc; + } + port = kzalloc(sizeof(*port), GFP_KERNEL); err = -ENOMEM; if (!port) { @@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (err) goto err_out_free_tx_ring; + /* Note that the device driver_data is used to determine + * whether the port has been probed. 
+ */ dev_set_drvdata(&vdev->dev, port); mdesc_release(hp); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4e02aa5fdac0..1498b899a593 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -541,12 +541,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, int i; BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); - for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) - if (sysfs_streq(buf, virtblk_cache_types[i])) - break; - + i = sysfs_match_string(virtblk_cache_types, buf); if (i < 0) - return -EINVAL; + return i; virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); virtblk_update_cache_mode(vdev); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c852ed3c01d5..98e34e4c62b8 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -111,7 +111,7 @@ struct blk_shadow { }; struct blkif_req { - int error; + blk_status_t error; }; static inline struct blkif_req *blkif_req(struct request *rq) @@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. */ + bool new_persistent_gnts = false; struct scatterlist *sg; int num_sg, max_grefs, num_grant; @@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri */ max_grefs += INDIRECT_GREFS(max_grefs); - /* - * We have to reserve 'max_grefs' grants because persistent - * grants are shared by all rings. - */ - if (max_grefs > 0) - if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) { + /* Check if we have enough persistent grants to allocate a requests */ + if (rinfo->persistent_gnts_c < max_grefs) { + new_persistent_gnts = true; + + if (gnttab_alloc_grant_references( + max_grefs - rinfo->persistent_gnts_c, + &setup.gref_head) < 0) { gnttab_request_free_callback( &rinfo->callback, blkif_restart_queue_callback, rinfo, - max_grefs); + max_grefs - rinfo->persistent_gnts_c); return 1; } + } /* Fill out a communications ring structure. 
*/ id = blkif_ring_get_request(rinfo, req, &ring_req); @@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri if (unlikely(require_extra_req)) rinfo->shadow[extra_id].req = *extra_ring_req; - if (max_grefs > 0) + if (new_persistent_gnts) gnttab_free_grant_references(setup.gref_head); return 0; @@ -906,8 +909,8 @@ out_err: return BLK_STS_IOERR; out_busy: - spin_unlock_irqrestore(&rinfo->ring_lock, flags); blk_mq_stop_hw_queue(hctx); + spin_unlock_irqrestore(&rinfo->ring_lock, flags); return BLK_STS_RESOURCE; } @@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - blkif_req(req)->error = -EOPNOTSUPP; + blkif_req(req)->error = BLK_STS_NOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && rinfo->shadow[id].req.u.rw.nr_segments == 0)) { diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 856d5dc02451..3b1b6340ba13 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct zram *zram = dev_to_zram(dev); - char compressor[CRYPTO_MAX_ALG_NAME]; + char compressor[ARRAY_SIZE(zram->compressor)]; size_t sz; strlcpy(compressor, buf, sizeof(compressor)); @@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev, return -EBUSY; } - strlcpy(zram->compressor, compressor, sizeof(compressor)); + strcpy(zram->compressor, compressor); up_write(&zram->init_lock); return len; } diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c index 1e6e0269edcc..f76be6bd6eb3 100644 --- a/drivers/bus/uniphier-system-bus.c +++ b/drivers/bus/uniphier-system-bus.c @@ -256,10 +256,23 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) uniphier_system_bus_set_reg(priv); + platform_set_drvdata(pdev, priv); + /* Now, the bus is configured. 
Populate platform_devices below it */ return of_platform_default_populate(dev->of_node, NULL, dev); } +static int __maybe_unused uniphier_system_bus_resume(struct device *dev) +{ + uniphier_system_bus_set_reg(dev_get_drvdata(dev)); + + return 0; +} + +static const struct dev_pm_ops uniphier_system_bus_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume) +}; + static const struct of_device_id uniphier_system_bus_match[] = { { .compatible = "socionext,uniphier-system-bus" }, { /* sentinel */ } @@ -271,6 +284,7 @@ static struct platform_driver uniphier_system_bus_driver = { .driver = { .name = "uniphier-system-bus", .of_match_table = uniphier_system_bus_match, + .pm = &uniphier_system_bus_pm_ops, }, }; module_platform_driver(uniphier_system_bus_driver); diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index dcbbb4ea3cc1..89527bae4602 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c @@ -381,7 +381,7 @@ static void agp_ali_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -static struct pci_device_id agp_ali_pci_table[] = { +static const struct pci_device_id agp_ali_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 5fbd333e4c6d..b450544dcaf0 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -21,7 +21,7 @@ #define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */ #define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ -static struct pci_device_id agp_amdk7_pci_table[]; +static const struct pci_device_id agp_amdk7_pci_table[]; struct amd_page_map { unsigned long *real; @@ -508,7 +508,7 @@ static int agp_amdk7_resume(struct pci_dev *pdev) #endif /* CONFIG_PM */ /* must be the same order as name table above */ -static struct pci_device_id agp_amdk7_pci_table[] = { +static const struct pci_device_id agp_amdk7_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index c99cd19d9147..e50c29c97ca7 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -610,7 +610,7 @@ static int agp_amd64_resume(struct pci_dev *pdev) #endif /* CONFIG_PM */ -static struct pci_device_id agp_amd64_pci_table[] = { +static const struct pci_device_id agp_amd64_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 0b5ec7af2414..88b4cbee4dac 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c @@ -540,7 +540,7 @@ static void agp_ati_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -static struct pci_device_id agp_ati_pci_table[] = { +static const struct pci_device_id agp_ati_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index 533cb6d229b8..7f88490b5479 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c @@ -427,7 +427,7 @@ static int agp_efficeon_resume(struct pci_dev *pdev) } #endif -static struct pci_device_id agp_efficeon_pci_table[] = { +static const struct pci_device_id agp_efficeon_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 0a21daed5b62..9e4f27a6cb5a 100644 --- a/drivers/char/agp/intel-agp.c +++ 
b/drivers/char/agp/intel-agp.c @@ -828,7 +828,7 @@ static int agp_intel_resume(struct pci_dev *pdev) } #endif -static struct pci_device_id agp_intel_pci_table[] = { +static const struct pci_device_id agp_intel_pci_table[] = { #define ID(x) \ { \ .class = (PCI_CLASS_BRIDGE_HOST << 8), \ diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 6c8d39cb566e..828b34445203 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -420,7 +420,7 @@ static int agp_nvidia_resume(struct pci_dev *pdev) #endif -static struct pci_device_id agp_nvidia_pci_table[] = { +static const struct pci_device_id agp_nvidia_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index 2c74038da459..14909fc5d767 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c @@ -237,7 +237,7 @@ static int agp_sis_resume(struct pci_dev *pdev) #endif /* CONFIG_PM */ -static struct pci_device_id agp_sis_pci_table[] = { +static const struct pci_device_id agp_sis_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index fdced547ad59..c381c8e396fc 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -679,7 +679,7 @@ static void agp_uninorth_remove(struct pci_dev *pdev) agp_put_bridge(bridge); } -static struct pci_device_id agp_uninorth_pci_table[] = { +static const struct pci_device_id agp_uninorth_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, diff --git a/drivers/char/random.c b/drivers/char/random.c index afa3ce7d3e72..8ad92707e45f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM print_once = true; #endif - pr_notice("random: %s called from %pF with crng_init=%d\n", + pr_notice("random: %s called from %pS with crng_init=%d\n", func_name, caller, crng_init); } diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c index c391a49aaaff..b4cf2f699a21 100644 --- a/drivers/clk/clk-gemini.c +++ b/drivers/clk/clk-gemini.c @@ -237,6 +237,18 @@ static int gemini_reset(struct reset_controller_dev *rcdev, BIT(GEMINI_RESET_CPU1) | BIT(id)); } +static int gemini_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return 0; +} + +static int gemini_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return 0; +} + static int gemini_reset_status(struct reset_controller_dev *rcdev, unsigned long id) { @@ -253,6 +265,8 @@ static int gemini_reset_status(struct reset_controller_dev *rcdev, static const struct reset_control_ops gemini_reset_ops = { .reset = gemini_reset, + .assert = gemini_reset_assert, + .deassert = gemini_reset_deassert, .status = gemini_reset_status, }; diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c index 43b0f2f08df2..9cdf9d5050ac 100644 --- a/drivers/clk/keystone/sci-clk.c +++ b/drivers/clk/keystone/sci-clk.c @@ -22,6 +22,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/bsearch.h> #define SCI_CLK_SSC_ENABLE BIT(0) #define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1) @@ -44,6 +45,7 @@ struct sci_clk_data { * @dev: Device pointer for the clock provider * @clk_data: Clock data * @clocks: Clocks array for this device + * 
@num_clocks: Total number of clocks for this provider */ struct sci_clk_provider { const struct ti_sci_handle *sci; @@ -51,6 +53,7 @@ struct sci_clk_provider { struct device *dev; const struct sci_clk_data *clk_data; struct clk_hw **clocks; + int num_clocks; }; /** @@ -58,7 +61,6 @@ struct sci_clk_provider { * @hw: Hardware clock cookie for common clock framework * @dev_id: Device index * @clk_id: Clock index - * @node: Clocks list link * @provider: Master clock provider * @flags: Flags for the clock */ @@ -66,7 +68,6 @@ struct sci_clk { struct clk_hw hw; u16 dev_id; u8 clk_id; - struct list_head node; struct sci_clk_provider *provider; u8 flags; }; @@ -367,6 +368,19 @@ err: return &sci_clk->hw; } +static int _cmp_sci_clk(const void *a, const void *b) +{ + const struct sci_clk *ca = a; + const struct sci_clk *cb = *(struct sci_clk **)b; + + if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id) + return 0; + if (ca->dev_id > cb->dev_id || + (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id)) + return 1; + return -1; +} + /** * sci_clk_get - Xlate function for getting clock handles * @clkspec: device tree clock specifier @@ -380,29 +394,22 @@ err: static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data) { struct sci_clk_provider *provider = data; - u16 dev_id; - u8 clk_id; - const struct sci_clk_data *clks = provider->clk_data; - struct clk_hw **clocks = provider->clocks; + struct sci_clk **clk; + struct sci_clk key; if (clkspec->args_count != 2) return ERR_PTR(-EINVAL); - dev_id = clkspec->args[0]; - clk_id = clkspec->args[1]; + key.dev_id = clkspec->args[0]; + key.clk_id = clkspec->args[1]; - while (clks->num_clks) { - if (clks->dev == dev_id) { - if (clk_id >= clks->num_clks) - return ERR_PTR(-EINVAL); - - return clocks[clk_id]; - } + clk = bsearch(&key, provider->clocks, provider->num_clocks, + sizeof(clk), _cmp_sci_clk); - clks++; - } + if (!clk) + return ERR_PTR(-ENODEV); - return ERR_PTR(-ENODEV); + return &(*clk)->hw; } static int ti_sci_init_clocks(struct sci_clk_provider *p) @@ -410,18 +417,29 @@ static int ti_sci_init_clocks(struct sci_clk_provider *p) const struct sci_clk_data *data = p->clk_data; struct clk_hw *hw; int i; + int num_clks = 0; while (data->num_clks) { - p->clocks = devm_kcalloc(p->dev, data->num_clks, - sizeof(struct sci_clk), - GFP_KERNEL); - if (!p->clocks) - return -ENOMEM; + num_clks += data->num_clks; + data++; + } + p->num_clocks = num_clks; + + p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk), + GFP_KERNEL); + if (!p->clocks) + return -ENOMEM; + + num_clks = 0; + + data = p->clk_data; + + while (data->num_clks) { for (i = 0; i < data->num_clks; i++) { hw = _sci_clk_build(p, data->dev, i); if (!IS_ERR(hw)) { - p->clocks[i] = hw; + p->clocks[num_clks++] = hw; continue; } diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 39eab69fe51a..44a5a535ca63 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -161,6 +161,13 @@ static int mpll_set_rate(struct clk_hw *hw, reg = PARM_SET(p->width, p->shift, reg, 1); writel(reg, mpll->base + p->reg_off); + p = &mpll->ssen; + if (p->width != 0) { + reg = readl(mpll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, 1); + writel(reg, mpll->base + p->reg_off); + } + p = &mpll->n2; reg = readl(mpll->base + p->reg_off); reg = PARM_SET(p->width, p->shift, reg, n2); diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h index d6feafe8bd6c..1629da9b4141 100644 --- a/drivers/clk/meson/clkc.h +++ 
b/drivers/clk/meson/clkc.h @@ -118,6 +118,7 @@ struct meson_clk_mpll { struct parm sdm_en; struct parm n2; struct parm en; + struct parm ssen; spinlock_t *lock; }; diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index a897ea45327c..a7ea5f3da89d 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -528,6 +528,11 @@ static struct meson_clk_mpll gxbb_mpll0 = { .shift = 14, .width = 1, }, + .ssen = { + .reg_off = HHI_MPLL_CNTL, + .shift = 25, + .width = 1, + }, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "mpll0", diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index bb3f1de876b1..6ec512ad2598 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -267,6 +267,11 @@ static struct meson_clk_mpll meson8b_mpll0 = { .shift = 14, .width = 1, }, + .ssen = { + .reg_off = HHI_MPLL_CNTL, + .shift = 25, + .width = 1, + }, .lock = &clk_lock, .hw.init = &(struct clk_init_data){ .name = "mpll0", diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 0748a0b333c5..9a6476aa7d81 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -1283,16 +1283,16 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = { PLL_36XX_RATE(600000000U, 100, 2, 1, 0), PLL_36XX_RATE(400000000U, 200, 3, 2, 0), - PLL_36XX_RATE(393216000U, 197, 3, 2, 25690), - PLL_36XX_RATE(361267200U, 301, 5, 2, 3671), + PLL_36XX_RATE(393216003U, 197, 3, 2, -25690), + PLL_36XX_RATE(361267218U, 301, 5, 2, 3671), PLL_36XX_RATE(200000000U, 200, 3, 3, 0), - PLL_36XX_RATE(196608000U, 197, 3, 3, -25690), - PLL_36XX_RATE(180633600U, 301, 5, 3, 3671), - PLL_36XX_RATE(131072000U, 131, 3, 3, 4719), + PLL_36XX_RATE(196608001U, 197, 3, 3, -25690), + PLL_36XX_RATE(180633609U, 301, 5, 3, 3671), + PLL_36XX_RATE(131072006U, 131, 3, 3, 4719), PLL_36XX_RATE(100000000U, 200, 3, 4, 0), - PLL_36XX_RATE(65536000U, 131, 3, 4, 4719), - PLL_36XX_RATE(49152000U, 197, 3, 5, 25690), - PLL_36XX_RATE(32768000U, 131, 3, 5, 4719), + PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719), + PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690), + PLL_36XX_RATE( 32768001U, 131, 3, 5, 4719), }; static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = { diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 5372bf8be5e6..31d7ffda9aab 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = { .hw.init = CLK_HW_INIT_PARENTS("cpu", cpu_parents, &ccu_mux_ops, - CLK_IS_CRITICAL), + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL), } }; diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index f99abc1106f0..08ef69945ffb 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -186,6 +186,13 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; spin_lock_init(&pclk->lock); + /* + * If the clock was already enabled by the firmware mark it as critical + * to avoid it being gated by the clock framework if no driver owns it. 
+ */ + if (plt_clk_is_enabled(&pclk->hw)) + init.flags |= CLK_IS_CRITICAL; + ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); if (ret) { pclk = ERR_PTR(ret); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6cd503525638..0566455f233e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1922,13 +1922,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) return 0; } -static unsigned int intel_pstate_get(unsigned int cpu_num) -{ - struct cpudata *cpu = all_cpu_data[cpu_num]; - - return cpu ? get_avg_frequency(cpu) : 0; -} - static void intel_pstate_set_update_util_hook(unsigned int cpu_num) { struct cpudata *cpu = all_cpu_data[cpu_num]; @@ -2169,7 +2162,6 @@ static struct cpufreq_driver intel_pstate = { .setpolicy = intel_pstate_set_policy, .suspend = intel_pstate_hwp_save_state, .resume = intel_pstate_resume, - .get = intel_pstate_get, .init = intel_pstate_cpu_init, .exit = intel_pstate_cpu_exit, .stop_cpu = intel_pstate_stop_cpu, diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 37b0698b7193..42896a67aeae 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len, return -1; } +extern u32 pnv_get_supported_cpuidle_states(void); static int powernv_add_idle_states(void) { struct device_node *power_mgt; @@ -248,6 +249,8 @@ static int powernv_add_idle_states(void) const char *names[CPUIDLE_STATE_MAX]; u32 has_stop_states = 0; int i, rc; + u32 supported_flags = pnv_get_supported_cpuidle_states(); + /* Currently we have snooze statically defined */ @@ -362,6 +365,13 @@ static int powernv_add_idle_states(void) for (i = 0; i < dt_idle_states; i++) { unsigned int exit_latency, target_residency; bool stops_timebase = false; + + /* + * Skip the platform idle state whose flag isn't in + * the supported_cpuidle_states flag mask. 
+ */ + if ((flags[i] & supported_flags) != flags[i]) + continue; /* * If an idle state has exit latency beyond * POWERNV_THRESHOLD_LATENCY_NS then don't use it diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 193204dfbf3a..4b75084fabad 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig" config CRYPTO_DEV_BCM_SPU tristate "Broadcom symmetric crypto/hash acceleration support" depends on ARCH_BCM_IPROC - depends on BCM_PDC_MBOX + depends on MAILBOX default m select CRYPTO_DES select CRYPTO_MD5 diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index ef04c9748317..bf7ac621c591 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode, break; case HASH_ALG_SHA3_512: *spu2_type = SPU2_HASH_TYPE_SHA3_512; + break; case HASH_ALG_LAST: default: err = -EINVAL; diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index ae44a464cd2d..9ccefb9b7232 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -18,8 +18,9 @@ #define SE_GROUP 0 #define DRIVER_VERSION "1.0" +#define FW_DIR "cavium/" /* SE microcode */ -#define SE_FW "cnn55xx_se.fw" +#define SE_FW FW_DIR "cnn55xx_se.fw" static const char nitrox_driver_name[] = "CNN55XX"; diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index e7f87ac12685..1fabd4aee81b 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; struct safexcel_crypto_priv *priv; - u64 dma_mask; int i, ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev) return -EPROBE_DEFER; } - if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask)) - dma_mask = DMA_BIT_MASK(64); - ret = dma_set_mask_and_coherent(dev, dma_mask); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) goto err_clk; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 8527a5899a2f..3f819399cd95 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, if (ret) return ret; - memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); - memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); - - for (i = 0; i < ARRAY_SIZE(istate.state); i++) { + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || ctx->opad[i] != le32_to_cpu(ostate.state[i])) { ctx->base.needs_inv = true; @@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, } } + memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); + memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); + return 0; } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index ce9e563e6e1d..938eb4868f7f 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc) } EXPORT_SYMBOL_GPL(dax_write_cache); +bool dax_write_cache_enabled(struct dax_device *dax_dev) +{ + return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); 
+} +EXPORT_SYMBOL_GPL(dax_write_cache_enabled); + bool dax_alive(struct dax_device *dax_dev) { lockdep_assert_held(&dax_srcu); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index f235eae04c16..461d6fc3688b 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -504,6 +504,7 @@ config GPIO_XGENE_SB depends on ARCH_XGENE && OF_GPIO select GPIO_GENERIC select GPIOLIB_IRQCHIP + select IRQ_DOMAIN_HIERARCHY help This driver supports the GPIO block within the APM X-Gene Standby Domain. Say yes here to enable the GPIO functionality. diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index fb8d304cfa17..0ecd2369c2ca 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -132,7 +132,7 @@ static int gpio_exar_probe(struct platform_device *pdev) if (!p) return -ENOMEM; - ret = device_property_read_u32(&pdev->dev, "linux,first-pin", + ret = device_property_read_u32(&pdev->dev, "exar,first-pin", &first_pin); if (ret) return ret; diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c index 6313c50bb91b..a121c8f10610 100644 --- a/drivers/gpio/gpio-lp87565.c +++ b/drivers/gpio/gpio-lp87565.c @@ -26,6 +26,27 @@ struct lp87565_gpio { struct regmap *map; }; +static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct lp87565_gpio *gpio = gpiochip_get_data(chip); + int ret, val; + + ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val); + if (ret < 0) + return ret; + + return !!(val & BIT(offset)); +} + +static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct lp87565_gpio *gpio = gpiochip_get_data(chip); + + regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT, + BIT(offset), value ? BIT(offset) : 0); +} + static int lp87565_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { @@ -54,30 +75,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip, { struct lp87565_gpio *gpio = gpiochip_get_data(chip); + lp87565_gpio_set(chip, offset, value); + return regmap_update_bits(gpio->map, LP87565_REG_GPIO_CONFIG, - BIT(offset), !value ? BIT(offset) : 0); -} - -static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset) -{ - struct lp87565_gpio *gpio = gpiochip_get_data(chip); - int ret, val; - - ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val); - if (ret < 0) - return ret; - - return !!(val & BIT(offset)); -} - -static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) -{ - struct lp87565_gpio *gpio = gpiochip_get_data(chip); - - regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT, - BIT(offset), value ? 
BIT(offset) : 0); + BIT(offset), BIT(offset)); } static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset) diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 3abea3f0b307..92692251ade1 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -424,6 +424,9 @@ static int mxc_gpio_probe(struct platform_device *pdev) return PTR_ERR(port->base); port->irq_high = platform_get_irq(pdev, 1); + if (port->irq_high < 0) + port->irq_high = 0; + port->irq = platform_get_irq(pdev, 0); if (port->irq < 0) return port->irq; diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 88529d3c06c9..506c6a67c5fc 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -360,7 +360,7 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc) { int port; int pin; - int unmasked = 0; + bool unmasked = false; int gpio; u32 lvl; unsigned long sta; @@ -384,8 +384,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc) * before executing the handler so that we don't * miss edges */ - if (lvl & (0x100 << pin)) { - unmasked = 1; + if (!unmasked && lvl & (0x100 << pin)) { + unmasked = true; chained_irq_exit(chip, desc); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9568708a550b..cd003b74512f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) { struct lineevent_state *le = p; struct gpioevent_data ge; - int ret; + int ret, level; ge.timestamp = ktime_get_real_ns(); + level = gpiod_get_value_cansleep(le->desc); if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { - int level = gpiod_get_value_cansleep(le->desc); - if (level) /* Emit low-to-high event */ ge.id = GPIOEVENT_EVENT_RISING_EDGE; else /* Emit high-to-low event */ ge.id = GPIOEVENT_EVENT_FALLING_EDGE; - } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) { + } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) { /* Emit low-to-high event */ ge.id = GPIOEVENT_EVENT_RISING_EDGE; - } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { + } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) { /* Emit high-to-low event */ ge.id = GPIOEVENT_EVENT_FALLING_EDGE; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index faea6349228f..658bac0cdc5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_queue_mgr.o + amdgpu_queue_mgr.o amdgpu_vf_error.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ff7bf1a9f967..51d1364cf185 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -68,13 +68,16 @@ #include "gpu_scheduler.h" #include "amdgpu_virt.h" +#include "amdgpu_gart.h" /* * Modules parameters. 
*/ extern int amdgpu_modeset; extern int amdgpu_vram_limit; -extern int amdgpu_gart_size; +extern int amdgpu_vis_vram_limit; +extern unsigned amdgpu_gart_size; +extern int amdgpu_gtt_size; extern int amdgpu_moverate; extern int amdgpu_benchmarking; extern int amdgpu_testing; @@ -104,6 +107,7 @@ extern unsigned amdgpu_pcie_gen_cap; extern unsigned amdgpu_pcie_lane_cap; extern unsigned amdgpu_cg_mask; extern unsigned amdgpu_pg_mask; +extern unsigned amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern unsigned amdgpu_pp_feature_mask; @@ -532,49 +536,6 @@ int amdgpu_fence_slab_init(void); void amdgpu_fence_slab_fini(void); /* - * GART structures, functions & helpers - */ -struct amdgpu_mc; - -#define AMDGPU_GPU_PAGE_SIZE 4096 -#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) -#define AMDGPU_GPU_PAGE_SHIFT 12 -#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) - -struct amdgpu_gart { - dma_addr_t table_addr; - struct amdgpu_bo *robj; - void *ptr; - unsigned num_gpu_pages; - unsigned num_cpu_pages; - unsigned table_size; -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - struct page **pages; -#endif - bool ready; - - /* Asic default pte flags */ - uint64_t gart_pte_flags; - - const struct amdgpu_gart_funcs *gart_funcs; -}; - -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); -int amdgpu_gart_init(struct amdgpu_device *adev); -void amdgpu_gart_fini(struct amdgpu_device *adev); -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, - int pages); -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, struct page **pagelist, - dma_addr_t *dma_addr, uint64_t flags); -int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); - -/* * VMHUB structures, functions & helpers */ struct amdgpu_vmhub { @@ -598,22 +559,20 @@ struct amdgpu_mc { * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; - u64 gtt_size; - u64 gtt_start; - u64 gtt_end; + u64 gart_size; + u64 gart_start; + u64 gart_end; u64 vram_start; u64 vram_end; unsigned vram_width; u64 real_vram_size; int vram_mtrr; - u64 gtt_base_align; u64 mc_mask; const struct firmware *fw; /* MC firmware */ uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; uint32_t srbm_soft_reset; - struct amdgpu_mode_mc_save save; bool prt_warning; uint64_t stolen_size; /* apertures */ @@ -1159,7 +1118,9 @@ struct amdgpu_cs_parser { struct list_head validated; struct dma_fence *fence; uint64_t bytes_moved_threshold; + uint64_t bytes_moved_vis_threshold; uint64_t bytes_moved; + uint64_t bytes_moved_vis; struct amdgpu_bo_list_entry *evictable; /* user fence */ @@ -1231,7 +1192,9 @@ struct amdgpu_wb { int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb); +int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb); void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb); +void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb); void amdgpu_get_pcie_info(struct amdgpu_device *adev); @@ -1557,6 +1520,10 @@ struct amdgpu_device { spinlock_t gc_cac_idx_lock; 
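
[Annotation] The amdgpu.h hunks above rename the mc fields from gtt_* to gart_* and split the GART declarations into the new amdgpu_gart.h, reflecting that the GART (the GPU's page-table window into system memory, sized by the new gartsize parameter) is now managed separately from the TTM GTT domain (sized by gttsize). The AMDGPU_GPU_PAGE_ALIGN macro that moves with this code rounds sizes up to the 4 KiB GPU page; a compilable sketch of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE     4096ULL
    #define GPU_PAGE_MASK     (GPU_PAGE_SIZE - 1)
    #define GPU_PAGE_ALIGN(a) (((a) + GPU_PAGE_MASK) & ~GPU_PAGE_MASK)

    int main(void)
    {
        /* 4097 bytes occupy two GPU pages */
        printf("%llu\n", GPU_PAGE_ALIGN(4097ULL)); /* prints: 8192 */
        return 0;
    }
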
amdgpu_rreg_t gc_cac_rreg; amdgpu_wreg_t gc_cac_wreg; + /* protects concurrent se_cac register access */ + spinlock_t se_cac_idx_lock; + amdgpu_rreg_t se_cac_rreg; + amdgpu_wreg_t se_cac_wreg; /* protects concurrent ENDPOINT (audio) register access */ spinlock_t audio_endpt_idx_lock; amdgpu_block_rreg_t audio_endpt_rreg; @@ -1593,6 +1560,7 @@ struct amdgpu_device { spinlock_t lock; s64 last_update_us; s64 accum_us; /* accumulated microseconds */ + s64 accum_us_vis; /* for visible VRAM */ u32 log2_max_MBps; } mm_stats; @@ -1687,6 +1655,8 @@ struct amdgpu_device { bool has_hw_reset; u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; + /* record last mm index being written through WREG32*/ + unsigned long last_mm_index; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1742,6 +1712,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) +#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg)) +#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v)) #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) #define WREG32_P(reg, val, mask) \ @@ -1792,50 +1764,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) -/* - * RING helpers. - */ -static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) -{ - if (ring->count_dw <= 0) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); - ring->ring[ring->wptr++ & ring->buf_mask] = v; - ring->wptr &= ring->ptr_mask; - ring->count_dw--; -} - -static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw) -{ - unsigned occupied, chunk1, chunk2; - void *dst; - - if (unlikely(ring->count_dw < count_dw)) { - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); - return; - } - - occupied = ring->wptr & ring->buf_mask; - dst = (void *)&ring->ring[occupied]; - chunk1 = ring->buf_mask + 1 - occupied; - chunk1 = (chunk1 >= count_dw) ? 
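
[Annotation] The amdgpu_ring_write_multiple() helper being dropped from amdgpu.h here (relocated as part of this series) is a classic ring-buffer write: the destination may wrap past the end of the buffer, so the copy is split into at most two memcpy() calls, one up to the end and one from the start. A standalone model of the split, assuming a power-of-two ring so masking the write pointer wraps correctly:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define RING_DW 8 /* power of two so (wptr & mask) wraps correctly */

    static uint32_t ring[RING_DW];
    static unsigned int wptr;

    static void ring_write_multiple(const uint32_t *src, unsigned int count)
    {
        unsigned int occupied = wptr & (RING_DW - 1);
        unsigned int chunk1 = RING_DW - occupied;

        if (chunk1 > count)
            chunk1 = count;
        memcpy(&ring[occupied], src, chunk1 * 4);         /* tail of buffer */
        memcpy(ring, src + chunk1, (count - chunk1) * 4); /* wrapped head   */
        wptr += count;
    }

    int main(void)
    {
        const uint32_t pkt[4] = { 1, 2, 3, 4 };

        wptr = 6; /* force a wrap after two dwords */
        ring_write_multiple(pkt, 4);
        printf("%u %u\n", ring[7], ring[0]); /* prints: 2 3 */
        return 0;
    }
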
count_dw: chunk1; - chunk2 = count_dw - chunk1; - chunk1 <<= 2; - chunk2 <<= 2; - - if (chunk1) - memcpy(dst, src, chunk1); - - if (chunk2) { - src += chunk1; - dst = (void *)ring->ring; - memcpy(dst, src, chunk2); - } - - ring->wptr += count_dw; - ring->wptr &= ring->ptr_mask; - ring->count_dw -= count_dw; -} - static inline struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) { @@ -1898,7 +1826,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) -#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) @@ -1911,8 +1838,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) -#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) -#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) @@ -1927,7 +1852,8 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); -void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes); +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, + u64 num_vis_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); @@ -1943,7 +1869,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); +void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1e8e1123ddf4..ce443586a0c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c 
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1686,7 +1686,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) { uint32_t bios_6_scratch; - bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); + bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6); if (lock) { bios_6_scratch |= ATOM_S6_CRITICAL_STATE; @@ -1696,15 +1696,17 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) bios_6_scratch |= ATOM_S6_ACC_MODE; } - WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); + WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch); } void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) { uint32_t bios_2_scratch, bios_6_scratch; - bios_2_scratch = RREG32(mmBIOS_SCRATCH_2); - bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); + adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0; + + bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2); + bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6); /* let the bios control the backlight */ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; @@ -1715,8 +1717,8 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) /* clear the vbios dpms state */ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE; - WREG32(mmBIOS_SCRATCH_2, bios_2_scratch); - WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); + WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch); + WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch); } void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev) @@ -1724,7 +1726,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev) int i; for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i); + adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i); } void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) @@ -1738,20 +1740,30 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK; for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); + WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]); } void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung) { - u32 tmp = RREG32(mmBIOS_SCRATCH_3); + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3); if (hung) tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; else tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; - WREG32(mmBIOS_SCRATCH_3, tmp); + WREG32(adev->bios_scratch_reg_offset + 3, tmp); +} + +bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); + + if (tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK) + return false; + else + return true; } /* Atom needs data in little endian format diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 38d0fe32e5cd..b0d5d1d7fdba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -200,6 +200,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, diff --git 
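
[Annotation] The new amdgpu_atombios_scratch_need_asic_init() above keys off the same ATOM_S7_ASIC_INIT_COMPLETE_MASK bit that the restore path clears, so after a scratch-register restore the next amdgpu_need_post() check sees "init not complete" and reposts the card. The if/else pair is just an inverted bit test; a one-function equivalent for reference:

    /* Equivalent predicate: true when the VBIOS init-complete bit is clear. */
    int scratch_need_asic_init(unsigned int scratch7, unsigned int init_done_mask)
    {
        return !(scratch7 & init_done_mask);
    }
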
a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 4bdda56fccee..f9ffe8ef0cd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -66,41 +66,6 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) } } -void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i); -} - -void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev) -{ - int i; - - /* - * VBIOS will check ASIC_INIT_COMPLETE bit to decide if - * execute ASIC_Init posting via driver - */ - adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK; - - for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]); -} - -void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev, - bool hung) -{ - u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3); - - if (hung) - tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; - else - tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; - - WREG32(adev->bios_scratch_reg_offset + 3, tmp); -} - int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) { struct atom_context *ctx = adev->mode_info.atom_context; @@ -130,3 +95,129 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) ctx->scratch_size_bytes = usage_bytes; return 0; } + +union igp_info { + struct atom_integrated_system_info_v1_11 v11; +}; + +/* + * Return vram width from integrated system info table, if available, + * or 0 if not. + */ +int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + integratedsysteminfo); + u16 data_offset, size; + union igp_info *igp_info; + u8 frev, crev; + + /* get any igp specific overrides */ + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *) + (mode_info->atom_context->bios + data_offset); + switch (crev) { + case 11: + return igp_info->v11.umachannelnumber * 64; + default: + return 0; + } + } + + return 0; +} + +union firmware_info { + struct atom_firmware_info_v3_1 v31; +}; + +union smu_info { + struct atom_smu_info_v3_1 v31; +}; + +union umc_info { + struct atom_umc_info_v3_1 v31; +}; + +int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct amdgpu_pll *spll = &adev->clock.spll; + struct amdgpu_pll *mpll = &adev->clock.mpll; + uint8_t frev, crev; + uint16_t data_offset; + int ret = -EINVAL, index; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union firmware_info *firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + data_offset); + + adev->clock.default_sclk = + le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz); + adev->clock.default_mclk = + le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz); + + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + + /* not technically a clock, but... 
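
[Annotation] The atomfirmware parsers added above use the union-of-versioned-structs idiom: the table header carries frev/crev, the parser switches on the content revision, and reads the table through the matching union member. A stripped-down compilable model; the struct layouts here are hypothetical stand-ins for the real atomfirmware definitions:

    #include <stdio.h>

    /* Hypothetical versioned table layouts (stand-ins for atomfirmware structs). */
    struct info_v11 { unsigned char umachannelnumber; };
    struct info_v12 { unsigned char umachannelnumber; unsigned char reserved; };

    union igp_info {
        struct info_v11 v11;
        struct info_v12 v12;
    };

    /* Mirrors the crev switch in amdgpu_atomfirmware_get_vram_width(). */
    static int vram_width(const union igp_info *info, int crev)
    {
        switch (crev) {
        case 11:
            return info->v11.umachannelnumber * 64; /* 64 bits per channel */
        default:
            return 0; /* unknown revision: report "not available" */
        }
    }

    int main(void)
    {
        union igp_info i = { .v11 = { .umachannelnumber = 2 } };

        printf("%d\n", vram_width(&i, 11)); /* prints: 128 */
        return 0;
    }
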
*/ + adev->mode_info.firmware_flags = + le32_to_cpu(firmware_info->v31.firmware_capability); + + ret = 0; + } + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smu_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union smu_info *smu_info = + (union smu_info *)(mode_info->atom_context->bios + + data_offset); + + /* system clock */ + spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz); + + spll->reference_div = 0; + spll->min_post_div = 1; + spll->max_post_div = 1; + spll->min_ref_div = 2; + spll->max_ref_div = 0xff; + spll->min_feedback_div = 4; + spll->max_feedback_div = 0xff; + spll->best_vco = 0; + + ret = 0; + } + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + umc_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union umc_info *umc_info = + (union umc_info *)(mode_info->atom_context->bios + + data_offset); + + /* memory clock */ + mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz); + + mpll->reference_div = 0; + mpll->min_post_div = 1; + mpll->max_post_div = 1; + mpll->min_ref_div = 2; + mpll->max_ref_div = 0xff; + mpll->min_feedback_div = 4; + mpll->max_feedback_div = 0xff; + mpll->best_vco = 0; + + ret = 0; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index a2c3ebe22c71..288b97e54347 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -26,10 +26,8 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev, - bool hung); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 1beae5b930d0..2fb299afc12b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, - false); + false, false); if (r) goto exit_do_move; r = dma_fence_wait(fence, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 365e735f6647..c21adf60a7f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -86,19 +86,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size) return false; } -static bool is_atom_fw(uint8_t *bios) -{ - uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8); - uint8_t frev = bios[bios_header_start + 2]; - uint8_t crev = bios[bios_header_start + 3]; - - if ((frev < 3) || - ((frev == 3) && (crev < 3))) - return false; - - return true; -} - /* If you boot an IGP board with a discrete card as the primary, * the IGP rom is not 
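
[Annotation] The bootup clock fields read above are stored little-endian in the VBIOS image, hence the le32_to_cpu() wrappers: on big-endian hosts the bytes must be swapped before use, and on little-endian hosts the conversion is a no-op. A portable open-coded equivalent of that load:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-order-independent little-endian 32-bit load (what le32_to_cpu yields). */
    static uint32_t load_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        const uint8_t raw[4] = { 0x10, 0x27, 0x00, 0x00 }; /* 10000, in 10 kHz units */

        printf("%u\n", load_le32(raw)); /* prints: 10000 */
        return 0;
    }
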
accessible via the rom bar as the IGP rom is * part of the system bios. On boot, the system bios puts a @@ -117,7 +104,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) adev->bios = NULL; vram_base = pci_resource_start(adev->pdev, 0); - bios = ioremap(vram_base, size); + bios = ioremap_wc(vram_base, size); if (!bios) { return false; } @@ -455,6 +442,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) return false; success: - adev->is_atom_fw = is_atom_fw(adev->bios); + adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false; return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f621ee115c98..d324e1c24028 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -83,7 +83,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev, r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL); mutex_unlock(&fpriv->bo_list_lock); if (r < 0) { - kfree(list); + amdgpu_bo_list_free(list); return r; } *id = r; @@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) result = idr_find(&fpriv->bo_list_handles, id); if (result) { - if (kref_get_unless_zero(&result->refcount)) + if (kref_get_unless_zero(&result->refcount)) { + rcu_read_unlock(); mutex_lock(&result->lock); - else + } else { + rcu_read_unlock(); result = NULL; + } + } else { + rcu_read_unlock(); } - rcu_read_unlock(); return result; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index c0a806280257..a99e0bca6812 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -240,6 +240,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, return RREG32_DIDT(index); case CGS_IND_REG_GC_CAC: return RREG32_GC_CAC(index); + case CGS_IND_REG_SE_CAC: + return RREG32_SE_CAC(index); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; @@ -266,6 +268,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, return WREG32_DIDT(index, value); case CGS_IND_REG_GC_CAC: return WREG32_GC_CAC(index, value); + case CGS_IND_REG_SE_CAC: + return WREG32_SE_CAC(index, value); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; @@ -610,6 +614,17 @@ static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device, return 0; } +static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device, + bool lock) +{ + CGS_FUNC_ADEV; + + if (lock) + mutex_lock(&adev->grbm_idx_mutex); + else + mutex_unlock(&adev->grbm_idx_mutex); +} + static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -719,7 +734,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, strcpy(fw_name, "amdgpu/polaris12_smc.bin"); break; case CHIP_VEGA10: - strcpy(fw_name, "amdgpu/vega10_smc.bin"); + if ((adev->pdev->device == 0x687f) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc1) || + (adev->pdev->revision == 0xc3))) + strcpy(fw_name, "amdgpu/vega10_acg_smc.bin"); + else + strcpy(fw_name, "amdgpu/vega10_smc.bin"); break; default: DRM_ERROR("SMC firmware not supported\n"); @@ -1117,6 +1138,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { .query_system_info = amdgpu_cgs_query_system_info, .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled, 
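
[Annotation] The amdgpu_bo_list_get() rework above exists because mutex_lock() can sleep, and sleeping inside an RCU read-side critical section is not allowed: the fix takes the reference with kref_get_unless_zero() while still under rcu_read_lock(), drops the RCU lock in every branch, and only then acquires the sleeping mutex. A compilable userspace analogue of the safe ordering (an rwlock stands in for RCU, and the CAS loop mimics kref_get_unless_zero refusing to resurrect a zero count); build with -pthread:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct obj {
        atomic_int refcount;
        pthread_mutex_t lock;
    };

    static pthread_rwlock_t lookup_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct obj global = { 1, PTHREAD_MUTEX_INITIALIZER };

    /* Increment the refcount only if it is nonzero (kref_get_unless_zero). */
    static int ref_get_unless_zero(atomic_int *r)
    {
        int v = atomic_load(r);

        while (v)
            if (atomic_compare_exchange_weak(r, &v, v + 1))
                return 1;
        return 0;
    }

    static struct obj *get_locked(void)
    {
        struct obj *o;

        pthread_rwlock_rdlock(&lookup_lock);  /* stands in for rcu_read_lock() */
        o = &global;                          /* stands in for idr_find()      */
        if (!ref_get_unless_zero(&o->refcount))
            o = NULL;
        pthread_rwlock_unlock(&lookup_lock);  /* drop the read lock first...   */
        if (o)
            pthread_mutex_lock(&o->lock);     /* ...then take the sleeping lock */
        return o;
    }

    int main(void)
    {
        struct obj *o = get_locked();

        if (o) {
            puts("ref held, mutex taken outside the read section");
            pthread_mutex_unlock(&o->lock);
        }
        return 0;
    }
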
.enter_safe_mode = amdgpu_cgs_enter_safe_mode, + .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx, }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5599c01b265d..33789510e663 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -223,10 +223,11 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) * ticks. The accumulated microseconds (us) are converted to bytes and * returned. */ -static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) +static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, + u64 *max_bytes, + u64 *max_vis_bytes) { s64 time_us, increment_us; - u64 max_bytes; u64 free_vram, total_vram, used_vram; /* Allow a maximum of 200 accumulated ms. This is basically per-IB @@ -238,8 +239,11 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) */ const s64 us_upper_bound = 200000; - if (!adev->mm_stats.log2_max_MBps) - return 0; + if (!adev->mm_stats.log2_max_MBps) { + *max_bytes = 0; + *max_vis_bytes = 0; + return; + } total_vram = adev->mc.real_vram_size - adev->vram_pin_size; used_vram = atomic64_read(&adev->vram_usage); @@ -280,23 +284,45 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us); } - /* This returns 0 if the driver is in debt to disallow (optional) + /* This is set to 0 if the driver is in debt to disallow (optional) * buffer moves. */ - max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); + *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); + + /* Do the same for visible VRAM if half of it is free */ + if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { + u64 total_vis_vram = adev->mc.visible_vram_size; + u64 used_vis_vram = atomic64_read(&adev->vram_vis_usage); + + if (used_vis_vram < total_vis_vram) { + u64 free_vis_vram = total_vis_vram - used_vis_vram; + adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + + increment_us, us_upper_bound); + + if (free_vis_vram >= total_vis_vram / 2) + adev->mm_stats.accum_us_vis = + max(bytes_to_us(adev, free_vis_vram / 2), + adev->mm_stats.accum_us_vis); + } + + *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis); + } else { + *max_vis_bytes = 0; + } spin_unlock(&adev->mm_stats.lock); - return max_bytes; } /* Report how many bytes have really been moved for the last command * submission. This can result in a debt that can stop buffer migrations * temporarily. */ -void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes) +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, + u64 num_vis_bytes) { spin_lock(&adev->mm_stats.lock); adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); + adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); spin_unlock(&adev->mm_stats.lock); } @@ -304,7 +330,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; uint32_t domain; int r; @@ -314,17 +340,35 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, /* Don't move this buffer if we have depleted our allowance * to move it. Don't move anything if the threshold is zero. 
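
[Annotation] The amdgpu_cs threshold rework that follows keeps two throttling budgets: one for total migration bandwidth and a second, separate one for moves into CPU-visible VRAM, which is only topped up while at least half of visible VRAM remains free. A deliberately simplified compilable model of the visible-VRAM budget update; the 1 byte-per-microsecond conversion is an assumption, where the real driver derives the rate from measured memory bandwidth:

    #include <stdint.h>
    #include <stdio.h>

    #define US_UPPER_BOUND 200000 /* cap accumulated credit at 200 ms */

    /* Hypothetical conversion: assume 1 byte of migration per accumulated us. */
    static int64_t us_to_bytes(int64_t us) { return us; }

    static int64_t accum_us_vis; /* credit for moves into visible VRAM */

    static int64_t vis_budget(int64_t increment_us, uint64_t total_vis,
                              uint64_t used_vis)
    {
        if (used_vis < total_vis) {
            uint64_t free_vis = total_vis - used_vis;

            accum_us_vis += increment_us;
            if (accum_us_vis > US_UPPER_BOUND)
                accum_us_vis = US_UPPER_BOUND;
            /* plenty of visible VRAM free: raise the floor of the budget */
            if (free_vis >= total_vis / 2 &&
                accum_us_vis < (int64_t)(free_vis / 2))
                accum_us_vis = free_vis / 2;
        }
        return us_to_bytes(accum_us_vis);
    }

    int main(void)
    {
        printf("%lld\n", (long long)vis_budget(1000, 1 << 20, 0));
        return 0;
    }
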
*/ - if (p->bytes_moved < p->bytes_moved_threshold) - domain = bo->prefered_domains; - else + if (p->bytes_moved < p->bytes_moved_threshold) { + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { + /* And don't move a CPU_ACCESS_REQUIRED BO to limited + * visible VRAM if we've depleted our allowance to do + * that. + */ + if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) + domain = bo->prefered_domains; + else + domain = bo->allowed_domains; + } else { + domain = bo->prefered_domains; + } + } else { domain = bo->allowed_domains; + } retry: amdgpu_ttm_placement_from_domain(bo, domain); initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - - initial_bytes_moved; + bytes_moved = atomic64_read(&adev->num_bytes_moved) - + initial_bytes_moved; + p->bytes_moved += bytes_moved; + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) + p->bytes_moved_vis += bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { domain = bo->allowed_domains; @@ -350,7 +394,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, struct amdgpu_bo_list_entry *candidate = p->evictable; struct amdgpu_bo *bo = candidate->robj; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; + bool update_bytes_moved_vis; uint32_t other; /* If we reached our current BO we can forget it */ @@ -370,10 +415,17 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, /* Good we can try to move this BO somewhere else */ amdgpu_ttm_placement_from_domain(bo, other); + update_bytes_moved_vis = + adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT; initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - + bytes_moved = atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved; + p->bytes_moved += bytes_moved; + if (update_bytes_moved_vis) + p->bytes_moved_vis += bytes_moved; if (unlikely(r)) break; @@ -554,8 +606,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, list_splice(&need_pages, &p->validated); } - p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); + amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, + &p->bytes_moved_vis_threshold); p->bytes_moved = 0; + p->bytes_moved_vis = 0; p->evictable = list_last_entry(&p->validated, struct amdgpu_bo_list_entry, tv.head); @@ -579,8 +633,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto error_validate; } - amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved); - + amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, + p->bytes_moved_vis); fpriv->vm.last_eviction_counter = atomic64_read(&p->adev->num_evictions); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4a8fc15467cf..6279956e92a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -53,6 +53,9 @@ #include "bif/bif_4_1_d.h" #include <linux/pci.h> #include <linux/firmware.h> +#include "amdgpu_vf_error.h" + +#include 
"amdgpu_amdkfd.h" MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -128,6 +131,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; + } + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { BUG_ON(in_interrupt()); return amdgpu_virt_kiq_wreg(adev, reg, v); @@ -143,6 +150,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) @@ -157,6 +168,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; + } if ((reg * 4) < adev->rio_mem_size) iowrite32(v, adev->rio_mem + (reg * 4)); @@ -164,6 +178,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } /** @@ -584,6 +602,21 @@ int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb) } } +int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb) +{ + int i = 0; + unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, + adev->wb.num_wb, 0, 8, 63, 0); + if ((offset + 7) < adev->wb.num_wb) { + for (i = 0; i < 8; i++) + __set_bit(offset + i, adev->wb.used); + *wb = offset; + return 0; + } else { + return -EINVAL; + } +} + /** * amdgpu_wb_free - Free a wb entry * @@ -615,6 +648,23 @@ void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb) } /** + * amdgpu_wb_free_256bit - Free a wb entry + * + * @adev: amdgpu_device pointer + * @wb: wb index + * + * Free a wb slot allocated for use by the driver (all asics) + */ +void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb) +{ + int i = 0; + + if ((wb + 7) < adev->wb.num_wb) + for (i = 0; i < 8; i++) + __clear_bit(wb + i, adev->wb.used); +} + +/** * amdgpu_vram_location - try to find VRAM location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations @@ -665,7 +715,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 } /** - * amdgpu_gtt_location - try to find GTT location + * amdgpu_gart_location - try to find GTT location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @@ -676,28 +726,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 * * FIXME: when reducing GTT size align new size on power of 2. 
*/ -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) +void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { u64 size_af, size_bf; - size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; - size_bf = mc->vram_start & ~mc->gtt_base_align; + size_af = adev->mc.mc_mask - mc->vram_end; + size_bf = mc->vram_start; if (size_bf > size_af) { - if (mc->gtt_size > size_bf) { + if (mc->gart_size > size_bf) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_bf; + mc->gart_size = size_bf; } - mc->gtt_start = 0; + mc->gart_start = 0; } else { - if (mc->gtt_size > size_af) { + if (mc->gart_size > size_af) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_af; + mc->gart_size = size_af; } - mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; + mc->gart_start = mc->vram_end + 1; } - mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; + mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", - mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); + mc->gart_size >> 20, mc->gart_start, mc->gart_end); } /* @@ -720,7 +770,12 @@ bool amdgpu_need_post(struct amdgpu_device *adev) adev->has_hw_reset = false; return true; } - /* then check MEM_SIZE, in case the crtcs are off */ + + /* bios scratch used on CIK+ */ + if (adev->asic_type >= CHIP_BONAIRE) + return amdgpu_atombios_scratch_need_asic_init(adev); + + /* check MEM_SIZE for older asics */ reg = amdgpu_asic_get_config_memsize(adev); if ((reg != 0) && (reg != 0xffffffff)) @@ -1031,19 +1086,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -/** - * amdgpu_check_pot_argument - check that argument is a power of two - * - * @arg: value to check - * - * Validates that a certain argument is a power of two (all asics). - * Returns true if argument is valid. 
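
[Annotation] amdgpu_gart_location() below drops the gtt_base_align rounding and simply places the GART aperture in whichever hole of the GPU address space is larger: below VRAM (starting at 0) or above it (ending at mc_mask), clamping gart_size to the chosen hole. The placement decision in isolation, as a compilable sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct mc { uint64_t vram_start, vram_end, gart_start, gart_end, gart_size; };

    static void gart_location(struct mc *mc, uint64_t mc_mask)
    {
        uint64_t size_af = mc_mask - mc->vram_end; /* hole above VRAM */
        uint64_t size_bf = mc->vram_start;         /* hole below VRAM */

        if (size_bf > size_af) {
            if (mc->gart_size > size_bf)
                mc->gart_size = size_bf;           /* "limiting GTT" */
            mc->gart_start = 0;
        } else {
            if (mc->gart_size > size_af)
                mc->gart_size = size_af;
            mc->gart_start = mc->vram_end + 1;
        }
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
    }

    int main(void)
    {
        struct mc mc = { .vram_start = 0, .vram_end = (8ULL << 30) - 1,
                         .gart_size = 256ULL << 20 };

        gart_location(&mc, (1ULL << 40) - 1);
        printf("0x%llx-0x%llx\n", (unsigned long long)mc.gart_start,
               (unsigned long long)mc.gart_end);
        return 0;
    }
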
- */ -static bool amdgpu_check_pot_argument(int arg) -{ - return (arg & (arg - 1)) == 0; -} - static void amdgpu_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, @@ -1077,7 +1119,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) if (amdgpu_vm_size == -1) return; - if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { + if (!is_power_of_2(amdgpu_vm_size)) { dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", amdgpu_vm_size); goto def_value; @@ -1118,19 +1160,24 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); amdgpu_sched_jobs = 4; - } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){ + } else if (!is_power_of_2(amdgpu_sched_jobs)){ dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", amdgpu_sched_jobs); amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size != -1) { + if (amdgpu_gart_size < 32) { + /* gart size must be greater or equal to 32M */ + dev_warn(adev->dev, "gart size (%d) too small\n", + amdgpu_gart_size); + amdgpu_gart_size = 32; + } + + if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { /* gtt size must be greater or equal to 32M */ - if (amdgpu_gart_size < 32) { - dev_warn(adev->dev, "gart size (%d) too small\n", - amdgpu_gart_size); - amdgpu_gart_size = -1; - } + dev_warn(adev->dev, "gtt size (%d) too small\n", + amdgpu_gtt_size); + amdgpu_gtt_size = -1; } amdgpu_check_vm_size(adev); @@ -1138,7 +1185,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_check_block_size(adev); if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || - !amdgpu_check_pot_argument(amdgpu_vram_page_split))) { + !is_power_of_2(amdgpu_vram_page_split))) { dev_warn(adev->dev, "invalid VRAM page split (%d)\n", amdgpu_vram_page_split); amdgpu_vram_page_split = 1024; @@ -2019,7 +2066,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->flags = flags; adev->asic_type = flags & AMD_ASIC_MASK; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; - adev->mc.gtt_size = 512 * 1024 * 1024; + adev->mc.gart_size = 512 * 1024 * 1024; adev->accel_working = false; adev->num_rings = 0; adev->mman.buffer_funcs = NULL; @@ -2068,6 +2115,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->uvd_ctx_idx_lock); spin_lock_init(&adev->didt_idx_lock); spin_lock_init(&adev->gc_cac_idx_lock); + spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); @@ -2143,6 +2191,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); goto failed; } @@ -2153,6 +2202,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_vpost_needed(adev)) { if (!adev->bios) { dev_err(adev->dev, "no vBIOS found\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); r = -EINVAL; goto failed; } @@ -2160,18 +2210,28 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atom_asic_init(adev->mode_info.atom_context); if (r) { dev_err(adev->dev, "gpu post error!\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0); goto failed; } } else { DRM_INFO("GPU post is not needed\n"); } - if (!adev->is_atom_fw) { + if (adev->is_atom_fw) { + /* Initialize clocks */ + r = amdgpu_atomfirmware_get_clock_info(adev); + if (r) { + 
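
[Annotation] Replacing the private amdgpu_check_pot_argument() with the kernel's is_power_of_2() is more than a cleanup: the old helper's (arg & (arg - 1)) == 0 test accepts zero, while is_power_of_2() from <linux/log2.h> additionally requires a nonzero value, so a zero argument is now rejected too. The bit trick in full:

    #include <stdio.h>

    /* Same predicate as the kernel's is_power_of_2() in <linux/log2.h>. */
    static int is_pow2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", is_pow2(0), is_pow2(64), is_pow2(96)); /* 0 1 0 */
        return 0;
    }
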
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; + } + } else { /* Initialize clocks */ r = amdgpu_atombios_get_clock_info(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - return r; + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; } /* init i2c buses */ amdgpu_atombios_i2c_init(adev); @@ -2181,6 +2241,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_fence_driver_init(adev); if (r) { dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); goto failed; } @@ -2190,6 +2251,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_init(adev); if (r) { dev_err(adev->dev, "amdgpu_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); amdgpu_fini(adev); goto failed; } @@ -2209,6 +2271,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); goto failed; } @@ -2253,12 +2316,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_late_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); goto failed; } return 0; failed: + amdgpu_vf_error_trans_all(adev); if (runtime) vga_switcheroo_fini_domain_pm_ops(adev->dev); return r; @@ -2351,6 +2416,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) } drm_modeset_unlock_all(dev); + amdgpu_amdkfd_suspend(adev); + /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); @@ -2392,10 +2459,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); pci_save_state(dev->pdev); if (suspend) { /* Shut down the device */ @@ -2444,10 +2508,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (r) goto unlock; } - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ if (amdgpu_need_post(adev)) { @@ -2490,6 +2551,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } } } + r = amdgpu_amdkfd_resume(adev); + if (r) + return r; /* blat the mode back in */ if (fbcon) { @@ -2860,21 +2924,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) r = amdgpu_suspend(adev); retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, &save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); - } - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); r = amdgpu_asic_reset(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -2952,6 +3004,7 @@ out: 
} } else { dev_err(adev->dev, "asic resume failed (%d).\n", r); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { if (adev->rings[i] && adev->rings[i]->sched.thread) { kthread_unpark(adev->rings[i]->sched.thread); @@ -2962,12 +3015,16 @@ out: drm_helper_resume_force_mode(adev->ddev); ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (r) + if (r) { /* bad news, how to tell it to userspace ? */ dev_info(adev->dev, "GPU reset failed\n"); - else + amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); + } + else { dev_info(adev->dev, "GPU reset successed!\n"); + } + amdgpu_vf_error_trans_all(adev); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3c57102de634..5e9ce8a29669 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -74,7 +74,9 @@ #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; -int amdgpu_gart_size = -1; /* auto */ +int amdgpu_vis_vram_limit = 0; +unsigned amdgpu_gart_size = 256; +int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; int amdgpu_testing = 0; @@ -106,6 +108,7 @@ unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; unsigned amdgpu_pg_mask = 0xffffffff; +unsigned amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; unsigned amdgpu_pp_feature_mask = 0xffffffff; @@ -120,8 +123,14 @@ int amdgpu_lbpw = -1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)"); -module_param_named(gartsize, amdgpu_gart_size, int, 0600); +MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); +module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); + +MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); +module_param_named(gartsize, amdgpu_gart_size, uint, 0600); + +MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); +module_param_named(gttsize, amdgpu_gtt_size, int, 0600); MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. 
(32, 64, etc., -1=auto, 0=1=disabled)"); module_param_named(moverate, amdgpu_moverate, int, 0600); @@ -186,7 +195,7 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both"); module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444); -MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)"); +MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)"); module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444); MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); @@ -199,7 +208,7 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); -module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444); +module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444); MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))"); module_param_named(no_evict, amdgpu_no_evict, int, 0444); @@ -219,6 +228,9 @@ module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444); MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)"); module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); +MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))"); +module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444); + MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index a57abc1a25fb..5cc4987cd887 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -55,6 +55,19 @@ /* * Common GART table functions. */ + +/** + * amdgpu_gart_set_defaults - set the default gart_size + * + * @adev: amdgpu_device pointer + * + * Set the default gart_size based on parameters and available VRAM. + */ +void amdgpu_gart_set_defaults(struct amdgpu_device *adev) +{ + adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; +} + /** * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table * @@ -263,6 +276,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, } /** + * amdgpu_gart_map - map dma_addresses into GART entries + * + * @adev: amdgpu_device pointer + * @offset: offset into the GPU's gart aperture + * @pages: number of pages to bind + * @dma_addr: DMA addresses of pages + * + * Map the dma_addresses into GART entries (all asics). + * Returns 0 for success, -EINVAL for failure. 
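
[Annotation] The new amdgpu_gart_map() below writes PTEs at 4 KiB GPU-page granularity, so each CPU page fans out into PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE consecutive entries: 1 on 4 KiB-page hosts, 16 on 64 KiB-page kernels. A compilable sketch of the index arithmetic, with printf standing in for the PTE write:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096u
    #define CPU_PAGE_SIZE 65536u /* e.g. a 64 KiB-page kernel */

    static void map_pages(uint64_t offset, int pages, const uint64_t *dma_addr)
    {
        unsigned int t = offset / GPU_PAGE_SIZE; /* first GPU PTE index */

        for (int i = 0; i < pages; i++) {
            uint64_t page_base = dma_addr[i];

            for (unsigned int j = 0; j < CPU_PAGE_SIZE / GPU_PAGE_SIZE; j++, t++) {
                printf("pte[%u] = 0x%llx\n", t,
                       (unsigned long long)page_base);
                page_base += GPU_PAGE_SIZE;
            }
        }
    }

    int main(void)
    {
        const uint64_t dma[1] = { 0x100000 };

        map_pages(0, 1, dma); /* emits 16 PTEs for one 64 KiB page */
        return 0;
    }
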
+ */ +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst) +{ + uint64_t page_base; + unsigned i, j, t; + + if (!adev->gart.ready) { + WARN(1, "trying to bind memory to uninitialized GART !\n"); + return -EINVAL; + } + + t = offset / AMDGPU_GPU_PAGE_SIZE; + + for (i = 0; i < pages; i++) { + page_base = dma_addr[i]; + for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { + amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags); + page_base += AMDGPU_GPU_PAGE_SIZE; + } + } + return 0; +} + +/** * amdgpu_gart_bind - bind pages into the gart page table * * @adev: amdgpu_device pointer @@ -279,31 +327,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, struct page **pagelist, dma_addr_t *dma_addr, uint64_t flags) { - unsigned t; - unsigned p; - uint64_t page_base; - int i, j; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + unsigned i,t,p; +#endif + int r; if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); - - for (i = 0; i < pages; i++, p++) { -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + for (i = 0; i < pages; i++, p++) adev->gart.pages[p] = pagelist[i]; #endif - if (adev->gart.ptr) { - page_base = dma_addr[i]; - for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { - amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags); - page_base += AMDGPU_GPU_PAGE_SIZE; - } - } + + if (adev->gart.ptr) { + r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags, + adev->gart.ptr); + if (r) + return r; } + mb(); amdgpu_gart_flush_gpu_tlb(adev, 0); return 0; @@ -333,8 +380,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev) if (r) return r; /* Compute table size */ - adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE; - adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE; + adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE; + adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE; DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h new file mode 100644 index 000000000000..d4cce6936200 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -0,0 +1,77 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_GART_H__ +#define __AMDGPU_GART_H__ + +#include <linux/types.h> + +/* + * GART structures, functions & helpers + */ +struct amdgpu_device; +struct amdgpu_bo; +struct amdgpu_gart_funcs; + +#define AMDGPU_GPU_PAGE_SIZE 4096 +#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) +#define AMDGPU_GPU_PAGE_SHIFT 12 +#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) + +struct amdgpu_gart { + dma_addr_t table_addr; + struct amdgpu_bo *robj; + void *ptr; + unsigned num_gpu_pages; + unsigned num_cpu_pages; + unsigned table_size; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + struct page **pages; +#endif + bool ready; + + /* Asic default pte flags */ + uint64_t gart_pte_flags; + + const struct amdgpu_gart_funcs *gart_funcs; +}; + +void amdgpu_gart_set_defaults(struct amdgpu_device *adev); +int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); +void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); +int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); +void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); +int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); +void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); +int amdgpu_gart_init(struct amdgpu_device *adev); +void amdgpu_gart_fini(struct amdgpu_device *adev); +int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, + int pages); +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst); +int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, + int pages, struct page **pagelist, + dma_addr_t *dma_addr, uint64_t flags); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 621f739103a6..917ac5e074a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, struct drm_gem_object **obj) { struct amdgpu_bo *robj; - unsigned long max_size; int r; *obj = NULL; @@ -58,17 +57,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, alignment = PAGE_SIZE; } - if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) { - /* Maximum bo size is the unpinned gtt size since we use the gtt to - * handle vram to system pool migrations. 
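
[Annotation] The debugfs dump below reads bo->tbo.mem.start and pin_count through ACCESS_ONCE() because it walks BOs without taking their reservation locks; the annotation forces a single untorn load, so a concurrent move can at worst yield a stale value rather than a compiler-reloaded, inconsistent one. (ACCESS_ONCE() is the older kernel spelling of what READ_ONCE() does today.) The equivalent in plain C with a GCC-style __typeof__:

    #include <stdint.h>
    #include <stdio.h>

    /* Force a single load, like the kernel's ACCESS_ONCE()/READ_ONCE(). */
    #define LOAD_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static uint64_t bo_offset; /* updated concurrently elsewhere */

    int main(void)
    {
        uint64_t snap = LOAD_ONCE(bo_offset); /* one coherent snapshot */

        printf("%llu\n", (unsigned long long)snap);
        return 0;
    }
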
- */ - max_size = adev->mc.gtt_size - adev->gart_pin_size; - if (size > max_size) { - DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", - size >> 20, max_size >> 20); - return -ENOMEM; - } - } retry: r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, NULL, &robj); @@ -784,6 +772,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) unsigned domain; const char *placement; unsigned pin_count; + uint64_t offset; domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); switch (domain) { @@ -798,9 +787,12 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) placement = " CPU"; break; } - seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx", - id, amdgpu_bo_size(bo), placement, - amdgpu_bo_gpu_offset(bo)); + seq_printf(m, "\t0x%08x: %12ld byte %s", + id, amdgpu_bo_size(bo), placement); + + offset = ACCESS_ONCE(bo->tbo.mem.start); + if (offset != AMDGPU_BO_INVALID_OFFSET) + seq_printf(m, " @ 0x%010Lx", offset); pin_count = ACCESS_ONCE(bo->pin_count); if (pin_count) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e26108aad3fe..4f6c68fc1dd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -125,7 +125,8 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) if (mec >= adev->gfx.mec.num_mec) break; - if (adev->gfx.mec.num_mec > 1) { + /* FIXME: spreading the queues across pipes causes perf regressions */ + if (0) { /* policy: amdgpu owns the first two queues of the first MEC */ if (mec == 0 && queue < 2) set_bit(i, adev->gfx.mec.queue_bitmap); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f7d22c44034d..5e6b90c6794f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -42,13 +42,17 @@ struct amdgpu_gtt_mgr { static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, unsigned long p_size) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr; + uint64_t start, size; mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) return -ENOMEM; - drm_mm_init(&mgr->mm, 0, p_size); + start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; + size = (adev->mc.gart_size >> PAGE_SHIFT) - start; + drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); mgr->available = p_size; man->priv = mgr; @@ -81,6 +85,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) } /** + * amdgpu_gtt_mgr_is_allocated - Check if mem has address space + * + * @mem: the mem object to check + * + * Check if a mem object has already address space allocated. 
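
[Annotation] amdgpu_gtt_mgr_init() below now starts the drm_mm range after AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS pages, reserving the bottom of the GART for fixed buffer-move transfer windows; ordinary GTT allocations come from whatever remains of gart_size. The sizing arithmetic, with illustrative constants (not the driver's real values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT         12
    #define MAX_TRANSFER_PAGES 512u /* illustrative window size, in pages */
    #define NUM_WINDOWS        2u

    int main(void)
    {
        uint64_t gart_size = 256ull << 20; /* 256 MiB GART */
        uint64_t start = (uint64_t)MAX_TRANSFER_PAGES * NUM_WINDOWS;
        uint64_t size  = (gart_size >> PAGE_SHIFT) - start;

        printf("drm_mm range: pages %llu..%llu\n",
               (unsigned long long)start,
               (unsigned long long)(start + size - 1));
        return 0;
    }
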
+ */ +bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) +{ + struct drm_mm_node *node = mem->mm_node; + + return (node->start != AMDGPU_BO_INVALID_OFFSET); +} + +/** * amdgpu_gtt_mgr_alloc - allocate new ranges * * @man: TTM memory type manager @@ -95,13 +113,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, const struct ttm_place *place, struct ttm_mem_reg *mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; struct drm_mm_node *node = mem->mm_node; enum drm_mm_insert_mode mode; unsigned long fpfn, lpfn; int r; - if (node->start != AMDGPU_BO_INVALID_OFFSET) + if (amdgpu_gtt_mgr_is_allocated(mem)) return 0; if (place) @@ -112,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, if (place && place->lpfn) lpfn = place->lpfn; else - lpfn = man->size; + lpfn = adev->gart.num_cpu_pages; mode = DRM_MM_INSERT_BEST; if (place && place->flags & TTM_PL_FLAG_TOPDOWN) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f774b3f497d2..659997bfff30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -130,6 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, unsigned i; int r = 0; + bool need_pipe_sync = false; if (num_ibs == 0) return -EINVAL; @@ -165,15 +166,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->emit_pipeline_sync && job && ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || amdgpu_vm_need_pipeline_sync(ring, job))) { - amdgpu_ring_emit_pipeline_sync(ring); + need_pipe_sync = true; dma_fence_put(tmp); } if (ring->funcs->insert_start) ring->funcs->insert_start(ring); - if (vm) { - r = amdgpu_vm_flush(ring, job); + if (job) { + r = amdgpu_vm_flush(ring, job, need_pipe_sync); if (r) { amdgpu_ring_undo(ring); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 2480273c1dca..4bdd851f56d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -220,6 +220,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev) int r = 0; spin_lock_init(&adev->irq.lock); + + /* Disable vblank irqs aggressively for power-saving */ + adev->ddev->vblank_disable_immediate = true; + r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); if (r) { return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 3d641e10e6b6..4510627ae83e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -81,6 +81,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); if (r) kfree(*job); + else + (*job)->vm_pd_addr = adev->gart.table_addr; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b0b23101d1c8..09f833255ba1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -485,7 +485,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file vram_gtt.vram_size -= adev->vram_pin_size; vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); - vram_gtt.gtt_size = adev->mc.gtt_size; + vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; + vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= 
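/* A minimal stand-alone sketch (not from the driver) of the GART split done
 * in amdgpu_gtt_mgr_init() above: the first AMDGPU_GTT_MAX_TRANSFER_SIZE *
 * AMDGPU_GTT_NUM_TRANSFER_WINDOWS pages are held back as copy windows and
 * only the remainder is handed to drm_mm. The GART size used here is an
 * assumed example value.
 */
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_GTT_MAX_TRANSFER_SIZE    512 /* pages per transfer window */
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
#define PAGE_SHIFT                      12

int main(void)
{
	uint64_t gart_size = 256ULL << 20; /* assumed 256 MiB GART aperture */
	uint64_t start = AMDGPU_GTT_MAX_TRANSFER_SIZE *
			 AMDGPU_GTT_NUM_TRANSFER_WINDOWS; /* 1024 pages */
	uint64_t size = (gart_size >> PAGE_SHIFT) - start;

	/* mirrors drm_mm_init(&mgr->mm, start, size) in the hunk above */
	printf("reserved %llu pages for windows, %llu pages manageable\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}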
adev->gart_pin_size; return copy_to_user(out, &vram_gtt, min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; @@ -510,9 +511,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file mem.cpu_accessible_vram.max_allocation = mem.cpu_accessible_vram.usable_heap_size * 3 / 4; - mem.gtt.total_heap_size = adev->mc.gtt_size; - mem.gtt.usable_heap_size = - adev->mc.gtt_size - adev->gart_pin_size; + mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size; + mem.gtt.total_heap_size *= PAGE_SIZE; + mem.gtt.usable_heap_size = mem.gtt.total_heap_size + - adev->gart_pin_size; mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; @@ -571,8 +573,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; } else { - dev_info.max_engine_clock = adev->pm.default_sclk * 10; - dev_info.max_memory_clock = adev->pm.default_mclk * 10; + dev_info.max_engine_clock = adev->clock.default_sclk * 10; + dev_info.max_memory_clock = adev->clock.default_mclk * 10; } dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * @@ -587,8 +589,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); - dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * - AMDGPU_GPU_PAGE_SIZE; + dev_info.pte_fragment_size = + (1 << AMDGPU_LOG2_PAGES_PER_FRAG(adev)) * + AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; dev_info.cu_active_number = adev->gfx.cu_info.number; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 38f739fb727b..6558a3ed57a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -359,7 +359,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) head = bo->mn_list.next; bo->mn = NULL; - list_del(&bo->mn_list); + list_del_init(&bo->mn_list); if (list_empty(head)) { struct amdgpu_mn_node *node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 39f7eda6091e..2af2678ddaf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -257,15 +257,7 @@ struct amdgpu_audio { int num_pins; }; -struct amdgpu_mode_mc_save { - u32 vga_render_control; - u32 vga_hdp_control; - bool crtc_enabled[AMDGPU_MAX_CRTCS]; -}; - struct amdgpu_display_funcs { - /* vga render */ - void (*set_vga_render_state)(struct amdgpu_device *adev, bool render); /* display watermarks */ void (*bandwidth_update)(struct amdgpu_device *adev); /* get frame count */ @@ -300,10 +292,6 @@ struct amdgpu_display_funcs { uint16_t connector_object_id, struct amdgpu_hpd *hpd, struct amdgpu_router *router); - void (*stop_mc_access)(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); - void (*resume_mc_access)(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); }; struct amdgpu_mode_info { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8ee69652be8c..3ec43cf9ad78 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -93,6 +93,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct amdgpu_bo, tbo); + amdgpu_bo_kunmap(bo); amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL); drm_gem_object_release(&bo->gem_base); @@ -322,7 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct amdgpu_bo *bo; enum ttm_bo_type type; unsigned long page_align; - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; size_t acc_size; int r; @@ -398,8 +399,14 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &amdgpu_ttm_bo_destroy); - amdgpu_cs_report_moved_bytes(adev, - atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved); + bytes_moved = atomic64_read(&adev->num_bytes_moved) - + initial_bytes_moved; + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) + amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved); + else + amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); if (unlikely(r != 0)) return r; @@ -426,6 +433,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, trace_amdgpu_bo_create(bo); + /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */ + if (type == ttm_bo_type_device) + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + return 0; fail_unreserve: @@ -535,7 +546,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, amdgpu_bo_size(bo), resv, fence, - direct); + direct, false); if (!r) amdgpu_bo_fence(bo, *fence, true); @@ -588,7 +599,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, amdgpu_bo_size(bo), resv, fence, - direct); + direct, false); if (!r) amdgpu_bo_fence(bo, *fence, true); @@ -724,15 +735,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, dev_err(adev->dev, "%p pin failed\n", bo); goto error; } - r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); - if (unlikely(r)) { - dev_err(adev->dev, "%p bind failed\n", bo); - goto error; - } bo->pin_count = 1; - if (gpu_addr != NULL) + if (gpu_addr != NULL) { + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (unlikely(r)) { + dev_err(adev->dev, "%p bind failed\n", bo); + goto error; + } *gpu_addr = amdgpu_bo_gpu_offset(bo); + } if (domain == AMDGPU_GEM_DOMAIN_VRAM) { adev->vram_pin_size += amdgpu_bo_size(bo); if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) @@ -921,6 +933,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, abo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_vm_bo_invalidate(adev, abo); + amdgpu_bo_kunmap(abo); + /* remember the eviction */ if (evict) atomic64_inc(&adev->num_evictions); @@ -939,19 +953,22 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - unsigned long offset, size, lpfn; - int i, r; + unsigned long offset, size; + int r; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return 0; abo = container_of(bo, struct amdgpu_bo, tbo); + + /* Remember that this BO was accessed by the CPU */ + abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + if (bo->mem.mem_type != TTM_PL_VRAM) return 0; size = bo->mem.num_pages << PAGE_SHIFT; offset = 
bo->mem.start << PAGE_SHIFT; - /* TODO: figure out how to map scattered VRAM to the CPU */ if ((offset + size) <= adev->mc.visible_vram_size) return 0; @@ -961,26 +978,21 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! */ atomic64_inc(&adev->num_vram_cpu_page_faults); - amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); - lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; - for (i = 0; i < abo->placement.num_placement; i++) { - /* Force into visible VRAM */ - if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) && - (!abo->placements[i].lpfn || - abo->placements[i].lpfn > lpfn)) - abo->placements[i].lpfn = lpfn; - } + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT); + + /* Avoid costly evictions; only set GTT as a busy placement */ + abo->placement.num_busy_placement = 1; + abo->placement.busy_placement = &abo->placements[1]; + r = ttm_bo_validate(bo, &abo->placement, false, false); - if (unlikely(r == -ENOMEM)) { - amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); - return ttm_bo_validate(bo, &abo->placement, false, false); - } else if (unlikely(r != 0)) { + if (unlikely(r != 0)) return r; - } offset = bo->mem.start << PAGE_SHIFT; /* this should never happen */ - if ((offset + size) > adev->mc.visible_vram_size) + if (bo->mem.mem_type == TTM_PL_VRAM && + (offset + size) > adev->mc.visible_vram_size) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 382485115b06..833b172a2c2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -120,7 +120,11 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) */ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo) { - return bo->tbo.mem.mem_type != TTM_PL_SYSTEM; + switch (bo->tbo.mem.mem_type) { + case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm); + case TTM_PL_VRAM: return true; + default: return false; + } } int amdgpu_bo_create(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 4083be61b328..8c2204c7b384 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -63,8 +63,13 @@ static int psp_sw_init(void *handle) psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk; break; case CHIP_RAVEN: +#if 0 + psp->init_microcode = psp_v10_0_init_microcode; +#endif psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf; psp->ring_init = psp_v10_0_ring_init; + psp->ring_create = psp_v10_0_ring_create; + psp->ring_destroy = psp_v10_0_ring_destroy; psp->cmd_submit = psp_v10_0_cmd_submit; psp->compare_sram_data = psp_v10_0_compare_sram_data; break; @@ -95,9 +100,8 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, int i; struct amdgpu_device *adev = psp->adev; - val = RREG32(reg_index); - for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32(reg_index); if (check_changed) { if (val != reg_val) return 0; @@ -118,33 +122,18 @@ psp_cmd_submit_buf(struct psp_context *psp, int index) { int ret; - struct amdgpu_bo *cmd_buf_bo; - uint64_t cmd_buf_mc_addr; - struct psp_gfx_cmd_resp *cmd_buf_mem; - struct amdgpu_device *adev = psp->adev; - - ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &cmd_buf_bo, &cmd_buf_mc_addr, - (void **)&cmd_buf_mem); - if (ret) - return ret; - memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); + 
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); - memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); + memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); - ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr, + ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr, fence_mc_addr, index); while (*((unsigned int *)psp->fence_buf) != index) { msleep(1); } - amdgpu_bo_free_kernel(&cmd_buf_bo, - &cmd_buf_mc_addr, - (void **)&cmd_buf_mem); - return ret; } @@ -352,13 +341,20 @@ static int psp_load_fw(struct amdgpu_device *adev) &psp->fence_buf_mc_addr, &psp->fence_buf); if (ret) + goto failed_mem2; + + ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); + if (ret) goto failed_mem1; memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); ret = psp_ring_init(psp, PSP_RING_TYPE__KM); if (ret) - goto failed_mem1; + goto failed_mem; ret = psp_tmr_init(psp); if (ret) @@ -379,9 +375,13 @@ static int psp_load_fw(struct amdgpu_device *adev) return 0; failed_mem: + amdgpu_bo_free_kernel(&psp->cmd_buf_bo, + &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); +failed_mem1: amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); -failed_mem1: +failed_mem2: amdgpu_bo_free_kernel(&psp->fw_pri_bo, &psp->fw_pri_mc_addr, &psp->fw_pri_buf); failed: @@ -435,16 +435,15 @@ static int psp_hw_fini(void *handle) psp_ring_destroy(psp, PSP_RING_TYPE__KM); - if (psp->tmr_buf) - amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); - - if (psp->fw_pri_buf) - amdgpu_bo_free_kernel(&psp->fw_pri_bo, - &psp->fw_pri_mc_addr, &psp->fw_pri_buf); - - if (psp->fence_buf_bo) - amdgpu_bo_free_kernel(&psp->fence_buf_bo, - &psp->fence_buf_mc_addr, &psp->fence_buf); + amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); + amdgpu_bo_free_kernel(&psp->fw_pri_bo, + &psp->fw_pri_mc_addr, &psp->fw_pri_buf); + amdgpu_bo_free_kernel(&psp->fence_buf_bo, + &psp->fence_buf_mc_addr, &psp->fence_buf); + amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr, + &psp->asd_shared_buf); + amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); kfree(psp->cmd); psp->cmd = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 1a1c8b469f93..538fa9dbfb21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -108,6 +108,11 @@ struct psp_context struct amdgpu_bo *fence_buf_bo; uint64_t fence_buf_mc_addr; void *fence_buf; + + /* cmd buffer */ + struct amdgpu_bo *cmd_buf_bo; + uint64_t cmd_buf_mc_addr; + struct psp_gfx_cmd_resp *cmd_buf_mem; }; struct amdgpu_psp_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 75165e07b1cd..15b7149d1204 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -212,10 +212,19 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } - r = amdgpu_wb_get(adev, &ring->fence_offs); - if (r) { - dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); - return r; + if (amdgpu_sriov_vf(adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) { + r = amdgpu_wb_get_256Bit(adev, &ring->fence_offs); + if (r) { + dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); + return r; + } + + } else { + r = amdgpu_wb_get(adev, &ring->fence_offs); + if (r) { + 
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); + return r; + } } r = amdgpu_wb_get(adev, &ring->cond_exe_offs); @@ -278,17 +287,18 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) ring->ready = false; if (ring->funcs->support_64bit_ptrs) { - amdgpu_wb_free_64bit(ring->adev, ring->cond_exe_offs); - amdgpu_wb_free_64bit(ring->adev, ring->fence_offs); amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs); amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs); } else { - amdgpu_wb_free(ring->adev, ring->cond_exe_offs); - amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); } + amdgpu_wb_free(ring->adev, ring->cond_exe_offs); + if (amdgpu_sriov_vf(ring->adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) + amdgpu_wb_free_256bit(ring->adev, ring->fence_offs); + else + amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bc8dec992f73..322d25299a00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -212,4 +212,44 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) } +static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) +{ + if (ring->count_dw <= 0) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + ring->ring[ring->wptr++ & ring->buf_mask] = v; + ring->wptr &= ring->ptr_mask; + ring->count_dw--; +} + +static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, + void *src, int count_dw) +{ + unsigned occupied, chunk1, chunk2; + void *dst; + + if (unlikely(ring->count_dw < count_dw)) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + + occupied = ring->wptr & ring->buf_mask; + dst = (void *)&ring->ring[occupied]; + chunk1 = ring->buf_mask + 1 - occupied; + chunk1 = (chunk1 >= count_dw) ? 
count_dw: chunk1; + chunk2 = count_dw - chunk1; + chunk1 <<= 2; + chunk2 <<= 2; + + if (chunk1) + memcpy(dst, src, chunk1); + + if (chunk2) { + src += chunk1; + dst = (void *)ring->ring; + memcpy(dst, src, chunk2); + } + + ring->wptr += count_dw; + ring->wptr &= ring->ptr_mask; + ring->count_dw -= count_dw; +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 15510dadde01..3c4d7574d704 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -33,7 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_bo *vram_obj = NULL; struct amdgpu_bo **gtt_obj = NULL; - uint64_t gtt_addr, vram_addr; + uint64_t gart_addr, vram_addr; unsigned n, size; int i, r; @@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) /* Number of tests = * (Total GTT - IB pool - writeback page - ring buffers) / test size */ - n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024; + n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) if (adev->rings[i]) n -= adev->rings[i]->ring_size; @@ -76,7 +76,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } for (i = 0; i < n; i++) { void *gtt_map, *vram_map; - void **gtt_start, **gtt_end; + void **gart_start, **gart_end; void **vram_start, **vram_end; struct dma_fence *fence = NULL; @@ -91,7 +91,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_reserve(gtt_obj[i], false); if (unlikely(r != 0)) goto out_lclean_unref; - r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, >t_addr); + r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr); if (r) { DRM_ERROR("Failed to pin GTT object %d\n", i); goto out_lclean_unres; @@ -103,15 +103,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size; - gtt_start < gtt_end; - gtt_start++) - *gtt_start = gtt_start; + for (gart_start = gtt_map, gart_end = gtt_map + size; + gart_start < gart_end; + gart_start++) + *gart_start = gart_start; amdgpu_bo_kunmap(gtt_obj[i]); - r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, - size, NULL, &fence, false); + r = amdgpu_copy_buffer(ring, gart_addr, vram_addr, + size, NULL, &fence, false, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size, + for (gart_start = gtt_map, gart_end = gtt_map + size, vram_start = vram_map, vram_end = vram_map + size; vram_start < vram_end; - gtt_start++, vram_start++) { - if (*vram_start != gtt_start) { + gart_start++, vram_start++) { + if (*vram_start != gart_start) { DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " "expected 0x%p (GTT/VRAM offset " "0x%16llx/0x%16llx)\n", - i, *vram_start, gtt_start, + i, *vram_start, gart_start, (unsigned long long) - (gtt_addr - adev->mc.gtt_start + - (void*)gtt_start - gtt_map), + (gart_addr - adev->mc.gart_start + + (void*)gart_start - gtt_map), (unsigned long long) (vram_addr - adev->mc.vram_start + - (void*)gtt_start - gtt_map)); + (void*)gart_start - gtt_map)); amdgpu_bo_kunmap(vram_obj); goto out_lclean_unpin; } @@ -155,8 +155,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); - r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, - 
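/* A stand-alone sketch of the wrap handling in amdgpu_ring_write_multiple()
 * above, assuming only that the ring size is a power of two so the write
 * pointer can be masked; the ring size and packet contents here are
 * illustrative, not taken from the driver.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_DW 8 /* must be a power of two */

static uint32_t ring[RING_DW];
static uint64_t wptr;

static void ring_write_multiple(const uint32_t *src, unsigned count_dw)
{
	unsigned occupied = wptr & (RING_DW - 1);
	unsigned chunk1 = RING_DW - occupied; /* dwords until the wrap point */

	if (chunk1 > count_dw)
		chunk1 = count_dw;
	memcpy(&ring[occupied], src, chunk1 * 4);
	if (count_dw > chunk1) /* wrapped remainder lands at offset 0 */
		memcpy(ring, src + chunk1, (count_dw - chunk1) * 4);
	wptr += count_dw;
}

int main(void)
{
	uint32_t pkt[6] = { 1, 2, 3, 4, 5, 6 };

	wptr = 5; /* force a wrap: 3 dwords fit at the end, 3 wrap around */
	ring_write_multiple(pkt, 6);
	for (unsigned i = 0; i < RING_DW; i++)
		printf("%u ", ring[i]);
	printf("\n"); /* prints: 4 5 6 0 0 1 2 3 */
	return 0;
}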
size, NULL, &fence, false); + r = amdgpu_copy_buffer(ring, vram_addr, gart_addr, + size, NULL, &fence, false, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); @@ -177,20 +177,20 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size, + for (gart_start = gtt_map, gart_end = gtt_map + size, vram_start = vram_map, vram_end = vram_map + size; - gtt_start < gtt_end; - gtt_start++, vram_start++) { - if (*gtt_start != vram_start) { + gart_start < gart_end; + gart_start++, vram_start++) { + if (*gart_start != vram_start) { DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " "expected 0x%p (VRAM/GTT offset " "0x%16llx/0x%16llx)\n", - i, *gtt_start, vram_start, + i, *gart_start, vram_start, (unsigned long long) (vram_addr - adev->mc.vram_start + (void*)vram_start - vram_map), (unsigned long long) - (gtt_addr - adev->mc.gtt_start + + (gart_addr - adev->mc.gart_start + (void*)vram_start - vram_map)); amdgpu_bo_kunmap(gtt_obj[i]); goto out_lclean_unpin; @@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", - gtt_addr - adev->mc.gtt_start); + gart_addr - adev->mc.gart_start); continue; out_lclean_unpin: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 8601904e670a..509f7a63d40c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -224,7 +224,7 @@ TRACE_EVENT(amdgpu_vm_bo_map, __field(long, start) __field(long, last) __field(u64, offset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -234,7 +234,7 @@ TRACE_EVENT(amdgpu_vm_bo_map, __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), - TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx", __entry->bo, __entry->start, __entry->last, __entry->offset, __entry->flags) ); @@ -248,7 +248,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, __field(long, start) __field(long, last) __field(u64, offset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -258,7 +258,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), - TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx", __entry->bo, __entry->start, __entry->last, __entry->offset, __entry->flags) ); @@ -269,7 +269,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping, TP_STRUCT__entry( __field(u64, soffset) __field(u64, eoffset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -277,7 +277,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping, __entry->eoffset = mapping->last + 1; __entry->flags = mapping->flags; ), - TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", + TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx", __entry->soffset, __entry->eoffset, __entry->flags) ); @@ -293,14 +293,14 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping, TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags), + uint32_t incr, uint64_t flags), TP_ARGS(pe, addr, count, incr, flags), TP_STRUCT__entry( __field(u64, pe) __field(u64, addr) __field(u32, count) __field(u32, incr) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -310,7 +310,7 @@ 
TRACE_EVENT(amdgpu_vm_set_ptes, __entry->incr = incr; __entry->flags = flags; ), - TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u", + TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u", __entry->pe, __entry->addr, __entry->incr, __entry->flags, __entry->count) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c9b131b13ef7..e6f9a54c959d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -47,10 +47,15 @@ #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) +static int amdgpu_map_buffer(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, unsigned num_pages, + uint64_t offset, unsigned window, + struct amdgpu_ring *ring, + uint64_t *addr); + static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); - /* * Global memory. */ @@ -97,6 +102,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) goto error_bo; } + mutex_init(&adev->mman.gtt_window_lock); + ring = adev->mman.buffer_funcs_ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, @@ -123,6 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) if (adev->mman.mem_global_referenced) { amd_sched_entity_fini(adev->mman.entity.sched, &adev->mman.entity); + mutex_destroy(&adev->mman.gtt_window_lock); drm_global_item_unref(&adev->mman.bo_global_ref.ref); drm_global_item_unref(&adev->mman.mem_global_ref); adev->mman.mem_global_referenced = false; @@ -150,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_TT: man->func = &amdgpu_gtt_mgr_func; - man->gpu_offset = adev->mc.gtt_start; + man->gpu_offset = adev->mc.gart_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; @@ -186,12 +194,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - static struct ttm_place placements = { + static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM }; - unsigned i; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { placement->placement = &placements; @@ -207,22 +214,36 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, adev->mman.buffer_funcs_ring && adev->mman.buffer_funcs_ring->ready == false) { amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); + } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { + unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; + struct drm_mm_node *node = bo->mem.mm_node; + unsigned long pages_left; + + for (pages_left = bo->mem.num_pages; + pages_left; + pages_left -= node->size, node++) { + if (node->start < fpfn) + break; + } + + if (!pages_left) + goto gtt; + + /* Try evicting to the CPU inaccessible part of VRAM + * first, but only set GTT as busy placement, so this + * BO will be evicted to GTT rather than causing other + * BOs to be evicted from VRAM + */ + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT); + abo->placements[0].fpfn = fpfn; + abo->placements[0].lpfn = 0; + abo->placement.busy_placement = &abo->placements[1]; + abo->placement.num_busy_placement = 1; } else { +gtt: 
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); - for (i = 0; i < abo->placement.num_placement; ++i) { - if (!(abo->placements[i].flags & - TTM_PL_FLAG_TT)) - continue; - - if (abo->placements[i].lpfn) - continue; - - /* set an upper limit to force directly - * allocating address space for the BO. - */ - abo->placements[i].lpfn = - adev->mc.gtt_size >> PAGE_SHIFT; - } } break; case TTM_PL_TT: @@ -252,29 +273,18 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, new_mem->mm_node = NULL; } -static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo, - struct drm_mm_node *mm_node, - struct ttm_mem_reg *mem, - uint64_t *addr) +static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, + struct drm_mm_node *mm_node, + struct ttm_mem_reg *mem) { - int r; - - switch (mem->mem_type) { - case TTM_PL_TT: - r = amdgpu_ttm_bind(bo, mem); - if (r) - return r; + uint64_t addr = 0; - case TTM_PL_VRAM: - *addr = mm_node->start << PAGE_SHIFT; - *addr += bo->bdev->man[mem->mem_type].gpu_offset; - break; - default: - DRM_ERROR("Unknown placement %d\n", mem->mem_type); - return -EINVAL; + if (mem->mem_type != TTM_PL_TT || + amdgpu_gtt_mgr_is_allocated(mem)) { + addr = mm_node->start << PAGE_SHIFT; + addr += bo->bdev->man[mem->mem_type].gpu_offset; } - - return 0; + return addr; } static int amdgpu_move_blit(struct ttm_buffer_object *bo, @@ -299,26 +309,40 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, } old_mm = old_mem->mm_node; - r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start); - if (r) - return r; old_size = old_mm->size; - + old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem); new_mm = new_mem->mm_node; - r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start); - if (r) - return r; new_size = new_mm->size; + new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem); num_pages = new_mem->num_pages; + mutex_lock(&adev->mman.gtt_window_lock); while (num_pages) { - unsigned long cur_pages = min(old_size, new_size); + unsigned long cur_pages = min(min(old_size, new_size), + (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE); + uint64_t from = old_start, to = new_start; struct dma_fence *next; - r = amdgpu_copy_buffer(ring, old_start, new_start, + if (old_mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(old_mem)) { + r = amdgpu_map_buffer(bo, old_mem, cur_pages, + old_start, 0, ring, &from); + if (r) + goto error; + } + + if (new_mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(new_mem)) { + r = amdgpu_map_buffer(bo, new_mem, cur_pages, + new_start, 1, ring, &to); + if (r) + goto error; + } + + r = amdgpu_copy_buffer(ring, from, to, cur_pages * PAGE_SIZE, - bo->resv, &next, false); + bo->resv, &next, false, true); if (r) goto error; @@ -331,10 +355,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, old_size -= cur_pages; if (!old_size) { - r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem, - &old_start); - if (r) - goto error; + old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem); old_size = old_mm->size; } else { old_start += cur_pages * PAGE_SIZE; @@ -342,22 +363,21 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, new_size -= cur_pages; if (!new_size) { - r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem, - &new_start); - if (r) - goto error; - + new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem); new_size = new_mm->size; } else { new_start += cur_pages * PAGE_SIZE; } } + mutex_unlock(&adev->mman.gtt_window_lock); r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); dma_fence_put(fence); return r; error: + 
mutex_unlock(&adev->mman.gtt_window_lock); + if (fence) dma_fence_wait(fence, false); dma_fence_put(fence); @@ -384,7 +404,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT; + placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); @@ -431,7 +451,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT; + placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); @@ -507,6 +527,15 @@ memcpy: } } + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. + */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + } + /* update statistics */ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); return 0; @@ -695,6 +724,31 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } +static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + uint64_t flags; + int r; + + spin_lock(>t->adev->gtt_list_lock); + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); + gtt->offset = (u64)mem->start << PAGE_SHIFT; + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + + if (r) { + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + goto error_gart_bind; + } + + list_add_tail(>t->list, >t->adev->gtt_list); +error_gart_bind: + spin_unlock(>t->adev->gtt_list_lock); + return r; + +} + static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { @@ -718,7 +772,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - return 0; + if (amdgpu_gtt_mgr_is_allocated(bo_mem)) + r = amdgpu_ttm_do_bind(ttm, bo_mem); + + return r; } bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) @@ -731,8 +788,6 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) { struct ttm_tt *ttm = bo->ttm; - struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; - uint64_t flags; int r; if (!ttm || amdgpu_ttm_is_bound(ttm)) @@ -745,22 +800,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) return r; } - spin_lock(>t->adev->gtt_list_lock); - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); - gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(>t->list, >t->adev->gtt_list); -error_gart_bind: - spin_unlock(>t->adev->gtt_list_lock); - return r; + return amdgpu_ttm_do_bind(ttm, bo_mem); } int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) @@ -1075,6 +1115,67 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, return 
ttm_bo_eviction_valuable(bo, place); } +static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, + unsigned long offset, + void *buf, int len, int write) +{ + struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); + struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); + struct drm_mm_node *nodes = abo->tbo.mem.mm_node; + uint32_t value = 0; + int ret = 0; + uint64_t pos; + unsigned long flags; + + if (bo->mem.mem_type != TTM_PL_VRAM) + return -EIO; + + while (offset >= (nodes->size << PAGE_SHIFT)) { + offset -= nodes->size << PAGE_SHIFT; + ++nodes; + } + pos = (nodes->start << PAGE_SHIFT) + offset; + + while (len && pos < adev->mc.mc_vram_size) { + uint64_t aligned_pos = pos & ~(uint64_t)3; + uint32_t bytes = 4 - (pos & 3); + uint32_t shift = (pos & 3) * 8; + uint32_t mask = 0xffffffff << shift; + + if (len < bytes) { + mask &= 0xffffffff >> (bytes - len) * 8; + bytes = len; + } + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); + WREG32(mmMM_INDEX_HI, aligned_pos >> 31); + if (!write || mask != 0xffffffff) + value = RREG32(mmMM_DATA); + if (write) { + value &= ~mask; + value |= (*(uint32_t *)buf << shift) & mask; + WREG32(mmMM_DATA, value); + } + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + if (!write) { + value = (value & mask) >> shift; + memcpy(buf, &value, bytes); + } + + ret += bytes; + buf = (uint8_t *)buf + bytes; + pos += bytes; + len -= bytes; + if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { + ++nodes; + pos = (nodes->start << PAGE_SHIFT); + } + } + + return ret; +} + static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, @@ -1090,11 +1191,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, + .access_memory = &amdgpu_ttm_access_memory }; int amdgpu_ttm_init(struct amdgpu_device *adev) { + uint64_t gtt_size; int r; + u64 vis_vram_limit; r = amdgpu_ttm_global_init(adev); if (r) { @@ -1118,6 +1222,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + + /* Reduce size of CPU-visible VRAM if requested */ + vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; + if (amdgpu_vis_vram_limit > 0 && + vis_vram_limit <= adev->mc.visible_vram_size) + adev->mc.visible_vram_size = vis_vram_limit; + /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); @@ -1140,14 +1251,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); - r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, - adev->mc.gtt_size >> PAGE_SHIFT); + + if (amdgpu_gtt_size == -1) + gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + adev->mc.mc_vram_size); + else + gtt_size = (uint64_t)amdgpu_gtt_size << 20; + r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; } DRM_INFO("amdgpu: %uM of GTT memory ready.\n", - (unsigned)(adev->mc.gtt_size / (1024 * 1024))); + (unsigned)(gtt_size / (1024 * 1024))); adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; @@ -1256,12 +1372,77 @@ int 
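/* A stand-alone sketch of the sub-dword read-modify-write arithmetic used by
 * amdgpu_ttm_access_memory() above: VRAM behind the MM_INDEX/MM_DATA window
 * is only addressable as aligned 32-bit words, so a partial write must merge
 * new bytes into the existing word. merge_bytes() is a hypothetical helper,
 * not a driver function; only the mask/shift math mirrors the hunk above.
 */
#include <stdint.h>
#include <stdio.h>

/* Merge 'len' bytes from buf into the aligned dword that covers byte 'pos'. */
static uint32_t merge_bytes(uint32_t old, const uint8_t *buf,
			    uint64_t pos, uint32_t len)
{
	uint32_t bytes = 4 - (pos & 3); /* bytes left in this dword */
	uint32_t shift = (pos & 3) * 8;
	uint32_t mask = 0xffffffff << shift;
	uint32_t value = 0;

	if (len < bytes) { /* write ends before the dword does */
		mask &= 0xffffffff >> ((bytes - len) * 8);
		bytes = len;
	}
	for (uint32_t i = 0; i < bytes; i++)
		value |= (uint32_t)buf[i] << (shift + i * 8);
	return (old & ~mask) | (value & mask);
}

int main(void)
{
	uint8_t patch[2] = { 0xAA, 0xBB };

	/* write 2 bytes at byte offset 1 of a dword that held 0x44332211 */
	printf("0x%08x\n", merge_bytes(0x44332211, patch, 1, 2)); /* 0x44bbaa11 */
	return 0;
}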
amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) return ttm_bo_mmap(filp, vma, &adev->mman.bdev); } -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, +static int amdgpu_map_buffer(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, unsigned num_pages, + uint64_t offset, unsigned window, + struct amdgpu_ring *ring, + uint64_t *addr) +{ + struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; + struct amdgpu_device *adev = ring->adev; + struct ttm_tt *ttm = bo->ttm; + struct amdgpu_job *job; + unsigned num_dw, num_bytes; + dma_addr_t *dma_address; + struct dma_fence *fence; + uint64_t src_addr, dst_addr; + uint64_t flags; + int r; + + BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < + AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); + + *addr = adev->mc.gart_start; + *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE; + + num_dw = adev->mman.buffer_funcs->copy_num_dw; + while (num_dw & 0x7) + num_dw++; + + num_bytes = num_pages * 8; + + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); + if (r) + return r; + + src_addr = num_dw * 4; + src_addr += job->ibs[0].gpu_addr; + + dst_addr = adev->gart.table_addr; + dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, + dst_addr, num_bytes); + + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + + dma_address = >t->ttm.dma_address[offset >> PAGE_SHIFT]; + flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem); + r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, + &job->ibs[0].ptr[num_dw]); + if (r) + goto error_free; + + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + if (r) + goto error_free; + + dma_fence_put(fence); + + return r; + +error_free: + amdgpu_job_free(job); + return r; +} + +int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, + uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct dma_fence **fence, bool direct_submit) + struct dma_fence **fence, bool direct_submit, + bool vm_needs_flush) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1283,6 +1464,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, if (r) return r; + job->vm_needs_flush = vm_needs_flush; if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED); @@ -1347,6 +1529,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return -EINVAL; } + if (bo->tbo.mem.mem_type == TTM_PL_TT) { + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (r) + return r; + } + num_pages = bo->tbo.num_pages; mm_node = bo->tbo.mem.mm_node; num_loops = 0; @@ -1382,11 +1570,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t byte_count = mm_node->size << PAGE_SHIFT; uint64_t dst_addr; - r = amdgpu_mm_node_addr(&bo->tbo, mm_node, - &bo->tbo.mem, &dst_addr); - if (r) - return r; - + dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); while (byte_count) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); @@ -1574,7 +1758,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) adev, &amdgpu_ttm_gtt_fops); if (IS_ERR(ent)) return PTR_ERR(ent); - i_size_write(ent->d_inode, adev->mc.gtt_size); + i_size_write(ent->d_inode, adev->mc.gart_size); adev->mman.gtt = ent; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6bdede8ff12b..f137c2458ee8 100644 --- 
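/* A stand-alone sketch of the window addressing in amdgpu_map_buffer() above:
 * window N is backed by the GART PTEs at byte offset
 * N * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8 (8 bytes per PTE) and becomes visible
 * to the copy engine at gart_start + N * AMDGPU_GTT_MAX_TRANSFER_SIZE * 4096.
 * The aperture base used here is an assumed example value.
 */
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GPU_PAGE_SIZE         4096

int main(void)
{
	uint64_t gart_start = 0; /* assumed GART aperture base */

	for (unsigned window = 0; window < 2; window++) {
		/* GPU VA used as the copy source/destination */
		uint64_t addr = gart_start + (uint64_t)window *
				AMDGPU_GTT_MAX_TRANSFER_SIZE *
				AMDGPU_GPU_PAGE_SIZE;
		/* byte offset of this window's PTEs in the GART table */
		uint64_t pte_off = (uint64_t)window *
				   AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;

		printf("window %u: va 0x%llx, pte offset 0x%llx\n", window,
		       (unsigned long long)addr, (unsigned long long)pte_off);
	}
	return 0;
}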
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,6 +34,9 @@ #define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1) #define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2) +#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 +#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 + struct amdgpu_mman { struct ttm_bo_global_ref bo_global_ref; struct drm_global_reference mem_global_ref; @@ -49,6 +52,8 @@ struct amdgpu_mman { /* buffer handling */ const struct amdgpu_buffer_funcs *buffer_funcs; struct amdgpu_ring *buffer_funcs_ring; + + struct mutex gtt_window_lock; /* Scheduler entity for buffer moves */ struct amd_sched_entity entity; }; @@ -56,17 +61,17 @@ struct amdgpu_mman { extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; +bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem); -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, +int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, + uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct dma_fence **fence, bool direct_submit); + struct dma_fence **fence, bool direct_submit, + bool vm_needs_flush); int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct reservation_object *resv, @@ -75,5 +80,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem); +int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 4f50eeb65855..fcfb9d4f7477 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -275,14 +275,10 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) else return AMDGPU_FW_LOAD_PSP; case CHIP_RAVEN: -#if 0 - if (!load_type) + if (load_type != 2) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; -#else - return AMDGPU_FW_LOAD_DIRECT; -#endif default: DRM_ERROR("Unknow firmware load type\n"); } @@ -377,6 +373,11 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) struct amdgpu_firmware_info *ucode = NULL; const struct common_firmware_header *header = NULL; + if (!adev->firmware.fw_size) { + dev_warn(adev->dev, "No ip firmware need to load\n"); + return 0; + } + err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, @@ -459,6 +460,9 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev) int i; struct amdgpu_firmware_info *ucode = NULL; + if (!adev->firmware.fw_size) + return 0; + for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c new file mode 100644 index 000000000000..45ac91861965 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c @@ -0,0 +1,85 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_vf_error.h" +#include "mxgpu_ai.h" + +#define AMDGPU_VF_ERROR_ENTRY_SIZE 16 + +/* struct amdgpu_vf_error_buffer - amdgpu VF error information. */ +struct amdgpu_vf_error_buffer { + int read_count; + int write_count; + uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE]; + uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE]; + uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; +}; + +struct amdgpu_vf_error_buffer admgpu_vf_errors; + + +void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data) +{ + int index; + uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code); + + index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE; + admgpu_vf_errors.code [index] = error_code; + admgpu_vf_errors.flags [index] = error_flags; + admgpu_vf_errors.data [index] = error_data; + admgpu_vf_errors.write_count ++; +} + + +void amdgpu_vf_error_trans_all(struct amdgpu_device *adev) +{ + /* u32 pf2vf_flags = 0; */ + u32 data1, data2, data3; + int index; + + if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) { + return; + } +/* + TODO: Enable this code when pf2vf_info is merged + AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags); + if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) { + return; + } +*/ + /* The error array may have wrapped; correct read_count so only the latest entries are sent. */ + if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) { + admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE; + } + + while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) { + index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE; + data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]); + data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF; + data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF; + + adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3); + admgpu_vf_errors.read_count ++; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h new file mode 100644 index 000000000000..2a3278ec76ba --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc.
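/* A stand-alone sketch of the read/write counter scheme used by
 * amdgpu_vf_error_put() and amdgpu_vf_error_trans_all() above: the buffer is
 * a fixed-size ring indexed modulo ENTRY_SIZE, and once the writer laps the
 * reader, the reader is clamped so only the newest ENTRY_SIZE records are
 * reported. put() and drain() are hypothetical stand-ins for the driver
 * functions.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 16

static int read_count, write_count;
static uint16_t code[ENTRY_SIZE];

static void put(uint16_t c)
{
	code[write_count % ENTRY_SIZE] = c; /* oldest entry is overwritten */
	write_count++;
}

static void drain(void)
{
	if (write_count - read_count > ENTRY_SIZE) /* writer lapped reader */
		read_count = write_count - ENTRY_SIZE;
	while (read_count < write_count)
		printf("%d ", code[read_count++ % ENTRY_SIZE]);
	printf("\n");
}

int main(void)
{
	for (uint16_t i = 0; i < 20; i++) /* 20 errors into 16 slots */
		put(i);
	drain(); /* prints the surviving entries: 4 .. 19 */
	return 0;
}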
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VF_ERROR_H__ +#define __VF_ERROR_H__ + +#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c,f) (((c & 0xFFFF) << 16) | (f & 0xFFFF)) +#define AMDGIM_ERROR_CODE(t,c) (((t&0xF)<<12)|(c&0xFFF)) + +/* Please keep enum same as AMD GIM driver */ +enum AMDGIM_ERROR_VF { + AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL = 0, + AMDGIM_ERROR_VF_NO_VBIOS, + AMDGIM_ERROR_VF_GPU_POST_ERROR, + AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, + AMDGIM_ERROR_VF_FENCE_INIT_FAIL, + + AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, + AMDGIM_ERROR_VF_IB_INIT_FAIL, + AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, + AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, + AMDGIM_ERROR_VF_GPU_RESET_FAIL, + + AMDGIM_ERROR_VF_TEST, + AMDGIM_ERROR_VF_MAX +}; + +enum AMDGIM_ERROR_CATEGORY { + AMDGIM_ERROR_CATEGORY_NON_USED = 0, + AMDGIM_ERROR_CATEGORY_GIM, + AMDGIM_ERROR_CATEGORY_PF, + AMDGIM_ERROR_CATEGORY_VF, + AMDGIM_ERROR_CATEGORY_VBIOS, + AMDGIM_ERROR_CATEGORY_MONITOR, + + AMDGIM_ERROR_CATEGORY_MAX +}; + +void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data); +void amdgpu_vf_error_trans_all (struct amdgpu_device *adev); + +#endif /* __VF_ERROR_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 9e1062edb76e..e5b1baf387c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -43,6 +43,7 @@ struct amdgpu_virt_ops { int (*req_full_gpu)(struct amdgpu_device *adev, bool init); int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); int (*reset_gpu)(struct amdgpu_device *adev); + void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); }; /* GPU virtualization */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5795f81369f0..250c8e80e646 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -77,8 +77,6 @@ struct amdgpu_pte_update_params { void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); - /* indicate update pt or its shadow */ - bool shadow; /* The next two are used during VM update by CPU * DMA addresses to use for mapping * Kernel pointer of PD/PT BO that needs to be updated @@ -161,11 +159,17 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, */ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, int (*validate)(void *, 
struct amdgpu_bo *), - void *param) + void *param, bool use_cpu_for_update) { unsigned i; int r; + if (use_cpu_for_update) { + r = amdgpu_bo_kmap(parent->bo, NULL); + if (r) + return r; + } + if (!parent->entries) return 0; @@ -183,7 +187,8 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, * Recurse into the sub directory. This is harmless because we * have only a maximum of 5 layers. */ - r = amdgpu_vm_validate_level(entry, validate, param); + r = amdgpu_vm_validate_level(entry, validate, param, + use_cpu_for_update); if (r) return r; } @@ -214,7 +219,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (num_evictions == vm->last_eviction_counter) return 0; - return amdgpu_vm_validate_level(&vm->root, validate, param); + return amdgpu_vm_validate_level(&vm->root, validate, param, + vm->use_cpu_for_update); } /** @@ -331,6 +337,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, if (r) return r; + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(pt, NULL); + if (r) { + amdgpu_bo_unref(&pt); + return r; + } + } + /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. */ @@ -338,6 +352,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, entry->bo = pt; entry->addr = 0; + entry->huge_page = false; } if (level < adev->vm_manager.num_level) { @@ -424,7 +439,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, struct dma_fence *updates = sync->last_vm_update; int r = 0; struct dma_fence *flushed, *tmp; - bool needs_flush = false; + bool needs_flush = vm->use_cpu_for_update; flushed = id->flushed_updates; if ((amdgpu_vm_had_gpu_reset(adev, id)) || @@ -545,11 +560,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } kfree(fences); - job->vm_needs_flush = false; + job->vm_needs_flush = vm->use_cpu_for_update; /* Check if we can use a VMID already assigned to this VM */ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { struct dma_fence *flushed; - bool needs_flush = false; + bool needs_flush = vm->use_cpu_for_update; /* Check all the prerequisites to using this VMID */ if (amdgpu_vm_had_gpu_reset(adev, id)) @@ -745,7 +760,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev) * * Emit a VM flush when it is necessary. */ -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -767,12 +782,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) vm_flush_needed = true; } - if (!vm_flush_needed && !gds_switch_needed) + if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) return 0; if (ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); + if (need_pipe_sync) + amdgpu_ring_emit_pipeline_sync(ring); + if (ring->funcs->emit_vm_flush && vm_flush_needed) { struct dma_fence *fence; @@ -981,6 +999,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, unsigned int i; uint64_t value; + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + for (i = 0; i < count; i++) { value = params->pages_addr ? 
amdgpu_vm_map_gart(params->pages_addr, addr) : @@ -989,19 +1009,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, i, value, flags); addr += incr; } - - /* Flush HDP */ - mb(); - amdgpu_gart_flush_gpu_tlb(params->adev, 0); } -static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo) +static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, + void *owner) { struct amdgpu_sync sync; int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner); r = amdgpu_sync_wait(&sync, true); amdgpu_sync_free(&sync); @@ -1042,16 +1059,12 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, params.adev = adev; shadow = parent->bo->shadow; - WARN_ON(vm->use_cpu_for_update && shadow); - if (vm->use_cpu_for_update && !shadow) { - r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr); - if (r) - return r; - r = amdgpu_vm_bo_wait(adev, parent->bo); - if (unlikely(r)) { - amdgpu_bo_kunmap(parent->bo); + if (vm->use_cpu_for_update) { + pd_addr = (unsigned long)parent->bo->kptr; + r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); + if (unlikely(r)) return r; - } + params.func = amdgpu_vm_cpu_set_ptes; } else { if (shadow) { @@ -1105,7 +1118,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, pt = amdgpu_bo_gpu_offset(bo); pt = amdgpu_gart_get_vm_pde(adev, pt); - if (parent->entries[pt_idx].addr == pt) + if (parent->entries[pt_idx].addr == pt || + parent->entries[pt_idx].huge_page) continue; parent->entries[pt_idx].addr = pt; @@ -1146,28 +1160,29 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, count, incr, AMDGPU_PTE_VALID); } - if (params.func == amdgpu_vm_cpu_set_ptes) - amdgpu_bo_kunmap(parent->bo); - else if (params.ib->length_dw == 0) { - amdgpu_job_free(job); - } else { - amdgpu_ring_pad_ib(ring, params.ib); - amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, - AMDGPU_FENCE_OWNER_VM); - if (shadow) - amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, + if (!vm->use_cpu_for_update) { + if (params.ib->length_dw == 0) { + amdgpu_job_free(job); + } else { + amdgpu_ring_pad_ib(ring, params.ib); + amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + if (shadow) + amdgpu_sync_resv(adev, &job->sync, + shadow->tbo.resv, + AMDGPU_FENCE_OWNER_VM); + + WARN_ON(params.ib->length_dw > ndw); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); + if (r) + goto error_free; - WARN_ON(params.ib->length_dw > ndw); - r = amdgpu_job_submit(job, ring, &vm->entity, - AMDGPU_FENCE_OWNER_VM, &fence); - if (r) - goto error_free; - - amdgpu_bo_fence(parent->bo, fence, true); - dma_fence_put(vm->last_dir_update); - vm->last_dir_update = dma_fence_get(fence); - dma_fence_put(fence); + amdgpu_bo_fence(parent->bo, fence, true); + dma_fence_put(vm->last_dir_update); + vm->last_dir_update = dma_fence_get(fence); + dma_fence_put(fence); + } } /* * Recurse into the subdirectories. 
This recursion is harmless because @@ -1235,33 +1250,105 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, if (r) amdgpu_vm_invalidate_level(&vm->root); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); + } + return r; } /** - * amdgpu_vm_find_pt - find the page table for an address + * amdgpu_vm_get_entry - find the entry for an address * * @p: see amdgpu_pte_update_params definition * @addr: virtual address in question + * @entry: resulting entry or NULL + * @parent: parent entry * - * Find the page table BO for a virtual address, return NULL when none found. + * Find the vm_pt entry and its parent for the given address. */ -static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, - uint64_t addr) +void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, + struct amdgpu_vm_pt **entry, + struct amdgpu_vm_pt **parent) { - struct amdgpu_vm_pt *entry = &p->vm->root; unsigned idx, level = p->adev->vm_manager.num_level; - while (entry->entries) { + *parent = NULL; + *entry = &p->vm->root; + while ((*entry)->entries) { idx = addr >> (p->adev->vm_manager.block_size * level--); - idx %= amdgpu_bo_size(entry->bo) / 8; - entry = &entry->entries[idx]; + idx %= amdgpu_bo_size((*entry)->bo) / 8; + *parent = *entry; + *entry = &(*entry)->entries[idx]; } if (level) - return NULL; + *entry = NULL; +} + +/** + * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages + * + * @p: see amdgpu_pte_update_params definition + * @entry: vm_pt entry to check + * @parent: parent entry + * @nptes: number of PTEs updated with this operation + * @dst: destination address where the PTEs should point to + * @flags: access flags for the PTEs + * + * Check if we can update the PD with a huge page.
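In rough terms, the function that follows promotes a PDE to a huge-page PTE only when a single update covers one entire page table with a valid linear mapping; a simplified sketch of that decision, using the names from the function body (the real code also excludes pre-VEGA10 ASICs and the copy-ptes path):

	/* one PDE spans AMDGPU_VM_PTE_COUNT(adev) pages (typically 2MB on VEGA10) */
	if (nptes == AMDGPU_VM_PTE_COUNT(p->adev) && (flags & AMDGPU_PTE_VALID))
		flags |= AMDGPU_PDE_PTE;	/* the PD entry itself acts as a huge PTE */
	else
		dst = amdgpu_gart_get_vm_pde(p->adev,
					     amdgpu_bo_gpu_offset(entry->bo));	/* point back at the PT */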
+ */ +static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, + struct amdgpu_vm_pt *entry, + struct amdgpu_vm_pt *parent, + unsigned nptes, uint64_t dst, + uint64_t flags) +{ + bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes); + uint64_t pd_addr, pde; + int r; - return entry->bo; + /* In the case of a mixed PT the PDE must point to it*/ + if (p->adev->asic_type < CHIP_VEGA10 || + nptes != AMDGPU_VM_PTE_COUNT(p->adev) || + p->func == amdgpu_vm_do_copy_ptes || + !(flags & AMDGPU_PTE_VALID)) { + + dst = amdgpu_bo_gpu_offset(entry->bo); + dst = amdgpu_gart_get_vm_pde(p->adev, dst); + flags = AMDGPU_PTE_VALID; + } else { + flags |= AMDGPU_PDE_PTE; + } + + if (entry->addr == dst && + entry->huge_page == !!(flags & AMDGPU_PDE_PTE)) + return 0; + + entry->addr = dst; + entry->huge_page = !!(flags & AMDGPU_PDE_PTE); + + if (use_cpu_update) { + r = amdgpu_bo_kmap(parent->bo, (void *)&pd_addr); + if (r) + return r; + + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); + } else { + if (parent->bo->shadow) { + pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); + } + pd_addr = amdgpu_bo_gpu_offset(parent->bo); + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); + } + + return 0; } /** @@ -1287,49 +1374,47 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t addr, pe_start; struct amdgpu_bo *pt; unsigned nptes; - int r; bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes); - + int r; /* walk over the address space and update the page tables */ - for (addr = start; addr < end; addr += nptes) { - pt = amdgpu_vm_get_pt(params, addr); - if (!pt) { - pr_err("PT not found, aborting update_ptes\n"); - return -EINVAL; - } + for (addr = start; addr < end; addr += nptes, + dst += nptes * AMDGPU_GPU_PAGE_SIZE) { + struct amdgpu_vm_pt *entry, *parent; - if (params->shadow) { - if (WARN_ONCE(use_cpu_update, - "CPU VM update doesn't suuport shadow pages")) - return 0; - - if (!pt->shadow) - return 0; - pt = pt->shadow; - } + amdgpu_vm_get_entry(params, addr, &entry, &parent); + if (!entry) + return -ENOENT; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); + r = amdgpu_vm_handle_huge_pages(params, entry, parent, + nptes, dst, flags); + if (r) + return r; + + if (entry->huge_page) + continue; + + pt = entry->bo; if (use_cpu_update) { - r = amdgpu_bo_kmap(pt, (void *)&pe_start); - if (r) - return r; - } else + pe_start = (unsigned long)pt->kptr; + } else { + if (pt->shadow) { + pe_start = amdgpu_bo_gpu_offset(pt->shadow); + pe_start += (addr & mask) * 8; + params->func(params, pe_start, dst, nptes, + AMDGPU_GPU_PAGE_SIZE, flags); + } pe_start = amdgpu_bo_gpu_offset(pt); + } pe_start += (addr & mask) * 8; - params->func(params, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); - - dst += nptes * AMDGPU_GPU_PAGE_SIZE; - - if (use_cpu_update) - amdgpu_bo_kunmap(pt); } return 0; @@ -1372,8 +1457,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, */ /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); - uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; + unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev); + uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag); + uint64_t frag_align = 1 << 
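The fragment alignment here is derived from the AMDGPU_LOG2_PAGES_PER_FRAG(adev) macro reworked later in this patch (amdgpu_vm.h). Illustrative arithmetic only: pre-VEGA10 parts keep the old constant, 1 << 4 = 16 pages, i.e. the 64KB fragments the comment above mentions; on VEGA10 the fragment follows the VM block size, so e.g. block_size = 9 gives 1 << 9 = 512 pages = 2MB, lining up with the huge-page PDEs.

	uint64_t frag_align = 1ULL << AMDGPU_LOG2_PAGES_PER_FRAG(adev);
	/* pre-VEGA10: 16 pages * 4KB = 64KB; VEGA10 with block_size 9: 512 * 4KB = 2MB */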
pages_per_frag; uint64_t frag_start = ALIGN(start, frag_align); uint64_t frag_end = end & ~(frag_align - 1); @@ -1445,6 +1531,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.vm = vm; params.src = src; + /* sync to everything on unmapping */ + if (!(flags & AMDGPU_PTE_VALID)) + owner = AMDGPU_FENCE_OWNER_UNDEFINED; + if (vm->use_cpu_for_update) { /* params.src is used as flag to indicate system Memory */ if (pages_addr) @@ -1453,23 +1543,18 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* Wait for PT BOs to be free. PTs share the same resv. object * as the root PD BO */ - r = amdgpu_vm_bo_wait(adev, vm->root.bo); + r = amdgpu_vm_wait_pd(adev, vm, owner); if (unlikely(r)) return r; params.func = amdgpu_vm_cpu_set_ptes; params.pages_addr = pages_addr; - params.shadow = false; return amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); } ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - /* sync to everything on unmapping */ - if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_UNDEFINED; - nptes = last - start + 1; /* @@ -1481,6 +1566,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; + /* one PDE write for each huge page */ + ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6; + if (src) { /* only copy commands needed */ ndw += ncmds * 7; @@ -1542,11 +1630,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - params.shadow = true; - r = amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); - if (r) - goto error_free; - params.shadow = false; r = amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); if (r) goto error_free; @@ -1565,6 +1648,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, error_free: amdgpu_job_free(job); + amdgpu_vm_invalidate_level(&vm->root); return r; } @@ -1752,6 +1836,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); + } + return 0; } @@ -2457,6 +2547,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error_free_root; vm->last_eviction_counter = atomic64_read(&adev->num_evictions); + + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(vm->root.bo, NULL); + if (r) + goto error_free_root; + } + amdgpu_bo_unreserve(vm->root.bo); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 936f158bc5ec..34d9174ebff2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -51,7 +51,9 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 /* LOG2 number of continuous pages for the fragment field */ -#define AMDGPU_LOG2_PAGES_PER_FRAG 4 +#define AMDGPU_LOG2_PAGES_PER_FRAG(adev) \ + ((adev)->asic_type < CHIP_VEGA10 ? 
4 : \ + (adev)->vm_manager.block_size) #define AMDGPU_PTE_VALID (1ULL << 0) #define AMDGPU_PTE_SYSTEM (1ULL << 1) @@ -68,6 +70,9 @@ struct amdgpu_bo_list_entry; /* TILED for VEGA10, reserved for older ASICs */ #define AMDGPU_PTE_PRT (1ULL << 51) +/* PDE is handled as PTE for VEGA10 */ +#define AMDGPU_PDE_PTE (1ULL << 54) + /* VEGA10 only */ #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) #define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) @@ -98,6 +103,7 @@ struct amdgpu_bo_list_entry; struct amdgpu_vm_pt { struct amdgpu_bo *bo; uint64_t addr; + bool huge_page; /* array of page tables, one for each directory entry */ struct amdgpu_vm_pt *entries; @@ -222,7 +228,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_job *job); -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, unsigned vmid); void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 37a499ab30eb..567c4a5cf90c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1824,21 +1824,14 @@ static int cik_common_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_amdkfd_suspend(adev); - return cik_common_hw_fini(adev); } static int cik_common_resume(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = cik_common_hw_init(adev); - if (r) - return r; - - return amdgpu_amdkfd_resume(adev); + return cik_common_hw_init(adev); } static bool cik_common_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c216e16826c9..f508f4d01e4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -342,6 +342,63 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev) } /** + * cik_ctx_switch_enable - enable/disable the async dma engines context switch + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs context switch. + * + * Halt or unhalt the async dma engines context switch (CIK).
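The clamping loop in the body below converts the raw amdgpu_sdma_phase_quantum cycle count into the register's value/unit pair, where each unit step doubles the granularity. Assuming, for illustration, a VALUE field that saturates at 0x3FFF, a module parameter of 100000 would encode as:

	/* 100000 -> 50000 (unit 1) -> 25000 (unit 2) -> 12500 (unit 3) */
	phase_quantum = 12500 << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			3 << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;	/* 12500 * 2^3 = 100000 cycles */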
+ */ +static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl, phase_quantum = 0; + int i; + + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); + if (enable) { + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 1); + if (amdgpu_sdma_phase_quantum) { + WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i], + phase_quantum); + WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i], + phase_quantum); + } + } else { + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 0); + } + + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); + } +} + +/** * cik_sdma_enable - stop the async dma engines * * @adev: amdgpu_device pointer @@ -537,6 +594,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) /* halt the engine before programing */ cik_sdma_enable(adev, false); + /* enable sdma ring preemption */ + cik_ctx_switch_enable(adev, true); /* start the gfx rings and rlc compute queues */ r = cik_sdma_gfx_resume(adev); @@ -984,6 +1043,7 @@ static int cik_sdma_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cik_ctx_switch_enable(adev, false); cik_sdma_enable(adev, false); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h index 18fd01f3e4b2..003a131bad47 100644 --- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h @@ -1,24 +1,25 @@ - /* -*************************************************************************************************** -* -* Trade secret of Advanced Micro Devices, Inc. -* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished) -* -* All rights reserved. This notice is intended as a precaution against inadvertent publication and -* does not imply publication or any waiver of confidentiality. The year included in the foregoing -* notice is the year of creation of the work. -* -*************************************************************************************************** -*/ -/** -*************************************************************************************************** -* @brief gfx9 Clearstate Definitions -*************************************************************************************************** -* -* Do not edit! This is a machine-generated file! -* -*/ + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ static const unsigned int gfx9_SECT_CONTEXT_def_1[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 162d761e2f4e..490e84944851 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -484,134 +484,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, 
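The register accesses in these DCE handlers all follow the same read-modify-write idiom; a generic sketch of the pattern, using the VGA render control register shown in this hunk:

	u32 tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);	/* render disabled, all other bits preserved */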
CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp, frame_count; - int i, j; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - } - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -3012,6 +2884,8 @@ static int dce_v10_0_hw_init(void *handle) dce_v10_0_init_golden_registers(adev); + /* disable vga render */ + dce_v10_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); @@ -3724,7 +3598,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { - .set_vga_render_state = &dce_v10_0_set_vga_render_state, .bandwidth_update = &dce_v10_0_bandwidth_update, .vblank_get_counter = &dce_v10_0_vblank_get_counter, .vblank_wait = &dce_v10_0_vblank_wait, @@ -3737,8 +3610,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { .page_flip_get_scanoutpos = 
&dce_v10_0_crtc_get_scanoutpos, .add_encoder = &dce_v10_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v10_0_stop_mc_access, - .resume_mc_access = &dce_v10_0_resume_mc_access, }; static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f420e5815edb..921c6f772f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -499,79 +499,6 @@ static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 1 - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - /*it is correct only for RGB ; black is 0*/ - WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -3073,6 +3000,8 @@ static int dce_v11_0_hw_init(void *handle) dce_v11_0_init_golden_registers(adev); + /* disable vga render */ + dce_v11_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_crtc_powergate_init(adev); amdgpu_atombios_encoder_init_dig(adev); @@ -3793,7 +3722,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, } static const struct 
amdgpu_display_funcs dce_v11_0_display_funcs = { - .set_vga_render_state = &dce_v11_0_set_vga_render_state, .bandwidth_update = &dce_v11_0_bandwidth_update, .vblank_get_counter = &dce_v11_0_vblank_get_counter, .vblank_wait = &dce_v11_0_vblank_wait, @@ -3806,8 +3734,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, .add_encoder = &dce_v11_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v11_0_stop_mc_access, - .resume_mc_access = &dce_v11_0_resume_mc_access, }; static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index f3c422e93ed1..bcd9521237f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -392,117 +392,6 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) return mmDC_GPIO_HPD_A; } -static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc) -{ - if (crtc >= adev->mode_info.num_crtc) - return 0; - else - return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); -} - -static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp, frame_count; - int i, j; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - WREG32(mmVGA_RENDER_CONTROL, 0); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK; - if (crtc_enabled) { - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - - if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) { - dce_v6_0_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK; - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = evergreen_get_vblank_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (evergreen_get_vblank_counter(adev, i) != frame_count) - break; - udelay(1); - } - - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK; - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i, j; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start); - - /* unlock regs and 
wait for update */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 0) { - tmp &= ~0x7; - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) { - tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK; - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (tmp & 1) { - tmp &= ~1; - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0) - break; - udelay(1); - } - } - } - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); - -} - static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -2860,6 +2749,8 @@ static int dce_v6_0_hw_init(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* disable vga render */ + dce_v6_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); @@ -3512,7 +3403,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { - .set_vga_render_state = &dce_v6_0_set_vga_render_state, .bandwidth_update = &dce_v6_0_bandwidth_update, .vblank_get_counter = &dce_v6_0_vblank_get_counter, .vblank_wait = &dce_v6_0_vblank_wait, @@ -3525,8 +3415,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos, .add_encoder = &dce_v6_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v6_0_stop_mc_access, - .resume_mc_access = &dce_v6_0_resume_mc_access, }; static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index b19aa39f647d..609438fe8584 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -419,81 +419,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 1 - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - /*it is correct only for RGB ; black is 0*/ - WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - mdelay(20); -#else - /* XXX this is a hack to avoid 
strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - mdelay(20); - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -2857,6 +2782,8 @@ static int dce_v8_0_hw_init(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* disable vga render */ + dce_v8_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); @@ -3561,7 +3488,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { - .set_vga_render_state = &dce_v8_0_set_vga_render_state, .bandwidth_update = &dce_v8_0_bandwidth_update, .vblank_get_counter = &dce_v8_0_vblank_get_counter, .vblank_wait = &dce_v8_0_vblank_wait, @@ -3574,8 +3500,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos, .add_encoder = &dce_v8_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v8_0_stop_mc_access, - .resume_mc_access = &dce_v8_0_resume_mc_access, }; static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index ecf34bc77a63..5ed919e45351 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -95,62 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev) return 0; } -static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - dce_v6_0_disable_dce(adev); - break; -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KAVERI: - case CHIP_KABINI: - case CHIP_MULLINS: - dce_v8_0_disable_dce(adev); - break; -#endif - case CHIP_FIJI: - case CHIP_TONGA: - dce_v10_0_disable_dce(adev); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - 
dce_v11_0_disable_dce(adev); - break; - case CHIP_TOPAZ: -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_HAINAN: -#endif - /* no DCE */ - return; - default: - DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); - } - - return; -} -static void dce_virtual_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - return; -} - -static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, - bool render) -{ - return; -} - /** * dce_virtual_bandwidth_update - program display watermarks * @@ -499,6 +443,45 @@ static int dce_virtual_sw_fini(void *handle) static int dce_virtual_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + dce_v6_0_disable_dce(adev); + break; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + dce_v8_0_disable_dce(adev); + break; +#endif + case CHIP_FIJI: + case CHIP_TONGA: + dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + dce_v11_0_disable_dce(adev); + break; + case CHIP_TOPAZ: +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: +#endif + /* no DCE */ + break; + default: + DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); + } return 0; } @@ -654,7 +637,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_virtual_display_funcs = { - .set_vga_render_state = &dce_virtual_set_vga_render_state, .bandwidth_update = &dce_virtual_bandwidth_update, .vblank_get_counter = &dce_virtual_vblank_get_counter, .vblank_wait = &dce_virtual_vblank_wait, @@ -667,8 +649,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = { .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, .add_encoder = NULL, .add_connector = NULL, - .stop_mc_access = &dce_virtual_stop_mc_access, - .resume_mc_access = &dce_virtual_resume_mc_access, }; static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) @@ -786,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) { - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1; adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 5173ca1fd159..4ac85f47f287 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1573,7 +1573,7 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) static void gfx_v6_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 37b45e4403d1..17b7c6934b0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2021,7 +2021,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) */ static void gfx_v7_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; 
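The same bump from 7 to 8 scratch registers is applied across gfx_v6/v7/v8/v9. With eight registers the derived free mask covers SCRATCH_REG0 through SCRATCH_REG7; expanded for illustration:

	/* free_mask = (1u << num_reg) - 1 = (1u << 8) - 1 = 0xff */
	adev->gfx.scratch.free_mask = 0xff;	/* all eight scratch regs allocatable */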
adev->gfx.scratch.reg_base = mmSCRATCH_REG0; adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index aa5a50f5eac8..05436b8730b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -193,8 +193,8 @@ static const u32 tonga_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 tonga_mgcg_cgcg_init[] = @@ -303,8 +303,8 @@ static const u32 polaris11_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 golden_settings_polaris10_a11[] = @@ -336,8 +336,8 @@ static const u32 polaris10_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 fiji_golden_common_all[] = @@ -348,8 +348,8 @@ static const u32 fiji_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009, }; @@ -436,8 +436,8 @@ static const u32 iceland_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 iceland_mgcg_cgcg_init[] = @@ -532,8 +532,8 @@ static const u32 cz_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 cz_mgcg_cgcg_init[] = @@ -637,8 +637,8 @@ static const u32 stoney_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - 
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 stoney_mgcg_cgcg_init[] = @@ -750,7 +750,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gfx_v8_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } @@ -4564,7 +4564,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. If so, the * definition of queue_mask needs updating */ - if (WARN_ON(i > (sizeof(queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(queue_mask)*8))) { DRM_ERROR("Invalid KCQ enabled: %d\n", i); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3a0b69b09ed6..435db6f5efcf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -211,7 +211,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) static void gfx_v9_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } @@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) { - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + u32 data; - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (se_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); + + if (se_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (sh_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } else { + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); } @@ -2425,7 +2427,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. 
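The comparison fix here (applied identically in gfx_v8_0 and gfx_v9_0) matters because queue_mask is a 64-bit bitmap: valid bit indices are 0..63, and the old `i > 64` test only fired at i == 65, silently letting i == 64 through. The corrected guard:

	if (WARN_ON(i >= sizeof(queue_mask) * 8))	/* now rejects i == 64 as well */
		break;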
If so, the * definition of queue_mask needs updating */ - if (WARN_ON(i > (sizeof(queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(queue_mask)*8))) { DRM_ERROR("Invalid KCQ enabled: %d\n", i); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index a42f483767e7..408723ef157c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) gfxhub_v1_0_init_gart_pt_regs(adev); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->mc.gtt_start >> 12)); + (u32)(adev->mc.gart_start >> 12)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->mc.gtt_start >> 44)); + (u32)(adev->mc.gart_start >> 44)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->mc.gtt_end >> 12)); + (u32)(adev->mc.gart_end >> 12)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->mc.gtt_end >> 44)); + (u32)(adev->mc.gart_end >> 44)); } static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) @@ -129,7 +129,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); /* XXX for emulation, Refer to closed source code.*/ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); @@ -144,6 +144,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); tmp = mmVM_L2_CNTL3_DEFAULT; + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; @@ -206,6 +208,9 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. 
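Background for the comment above, roughly: with retry faults enabled, a faulting wave replays the access and immediately faults again, so a single bad mapping can flood the interrupt ring; returning a no-retry XNACK kills the access after a single report. The write that closes out this comment:

	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);	/* one fault, no replay */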
*/ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d0214d942bfc..93c45f26b7c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -66,14 +66,10 @@ static const u32 crtc_offsets[6] = SI_CRTC5_REGISTER_OFFSET }; -static void gmc_v6_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v6_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -90,8 +86,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev, } -static void gmc_v6_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v6_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -103,10 +98,6 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); - } static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) @@ -228,20 +219,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_vram_location(adev, &adev->mc, base); + amdgpu_gart_location(adev, mc); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; - u32 tmp; int i, j; /* Initialize HDP */ @@ -254,16 +245,23 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v6_0_mc_stop(adev, &save); - if (gmc_v6_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK); + if (adev->mode_info.num_crtc) { + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK; + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp &= ~VGA_VSTATUS_CNTL; + WREG32(mmVGA_RENDER_CONTROL, tmp); + } /* Update configuration */ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->mc.vram_start >> 12); @@ -271,13 +269,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! 
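gmc_v6_0_vram_gtt_location now derives the VRAM base from the hardware instead of assuming 0. MC_VM_FB_LOCATION holds the base in 16MB granularity in its low 16 bits, so the shift by 24 recovers a byte address; for a hypothetical register value of 0x0040:

	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;	/* base in 16MB units */
	base <<= 24;	/* 0x40 * 16MB = 1GB byte address */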
*/ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); @@ -285,7 +276,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) if (gmc_v6_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v6_0_mc_resume(adev, &save); } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) @@ -342,15 +332,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v6_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -511,8 +493,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) | (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -559,7 +541,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) gmc_v6_0_gart_flush_gpu_tlb(adev, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -987,7 +969,6 @@ static int gmc_v6_0_wait_for_idle(void *handle) static int gmc_v6_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1003,7 +984,7 @@ static int gmc_v6_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v6_0_mc_stop(adev, &save); + gmc_v6_0_mc_stop(adev); if (gmc_v6_0_wait_for_idle(adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1023,7 +1004,7 @@ static int gmc_v6_0_soft_reset(void *handle) udelay(50); - gmc_v6_0_mc_resume(adev, &save); + gmc_v6_0_mc_resume(adev); udelay(50); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 7e9ea53edf8b..4a9e84062874 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -37,6 +37,9 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + #include "amdgpu_atombios.h" static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); @@ -76,14 +79,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) } } -static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - 
amdgpu_display_stop_mc_access(adev, save); - gmc_v7_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -99,8 +98,7 @@ static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -112,9 +110,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); } /** @@ -242,15 +237,17 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { /* leave room for at least 1024M GTT */ dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_vram_location(adev, &adev->mc, base); + amdgpu_gart_location(adev, mc); } /** @@ -263,7 +260,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; u32 tmp; int i, j; @@ -277,13 +273,20 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v7_0_mc_stop(adev, &save); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } + if (adev->mode_info.num_crtc) { + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + } /* Update configuration */ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->mc.vram_start >> 12); @@ -291,20 +294,12 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! */ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v7_0_mc_resume(adev, &save); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -391,15 +386,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. 
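[Editor's sketch] The VGA lockout hunks rely on the REG_SET_FIELD() read-modify-write idiom. Below is a self-contained sketch of how a macro of that shape behaves; the VGA_HDP_CONTROL mask/shift values are stand-ins, not the real generated definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical generated definitions (stand-ins only). */
    #define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK   0x00000010
    #define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE__SHIFT 0x4

    /* Same shape as the kernel macro: clear the field, or in the new
     * value, leave every other bit untouched. */
    #define REG_SET_FIELD(orig, reg, field, val) \
        (((orig) & ~reg##__##field##_MASK) | \
         (((val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

    int main(void)
    {
        uint32_t tmp = 0xA5A5A5A5;  /* pretend RREG32(mmVGA_HDP_CONTROL) */
        tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
        printf("0x%08x\n", tmp);    /* prints 0xa5a5a5b5: only bit 4 changed */
        return 0;
    }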
- */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v7_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -611,8 +598,8 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); WREG32(mmVM_L2_CNTL3, tmp); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -666,7 +653,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) gmc_v7_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -1138,7 +1125,6 @@ static int gmc_v7_0_wait_for_idle(void *handle) static int gmc_v7_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1154,7 +1140,7 @@ static int gmc_v7_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v7_0_mc_stop(adev, &save); + gmc_v7_0_mc_stop(adev); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1175,7 +1161,7 @@ static int gmc_v7_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - gmc_v7_0_mc_resume(adev, &save); + gmc_v7_0_mc_resume(adev); udelay(50); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index cc9f88057cd5..85c937b5e40b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -35,6 +35,9 @@ #include "oss/oss_3_0_d.h" #include "oss/oss_3_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + #include "vid.h" #include "vi.h" @@ -161,14 +164,10 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v8_0_wait_for_idle(adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -184,8 +183,7 @@ static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -197,9 +195,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); } /** @@ -404,15 +399,20 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 
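[Editor's sketch] Each gmc_v*_mc_init() used to open-code the default GART sizing shown in the removed hunk; the series replaces that with a shared amdgpu_gart_set_defaults(). A hedged sketch of the selection logic the removed code implemented; the helper body and the default-size constant are assumptions based on the hunk, with -1 as the "not overridden" sentinel:

    #include <stdint.h>
    #include <stdio.h>

    #define DEFAULT_GTT_SIZE_MB 3072ULL   /* illustrative value only */

    /* -1 means "not overridden on the module command line". */
    static uint64_t gart_size_bytes(long long gart_size_mb, uint64_t vram_size)
    {
        if (gart_size_mb == -1) {
            uint64_t dflt = DEFAULT_GTT_SIZE_MB << 20;
            return dflt > vram_size ? dflt : vram_size;  /* max(default, vram) */
        }
        return (uint64_t)gart_size_mb << 20;
    }

    int main(void)
    {
        /* 2 GiB of VRAM, no override: the default wins. */
        printf("%llu MiB\n",
               (unsigned long long)(gart_size_bytes(-1, 2ULL << 30) >> 20));
        return 0;
    }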
base = 0; + + if (!amdgpu_sriov_vf(adev)) + base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { /* leave room for at least 1024M GTT */ dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_vram_location(adev, &adev->mc, base); + amdgpu_gart_location(adev, mc); } /** @@ -425,7 +425,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; u32 tmp; int i, j; @@ -439,13 +438,20 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v8_0_mc_stop(adev, &save); if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } + if (adev->mode_info.num_crtc) { + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + } /* Update configuration */ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->mc.vram_start >> 12); @@ -453,20 +459,23 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! */ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); + + if (amdgpu_sriov_vf(adev)) { + tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; + tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); + WREG32(mmMC_VM_FB_LOCATION, tmp); + /* XXX double check these! */ + WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); + WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); + WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); + } + WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v8_0_mc_resume(adev, &save); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -553,15 +562,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. 
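[Editor's sketch] The SR-IOV path retained in gmc_v8_0_mc_program() packs vram_start/vram_end back into MC_VM_FB_LOCATION: both in 16 MiB units, end in the high half-word, base in the low one. A standalone round-trip sketch of that encoding and of the matching base extraction (addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vram_start = 0x000000F400000000ULL;  /* hypothetical */
        uint64_t vram_end   = 0x000000F4FFFFFFFFULL;

        /* Pack as in the retained SR-IOV hunk. */
        uint32_t tmp = ((uint32_t)((vram_end >> 24) & 0xFFFF)) << 16;
        tmp |= (uint32_t)((vram_start >> 24) & 0xFFFF);

        /* Unpack the base the way vram_gtt_location() now does. */
        uint64_t base = (uint64_t)(tmp & 0xFFFF) << 24;
        printf("FB_LOCATION=0x%08x base=0x%llx matches_start=%d\n",
               tmp, (unsigned long long)base, base == vram_start);
        return 0;
    }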
- */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v8_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -813,8 +814,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0); WREG32(mmVM_L2_CNTL4, tmp); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -869,7 +870,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) gmc_v8_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -1260,7 +1261,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle) if (!adev->mc.srbm_soft_reset) return 0; - gmc_v8_0_mc_stop(adev, &adev->mc.save); + gmc_v8_0_mc_stop(adev); if (gmc_v8_0_wait_for_idle(adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1306,7 +1307,7 @@ static int gmc_v8_0_post_soft_reset(void *handle) if (!adev->mc.srbm_soft_reset) return 0; - gmc_v8_0_mc_resume(adev, &adev->mc.save); + gmc_v8_0_mc_resume(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 175ba5f9691c..c22899a08106 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -23,11 +23,14 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "gmc_v9_0.h" +#include "amdgpu_atomfirmware.h" #include "vega10/soc15ip.h" #include "vega10/HDP/hdp_4_0_offset.h" #include "vega10/HDP/hdp_4_0_sh_mask.h" #include "vega10/GC/gc_9_0_sh_mask.h" +#include "vega10/DC/dce_12_0_offset.h" +#include "vega10/DC/dce_12_0_sh_mask.h" #include "vega10/vega10_enum.h" #include "soc15_common.h" @@ -419,8 +422,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, if (!amdgpu_sriov_vf(adev)) base = mmhub_v1_0_get_fb_location(adev); amdgpu_vram_location(adev, &adev->mc, base); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_gart_location(adev, mc); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); @@ -442,43 +444,46 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) u32 tmp; int chansize, numchan; - /* hbm memory channel size */ - chansize = 128; - - tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); - tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; - tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; - switch (tmp) { - case 0: - default: - numchan = 1; - break; - case 1: - numchan = 2; - break; - case 2: - numchan = 0; - break; - case 3: - numchan = 4; - break; - case 4: - numchan = 0; - break; - case 5: - numchan = 8; - break; - case 6: - numchan = 0; - break; - case 7: - numchan = 16; - break; - case 8: - numchan = 2; - break; + adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); + if 
(!adev->mc.vram_width) { + /* hbm memory channel size */ + chansize = 128; + + tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); + tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; + tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + switch (tmp) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 0; + break; + case 3: + numchan = 4; + break; + case 4: + numchan = 0; + break; + case 5: + numchan = 8; + break; + case 6: + numchan = 0; + break; + case 7: + numchan = 16; + break; + case 8: + numchan = 2; + break; + } + adev->mc.vram_width = numchan * chansize; } - adev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? */ adev->mc.aper_base = pci_resource_start(adev->pdev, 0); @@ -494,15 +499,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v9_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -537,10 +534,20 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->mc.invalidate_lock); - if (adev->flags & AMD_IS_APU) { + switch (adev->asic_type) { + case CHIP_RAVEN: adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - amdgpu_vm_adjust_size(adev, 64); - } else { + if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { + adev->vm_manager.vm_size = 1U << 18; + adev->vm_manager.block_size = 9; + adev->vm_manager.num_level = 3; + } else { + /* vm_size is 64GB for legacy 2-level page support*/ + amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.num_level = 1; + } + break; + case CHIP_VEGA10: /* XXX Don't know how to get VRAM type yet. 
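[Editor's sketch] gmc_v9_0_mc_init() now asks atomfirmware for the VRAM width first and only falls back to decoding DF_CS_AON0_DramBaseAddress0. The fallback's switch is just a lookup from the 4-bit IntLvNumChan field to a channel count; a table-driven sketch of the same mapping (channel counts copied from the hunk, 128-bit HBM channels as in its comment):

    #include <stdio.h>

    int main(void)
    {
        /* IntLvNumChan encoding -> number of channels (0 = invalid). */
        static const int numchan[] = { 1, 2, 0, 4, 0, 8, 0, 16, 2 };
        const int chansize = 128;   /* HBM channel width in bits */
        unsigned tmp = 5;           /* pretend field read from the DF register */

        int chans = tmp < sizeof(numchan)/sizeof(numchan[0]) ? numchan[tmp] : 1;
        printf("vram_width = %d bits\n", chans * chansize);
        return 0;
    }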
*/ adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM; /* @@ -550,11 +557,16 @@ static int gmc_v9_0_sw_init(void *handle) */ adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", - adev->vm_manager.vm_size, - adev->vm_manager.block_size); + adev->vm_manager.num_level = 3; + break; + default: + break; } + DRM_INFO("vm size is %llu GB, block size is %u-bit\n", + adev->vm_manager.vm_size, + adev->vm_manager.block_size); + /* This interrupt is VMC page fault.*/ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0, &adev->mc.vm_fault); @@ -619,11 +631,6 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS; - /* TODO: fix num_level for APU when updating vm size and block size */ - if (adev->flags & AMD_IS_APU) - adev->vm_manager.num_level = 1; - else - adev->vm_manager.num_level = 3; amdgpu_vm_manager_init(adev); return 0; @@ -731,7 +738,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) gmc_v9_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -745,6 +752,20 @@ static int gmc_v9_0_hw_init(void *handle) /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); + if (adev->mode_info.num_crtc) { + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp); + } + r = gmc_v9_0_gart_enable(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 9804318f3488..ad8def3cc343 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) mmhub_v1_0_init_gart_pt_regs(adev); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->mc.gtt_start >> 12)); + (u32)(adev->mc.gart_start >> 12)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->mc.gtt_start >> 44)); + (u32)(adev->mc.gart_start >> 44)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->mc.gtt_end >> 12)); + (u32)(adev->mc.gart_end >> 12)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->mc.gtt_end >> 44)); + (u32)(adev->mc.gart_end >> 44)); } static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) @@ -143,7 +143,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); /* XXX for emulation, Refer to closed source code.*/ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); @@ -158,6 +158,8 @@ static void mmhub_v1_0_init_cache_regs(struct 
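[Editor's sketch] mmhub_v1_0_init_gart_aperture_regs() above splits each GART address across a LO32/HI32 register pair: the 32-bit truncation of addr >> 12 carries bits 12..43, and addr >> 44 carries what remains of a 48-bit address. A quick sketch of the split on a hypothetical address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gart_start = 0x0000F123DEADB000ULL; /* hypothetical, 4K aligned */
        uint32_t lo = (uint32_t)(gart_start >> 12);  /* bits 12..43 */
        uint32_t hi = (uint32_t)(gart_start >> 44);  /* bits 44..47 */
        printf("LO32=0x%08x HI32=0x%x\n", lo, hi);
        return 0;
    }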
amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); tmp = mmVM_L2_CNTL3_DEFAULT; + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; @@ -222,6 +224,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); @@ -245,28 +250,28 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) } struct pctl_data { - uint32_t index; - uint32_t data; + uint32_t index; + uint32_t data; }; -const struct pctl_data pctl0_data[] = { - {0x0, 0x7a640}, - {0x9, 0x2a64a}, - {0xd, 0x2a680}, - {0x11, 0x6a684}, - {0x19, 0xea68e}, - {0x29, 0xa69e}, - {0x2b, 0x34a6c0}, - {0x61, 0x83a707}, - {0xe6, 0x8a7a4}, - {0xf0, 0x1a7b8}, - {0xf3, 0xfa7cc}, - {0x104, 0x17a7dd}, - {0x11d, 0xa7dc}, - {0x11f, 0x12a7f5}, - {0x133, 0xa808}, - {0x135, 0x12a810}, - {0x149, 0x7a82c} +static const struct pctl_data pctl0_data[] = { + {0x0, 0x7a640}, + {0x9, 0x2a64a}, + {0xd, 0x2a680}, + {0x11, 0x6a684}, + {0x19, 0xea68e}, + {0x29, 0xa69e}, + {0x2b, 0x34a6c0}, + {0x61, 0x83a707}, + {0xe6, 0x8a7a4}, + {0xf0, 0x1a7b8}, + {0xf3, 0xfa7cc}, + {0x104, 0x17a7dd}, + {0x11d, 0xa7dc}, + {0x11f, 0x12a7f5}, + {0x133, 0xa808}, + {0x135, 0x12a810}, + {0x149, 0x7a82c} }; #define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0])) @@ -274,32 +279,39 @@ const struct pctl_data pctl0_data[] = { #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640 #define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833 -const struct pctl_data pctl1_data[] = { - {0x0, 0x39a000}, - {0x3b, 0x44a040}, - {0x81, 0x2a08d}, - {0x85, 0x6ba094}, - {0xf2, 0x18a100}, - {0x10c, 0x4a132}, - {0x112, 0xca141}, - {0x120, 0x2fa158}, - {0x151, 0x17a1d0}, - {0x16a, 0x1a1e9}, - {0x16d, 0x13a1ec}, - {0x182, 0x7a201}, - {0x18b, 0x3a20a}, - {0x190, 0x7a580}, - {0x199, 0xa590}, - {0x19b, 0x4a594}, - {0x1a1, 0x1a59c}, - {0x1a4, 0x7a82c}, - {0x1ad, 0xfa7cc}, - {0x1be, 0x17a7dd}, - {0x1d7, 0x12a810} +static const struct pctl_data pctl1_data[] = { + {0x0, 0x39a000}, + {0x3b, 0x44a040}, + {0x81, 0x2a08d}, + {0x85, 0x6ba094}, + {0xf2, 0x18a100}, + {0x10c, 0x4a132}, + {0x112, 0xca141}, + {0x120, 0x2fa158}, + {0x151, 0x17a1d0}, + {0x16a, 0x1a1e9}, + {0x16d, 0x13a1ec}, + {0x182, 0x7a201}, + {0x18b, 0x3a20a}, + {0x190, 0x7a580}, + {0x199, 0xa590}, + {0x19b, 0x4a594}, + {0x1a1, 0x1a59c}, + {0x1a4, 0x7a82c}, + {0x1ad, 0xfa7cc}, + {0x1be, 0x17a7dd}, + {0x1d7, 0x12a810}, + {0x1eb, 0x4000a7e1}, + {0x1ec, 0x5000a7f5}, + {0x1ed, 0x4000a7e2}, + {0x1ee, 0x5000a7dc}, + {0x1ef, 0x4000a7e3}, + {0x1f0, 0x5000a7f6}, + {0x1f1, 0x5000a7e4} }; #define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0])) -#define PCTL1_RENG_EXEC_END_PTR 0x1ea +#define PCTL1_RENG_EXEC_END_PTR 0x1f1 #define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000 #define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d #define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580 diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index bde3ca3c21c1..2812d88a8bdd 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -72,21 +72,6 @@ static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) reg); } -static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev, - enum idh_request req) -{ - u32 reg; - - reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); - reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, - MSGBUF_DATA, req); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), - reg); - - xgpu_ai_mailbox_set_valid(adev, true); -} - static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, enum idh_event event) { @@ -154,13 +139,25 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) return r; } - -static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, - enum idh_request req) -{ +static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, + enum idh_request req, u32 data1, u32 data2, u32 data3) { + u32 reg; int r; - xgpu_ai_mailbox_trans_msg(adev, req); + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); + reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, + MSGBUF_DATA, req); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), + reg); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1), + data1); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2), + data2); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3), + data3); + + xgpu_ai_mailbox_set_valid(adev, true); /* start to poll ack */ r = xgpu_ai_poll_ack(adev); @@ -168,6 +165,14 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, pr_err("Doesn't get ack from pf, continue\n"); xgpu_ai_mailbox_set_valid(adev, false); +} + +static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, + enum idh_request req) +{ + int r; + + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); /* start to check msg if request is idh_req_gpu_init_access */ if (req == IDH_REQ_GPU_INIT_ACCESS || @@ -342,4 +347,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, .reset_gpu = xgpu_ai_request_reset, + .trans_msg = xgpu_ai_mailbox_trans_msg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 9aefc44d2c34..1e91b9a1c591 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -31,7 +31,9 @@ enum idh_request { IDH_REL_GPU_INIT_ACCESS, IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, - IDH_REQ_GPU_RESET_ACCESS + IDH_REQ_GPU_RESET_ACCESS, + + IDH_LOG_VF_ERROR = 200, }; enum idh_event { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 171a658135b5..c25a831f94ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -613,4 +613,5 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = { .req_full_gpu = xgpu_vi_request_full_gpu_access, .rel_full_gpu = xgpu_vi_release_full_gpu_access, .reset_gpu = xgpu_vi_request_reset, + .trans_msg = NULL, /* Does not need to trans VF errors to host. 
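[Editor's sketch] The refactored xgpu_ai_mailbox_trans_msg() now carries three payload dwords in TRN_DW1..DW3, raises the valid bit, and polls for the host's ack before dropping it. A simulated user-space sketch of that transmit/ack handshake; register I/O is replaced by a plain array, and the retry count is an assumption (the real driver uses xgpu_ai_poll_ack() with delays):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[8];            /* stand-in mailbox registers */
    enum { TRN_DW0, TRN_DW1, TRN_DW2, TRN_DW3, CONTROL, ACK };
    #define VALID_BIT 0x1

    static int poll_ack(int tries)
    {
        while (tries--)
            if (regs[ACK] & 0x1)        /* host acknowledged */
                return 0;               /* real code delays between polls */
        return -1;
    }

    static int trans_msg(uint32_t req, uint32_t d1, uint32_t d2, uint32_t d3)
    {
        regs[TRN_DW0] = req;            /* message id */
        regs[TRN_DW1] = d1;             /* payload, e.g. VF error data */
        regs[TRN_DW2] = d2;
        regs[TRN_DW3] = d3;
        regs[CONTROL] |= VALID_BIT;     /* tell the host a message is pending */
        regs[ACK] = 1;                  /* simulate the host answering */
        int r = poll_ack(100);
        if (r)
            fprintf(stderr, "Doesn't get ack from pf, continue\n");
        regs[CONTROL] &= ~VALID_BIT;    /* always drop the valid bit */
        return r;
    }

    int main(void) { return trans_msg(200 /* IDH_LOG_VF_ERROR */, 1, 2, 3); }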
*/ }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h index 2db741131bc6..c791d73d2d54 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h @@ -32,7 +32,9 @@ enum idh_request { IDH_REL_GPU_INIT_ACCESS, IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, - IDH_REQ_GPU_RESET_ACCESS + IDH_REQ_GPU_RESET_ACCESS, + + IDH_LOG_VF_ERROR = 200, }; /* VI mailbox messages data */ diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 1e272f785def..045988b18bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -32,6 +32,7 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 +#define smnPCIE_CONFIG_CNTL 0x11180044 u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { @@ -67,7 +68,7 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable) void nbio_v6_1_hdp_flush(struct amdgpu_device *adev) { - WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); } u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) @@ -256,3 +257,15 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } } + +void nbio_v6_1_init_registers(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); +} diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h index f6f8bc045518..686e4b4d296a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h @@ -50,5 +50,6 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable); void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags); void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev); +void nbio_v6_1_init_registers(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index aa04632523fa..11b70d601922 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -65,7 +65,7 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable) void nbio_v7_0_hdp_flush(struct amdgpu_device *adev) { - WREG32_SOC15(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); } u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 2258323a3c26..f7cf994b1da2 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -86,6 +86,52 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type * return 0; } +int psp_v10_0_init_microcode(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + const char *chip_name; + char fw_name[30]; + int err = 0; + const struct psp_firmware_header_v1_0 *hdr; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_RAVEN: + chip_name = "raven"; + break; + default: BUG(); + } + + 
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); + err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.asd_fw); + if (err) + goto out; + + hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; + adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version); + adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + adev->psp.asd_start_addr = (uint8_t *)hdr + + le32_to_cpu(hdr->header.ucode_array_offset_bytes); + + return 0; +out: + if (err) { + dev_err(adev->dev, + "psp v10.0: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; + } + + return err; +} + int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd) { int ret; @@ -110,7 +156,6 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; - unsigned int psp_ring_reg = 0; struct psp_ring *ring; struct amdgpu_device *adev = psp->adev; @@ -130,6 +175,16 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) return ret; } + return 0; +} + +int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) +{ + int ret = 0; + unsigned int psp_ring_reg = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + /* Write low address of the ring to C2PMSG_69 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); @@ -143,13 +198,42 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) psp_ring_reg = ring_type; psp_ring_reg = psp_ring_reg << 16; WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* There might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) in C2PMSG_64 */ - psp_ring_reg = 0; - while ((psp_ring_reg & 0x80000000) == 0) { - psp_ring_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64); - } + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, false); - return 0; + return ret; +} + +int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring; + unsigned int psp_ring_reg = 0; + struct amdgpu_device *adev = psp->adev; + + ring = &psp->km_ring; + + /* Write the ring destroy command to C2PMSG_64 */ + psp_ring_reg = 3 << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* There might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + + return ret; } int psp_v10_0_cmd_submit(struct psp_context *psp, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h index 2022b7b7151e..e76cde2f01f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h @@ -27,10 +27,15 @@ #include "amdgpu_psp.h" +extern int psp_v10_0_init_microcode(struct psp_context *psp); extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct 
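[Editor's sketch] psp_v10_0_init_microcode() above locates the ASD payload by reading little-endian fields out of the firmware header. A hedged sketch of that pattern on a fabricated in-memory blob; the struct here is a stand-in with only the fields the hunk touches, not the real psp_firmware_header_v1_0 layout:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t le32(const uint8_t *p)  /* le32_to_cpu for a byte stream */
    {
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        uint8_t blob[32] = { 0 };   /* fabricated firmware image */
        blob[0] = 0x2a;             /* ucode_version */
        blob[4] = 0x10;             /* ucode_size_bytes = 16 */
        blob[8] = 12;               /* ucode_array_offset_bytes = 12 */

        uint32_t ver  = le32(blob + 0);
        uint32_t size = le32(blob + 4);
        const uint8_t *start = blob + le32(blob + 8);  /* hdr + offset */
        printf("ver=0x%x size=%u first_byte=%u\n", ver, size, start[0]);
        return 0;
    }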
psp_gfx_cmd_resp *cmd); extern int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type); +extern int psp_v10_0_ring_create(struct psp_context *psp, + enum psp_ring_type ring_type); +extern int psp_v10_0_ring_destroy(struct psp_context *psp, + enum psp_ring_type ring_type); extern int psp_v10_0_cmd_submit(struct psp_context *psp, struct amdgpu_firmware_info *ucode, uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index c98d77d0c8f8..2a535a4b8d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -237,11 +237,9 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); -#if 0 ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, true); -#endif return ret; } @@ -341,10 +339,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), 0x80000000, 0x80000000, false); - if (ring->ring_mem) - amdgpu_bo_free_kernel(&adev->firmware.rbuf, - &ring->ring_mem_mc_addr, - (void **)&ring->ring_mem); + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1d766ae98dc8..b1de44f22824 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -551,17 +551,53 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); - if (enable) + if (enable) { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 1); - else + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + if (amdgpu_sdma_phase_quantum) { + WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i], + phase_quantum); + WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i], + phase_quantum); + } + } else { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 0); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + } + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4a65697ccc94..591f3e7fb508 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -493,13 +493,45 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev) */ static 
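[Editor's sketch] The sdma_v3_0 hunk above (and its sdma_v4_0 twin below) encodes the amdgpu_sdma_phase_quantum module parameter into a VALUE/UNIT pair: halve the value with round-up until it fits the VALUE field, counting shifts into UNIT, then clamp both if UNIT overflows. A standalone sketch of the same arithmetic; the field widths and final packing layout are assumptions, the driver uses the generated SDMA0_PHASE0_QUANTUM masks and shifts:

    #include <stdio.h>

    #define VALUE_MAX 0x3FFFu   /* hypothetical 14-bit VALUE field */
    #define UNIT_MAX  0xFu      /* hypothetical 4-bit UNIT field */

    static unsigned encode_quantum(unsigned value)
    {
        unsigned unit = 0;

        while (value > VALUE_MAX) {   /* halve, rounding up, count shifts */
            value = (value + 1) >> 1;
            unit++;
        }
        if (unit > UNIT_MAX) {        /* the WARN_ONCE clamp path */
            value = VALUE_MAX;
            unit = UNIT_MAX;
            fprintf(stderr, "clamping sdma_phase_quantum\n");
        }
        return value << 4 | unit;     /* pack VALUE/UNIT (layout assumed) */
    }

    int main(void)
    {
        printf("0x%x\n", encode_quantum(1000000));
        return 0;
    }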
void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL)); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, enable ? 1 : 0); + if (enable && amdgpu_sdma_phase_quantum) { + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM), + phase_quantum); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM), + phase_quantum); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM), + phase_quantum); + } WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl); } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index f45fb0f022b3..8284d5dbfc30 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1150,6 +1150,33 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev) return r; } +#define mmROM_INDEX 0x2A +#define mmROM_DATA 0x2B + +static bool si_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes) +{ + u32 *dw_ptr; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + /* set rom index to 0 */ + WREG32(mmROM_INDEX, 0); + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(mmROM_DATA); + + return true; +} + //xxx: not implemented static int si_asic_reset(struct amdgpu_device *adev) { @@ -1206,6 +1233,7 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev) static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, + .read_bios_from_rom = &si_read_bios_from_rom, .read_register = &si_read_register, .reset = &si_asic_reset, .set_vga_state = &si_vga_set_state, @@ -1385,6 +1413,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, pitcairn_mgcg_cgcg_init, (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); + break; case CHIP_VERDE: amdgpu_program_register_sequence(adev, verde_golden_registers, @@ -1409,6 +1438,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, oland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); + break; case CHIP_HAINAN: amdgpu_program_register_sequence(adev, hainan_golden_registers, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index a7ad8390981c..d63873f3f574 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -2055,6 +2055,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev) case 
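[Editor's sketch] si_read_bios_from_rom() above uses the classic index/data pair: write 0 to ROM_INDEX once, then read ROM_DATA repeatedly while the hardware auto-increments, copying ALIGN(length, 4)/4 dwords. A simulated sketch of that copy loop; the fake rom[] array and cursor stand in for the MMIO registers:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN4(x) (((x) + 3u) & ~3u)

    static const uint32_t rom[] = { 0xAA55AA55, 0x12345678, 0x9abcdef0 };
    static unsigned rom_index;          /* auto-increment cursor */

    static uint32_t read_rom_data(void) /* stands in for RREG32(mmROM_DATA) */
    {
        return rom[rom_index++];
    }

    static int read_bios(uint32_t *dw_ptr, uint32_t length_bytes)
    {
        if (!dw_ptr || !length_bytes)
            return 0;
        uint32_t length_dw = ALIGN4(length_bytes) / 4;
        rom_index = 0;                  /* WREG32(mmROM_INDEX, 0) */
        for (uint32_t i = 0; i < length_dw; i++)
            dw_ptr[i] = read_rom_data();
        return 1;
    }

    int main(void)
    {
        uint32_t buf[3];
        read_bios(buf, 10);             /* 10 bytes rounds up to 3 dwords */
        printf("0x%08x\n", buf[0]);
        return 0;
    }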
0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index a7341d88a320..f2c3a49f73a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <drm/drmP.h> #include "amdgpu.h" -#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" #include "amdgpu_ih.h" #include "amdgpu_uvd.h" #include "amdgpu_vce.h" @@ -62,8 +62,6 @@ #include "dce_virtual.h" #include "mxgpu_ai.h" -MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); - #define mmFabricConfigAccessControl 0x0410 #define mmFabricConfigAccessControl_BASE_IDX 0 #define mmFabricConfigAccessControl_DEFAULT 0x00000000 @@ -198,6 +196,50 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->didt_idx_lock, flags); } +static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); + r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); + return r; +} + +static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); +} + +static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->se_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); + r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); + return r; +} + +static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->se_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); +} + static u32 soc15_get_config_memsize(struct amdgpu_device *adev) { if (adev->flags & AMD_IS_APU) @@ -392,11 +434,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { - amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); soc15_gpu_pci_config_reset(adev); - amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false); + amdgpu_atombios_scratch_regs_engine_hung(adev, false); return 0; } @@ -524,13 +566,6 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) return nbio_v6_1_get_rev_id(adev); } - -int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - /* to be implemented in MC IP*/ - return 0; -} - static const struct amdgpu_asic_funcs soc15_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, @@ -557,6 +592,10 @@ static int soc15_common_early_init(void *handle) adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; adev->didt_rreg = &soc15_didt_rreg; adev->didt_wreg = &soc15_didt_wreg; + adev->gc_cac_rreg = &soc15_gc_cac_rreg; + adev->gc_cac_wreg = &soc15_gc_cac_wreg; + adev->se_cac_rreg = &soc15_se_cac_rreg; + adev->se_cac_wreg = &soc15_se_cac_wreg; adev->asic_funcs = &soc15_asic_funcs; @@ -681,6 +720,9 @@ static int 
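[Editor's sketch] The new soc15_gc_cac_rreg()/_wreg() helpers follow the same pattern as the DIDT accessors: take a lock, write the target register number into an IND_INDEX register, then move data through IND_DATA. A pthread-based sketch of the idiom; the two-element mmio[] stands in for the INDEX/DATA pair, and the mutex plays the role of spin_lock_irqsave(), which is what keeps the two-step access atomic:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mmio[2];                  /* [0]=IND_INDEX, [1]=IND_DATA */
    static uint32_t backing[256];             /* indirect register file */
    static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

    static uint32_t ind_rreg(uint32_t reg)
    {
        pthread_mutex_lock(&idx_lock);
        mmio[0] = reg;                        /* WREG32(IND_INDEX, reg) */
        mmio[1] = backing[mmio[0] & 0xff];    /* hardware latches the value */
        uint32_t r = mmio[1];                 /* RREG32(IND_DATA) */
        pthread_mutex_unlock(&idx_lock);
        return r;
    }

    static void ind_wreg(uint32_t reg, uint32_t v)
    {
        pthread_mutex_lock(&idx_lock);
        mmio[0] = reg;
        mmio[1] = v;                          /* WREG32(IND_DATA, v) */
        backing[mmio[0] & 0xff] = mmio[1];
        pthread_mutex_unlock(&idx_lock);
    }

    int main(void)
    {
        ind_wreg(0x12, 0xdead);
        printf("0x%x\n", ind_rreg(0x12));     /* build with -lpthread */
        return 0;
    }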
soc15_common_hw_init(void *handle) soc15_pcie_gen3_enable(adev); /* enable aspm */ soc15_program_aspm(adev); + /* setup nbio registers */ + if (!(adev->flags & AMD_IS_APU)) + nbio_v6_1_init_registers(adev); /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index e2d330eed952..7a8e4e28abb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -77,6 +77,13 @@ struct nbio_pcie_index_data { (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \ (ip##_BASE__INST##inst##_SEG4 + reg))))), value) +#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ + WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \ + (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \ + (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \ + (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \ + (ip##_BASE__INST##inst##_SEG4 + reg))))), value) + #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \ (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 602769ced3bd..42de22bbe14c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -664,7 +664,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. If so, the * definition of res.queue_mask needs updating */ - if (WARN_ON(i > (sizeof(res.queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) { pr_err("Invalid queue enabled by amdgpu: %d\n", i); break; } diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0021a1c63356..837296db9628 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1233,6 +1233,69 @@ struct atom_asic_profiling_info_v4_1 uint32_t phyclk2gfxclk_c; }; +struct atom_asic_profiling_info_v4_2 { + struct atom_common_table_header table_header; + uint32_t maxvddc; + uint32_t minvddc; + uint32_t avfs_meannsigma_acontant0; + uint32_t avfs_meannsigma_acontant1; + uint32_t avfs_meannsigma_acontant2; + uint16_t avfs_meannsigma_dc_tol_sigma; + uint16_t avfs_meannsigma_platform_mean; + uint16_t avfs_meannsigma_platform_sigma; + uint32_t gb_vdroop_table_cksoff_a0; + uint32_t gb_vdroop_table_cksoff_a1; + uint32_t gb_vdroop_table_cksoff_a2; + uint32_t gb_vdroop_table_ckson_a0; + uint32_t gb_vdroop_table_ckson_a1; + uint32_t gb_vdroop_table_ckson_a2; + uint32_t avfsgb_fuse_table_cksoff_m1; + uint32_t avfsgb_fuse_table_cksoff_m2; + uint32_t avfsgb_fuse_table_cksoff_b; + uint32_t avfsgb_fuse_table_ckson_m1; + uint32_t avfsgb_fuse_table_ckson_m2; + uint32_t avfsgb_fuse_table_ckson_b; + uint16_t max_voltage_0_25mv; + uint8_t enable_gb_vdroop_table_cksoff; + uint8_t enable_gb_vdroop_table_ckson; + uint8_t enable_gb_fuse_table_cksoff; + uint8_t enable_gb_fuse_table_ckson; + uint16_t psm_age_comfactor; + uint8_t enable_apply_avfs_cksoff_voltage; + uint8_t reserved; + uint32_t dispclk2gfxclk_a; + uint32_t dispclk2gfxclk_b; + uint32_t dispclk2gfxclk_c; + uint32_t pixclk2gfxclk_a; + uint32_t pixclk2gfxclk_b; + uint32_t pixclk2gfxclk_c; + 
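[Editor's sketch] The kfd_device_queue_manager.c one-liner above is an off-by-one fix: for a 64-bit queue_mask the valid bit indices are 0..63, so the guard must reject i == 64 as well, hence >= instead of >. A tiny demonstration of why the old test let the first out-of-range index through:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t queue_mask = 0;
        unsigned nbits = sizeof(queue_mask) * 8;    /* 64 */
        unsigned i = 64;                            /* first invalid index */

        printf("old test (i > nbits):  %s\n", i > nbits  ? "caught" : "missed");
        printf("new test (i >= nbits): %s\n", i >= nbits ? "caught" : "missed");
        /* Shifting by 64 on a 64-bit mask is undefined behaviour, which is
         * exactly what the tightened check prevents. */
        return 0;
    }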
uint32_t dcefclk2gfxclk_a; + uint32_t dcefclk2gfxclk_b; + uint32_t dcefclk2gfxclk_c; + uint32_t phyclk2gfxclk_a; + uint32_t phyclk2gfxclk_b; + uint32_t phyclk2gfxclk_c; + uint32_t acg_gb_vdroop_table_a0; + uint32_t acg_gb_vdroop_table_a1; + uint32_t acg_gb_vdroop_table_a2; + uint32_t acg_avfsgb_fuse_table_m1; + uint32_t acg_avfsgb_fuse_table_m2; + uint32_t acg_avfsgb_fuse_table_b; + uint8_t enable_acg_gb_vdroop_table; + uint8_t enable_acg_gb_fuse_table; + uint32_t acg_dispclk2gfxclk_a; + uint32_t acg_dispclk2gfxclk_b; + uint32_t acg_dispclk2gfxclk_c; + uint32_t acg_pixclk2gfxclk_a; + uint32_t acg_pixclk2gfxclk_b; + uint32_t acg_pixclk2gfxclk_c; + uint32_t acg_dcefclk2gfxclk_a; + uint32_t acg_dcefclk2gfxclk_b; + uint32_t acg_dcefclk2gfxclk_c; + uint32_t acg_phyclk2gfxclk_a; + uint32_t acg_phyclk2gfxclk_b; + uint32_t acg_phyclk2gfxclk_c; +}; /* *************************************************************************** diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 0a94f749e3c0..0214f63f52fc 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -50,6 +50,7 @@ enum cgs_ind_reg { CGS_IND_REG__UVD_CTX, CGS_IND_REG__DIDT, CGS_IND_REG_GC_CAC, + CGS_IND_REG_SE_CAC, CGS_IND_REG__AUDIO_ENDPT }; @@ -406,6 +407,8 @@ typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device); typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en); +typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock); + struct cgs_ops { /* memory management calls (similar to KFD interface) */ cgs_alloc_gpu_mem_t alloc_gpu_mem; @@ -441,6 +444,7 @@ struct cgs_ops { cgs_query_system_info query_system_info; cgs_is_virtualization_enabled_t is_virtualization_enabled; cgs_enter_safe_mode enter_safe_mode; + cgs_lock_grbm_idx lock_grbm_idx; }; struct cgs_os_ops; /* To be define in OS-specific CGS header */ @@ -517,4 +521,6 @@ struct cgs_device #define cgs_enter_safe_mode(cgs_device, en) \ CGS_CALL(enter_safe_mode, cgs_device, en) +#define cgs_lock_grbm_idx(cgs_device, lock) \ + CGS_CALL(lock_grbm_idx, cgs_device, lock) #endif /* _CGS_COMMON_H */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 720d5006ff62..cd33eb179db2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -276,7 +276,10 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_avfs_parameters *param) { uint16_t idx; + uint8_t format_revision, content_revision; + struct atom_asic_profiling_info_v4_1 *profile; + struct atom_asic_profiling_info_v4_2 *profile_v4_2; idx = GetIndexIntoMasterDataTable(asic_profiling_info); profile = (struct atom_asic_profiling_info_v4_1 *) @@ -286,76 +289,172 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, if (!profile) return -1; - param->ulMaxVddc = le32_to_cpu(profile->maxvddc); - param->ulMinVddc = le32_to_cpu(profile->minvddc); - param->ulMeanNsigmaAcontant0 = - le32_to_cpu(profile->avfs_meannsigma_acontant0); - param->ulMeanNsigmaAcontant1 = - le32_to_cpu(profile->avfs_meannsigma_acontant1); - param->ulMeanNsigmaAcontant2 = - le32_to_cpu(profile->avfs_meannsigma_acontant2); - param->usMeanNsigmaDcTolSigma = - le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma); - param->usMeanNsigmaPlatformMean = - le16_to_cpu(profile->avfs_meannsigma_platform_mean); - param->usMeanNsigmaPlatformSigma = - 
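[Editor's sketch] pp_atomfwctrl_get_avfs_information() now checks format_revision/content_revision in the common table header before choosing a parser, instead of assuming v4.1. A compact sketch of that dispatch shape; the struct names are abbreviated stand-ins and only the header fields from the hunk are used:

    #include <stdint.h>
    #include <stdio.h>

    struct common_header { uint8_t format_revision, content_revision; };
    struct profile_v4_1  { struct common_header hdr; /* v4.1 fields...       */ };
    struct profile_v4_2  { struct common_header hdr; /* v4.1 fields + ACG... */ };

    static int parse_avfs(const void *table)
    {
        const struct common_header *hdr = table;

        if (hdr->format_revision == 4 && hdr->content_revision == 1) {
            const struct profile_v4_1 *p = table;
            (void)p;                 /* copy v4.1 fields, zero the ACG params */
        } else if (hdr->format_revision == 4 && hdr->content_revision == 2) {
            const struct profile_v4_2 *p = table;
            (void)p;                 /* copy the v4.1 prefix plus ACG fields */
        } else {
            fprintf(stderr, "Invalid VBIOS AVFS ProfilingInfo Revision!\n");
            return -22;              /* -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        struct profile_v4_2 t = { { 4, 2 } };
        return parse_avfs(&t);
    }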
le16_to_cpu(profile->avfs_meannsigma_platform_sigma); - param->ulGbVdroopTableCksoffA0 = - le32_to_cpu(profile->gb_vdroop_table_cksoff_a0); - param->ulGbVdroopTableCksoffA1 = - le32_to_cpu(profile->gb_vdroop_table_cksoff_a1); - param->ulGbVdroopTableCksoffA2 = - le32_to_cpu(profile->gb_vdroop_table_cksoff_a2); - param->ulGbVdroopTableCksonA0 = - le32_to_cpu(profile->gb_vdroop_table_ckson_a0); - param->ulGbVdroopTableCksonA1 = - le32_to_cpu(profile->gb_vdroop_table_ckson_a1); - param->ulGbVdroopTableCksonA2 = - le32_to_cpu(profile->gb_vdroop_table_ckson_a2); - param->ulGbFuseTableCksoffM1 = - le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1); - param->ulGbFuseTableCksoffM2 = - le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2); - param->ulGbFuseTableCksoffB = - le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b); - param->ulGbFuseTableCksonM1 = - le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1); - param->ulGbFuseTableCksonM2 = - le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2); - param->ulGbFuseTableCksonB = - le32_to_cpu(profile->avfsgb_fuse_table_ckson_b); - - param->ucEnableGbVdroopTableCkson = - profile->enable_gb_vdroop_table_ckson; - param->ucEnableGbFuseTableCkson = - profile->enable_gb_fuse_table_ckson; - param->usPsmAgeComfactor = - le16_to_cpu(profile->psm_age_comfactor); - - param->ulDispclk2GfxclkM1 = - le32_to_cpu(profile->dispclk2gfxclk_a); - param->ulDispclk2GfxclkM2 = - le32_to_cpu(profile->dispclk2gfxclk_b); - param->ulDispclk2GfxclkB = - le32_to_cpu(profile->dispclk2gfxclk_c); - param->ulDcefclk2GfxclkM1 = - le32_to_cpu(profile->dcefclk2gfxclk_a); - param->ulDcefclk2GfxclkM2 = - le32_to_cpu(profile->dcefclk2gfxclk_b); - param->ulDcefclk2GfxclkB = - le32_to_cpu(profile->dcefclk2gfxclk_c); - param->ulPixelclk2GfxclkM1 = - le32_to_cpu(profile->pixclk2gfxclk_a); - param->ulPixelclk2GfxclkM2 = - le32_to_cpu(profile->pixclk2gfxclk_b); - param->ulPixelclk2GfxclkB = - le32_to_cpu(profile->pixclk2gfxclk_c); - param->ulPhyclk2GfxclkM1 = - le32_to_cpu(profile->phyclk2gfxclk_a); - param->ulPhyclk2GfxclkM2 = - le32_to_cpu(profile->phyclk2gfxclk_b); - param->ulPhyclk2GfxclkB = - le32_to_cpu(profile->phyclk2gfxclk_c); + format_revision = ((struct atom_common_table_header *)profile)->format_revision; + content_revision = ((struct atom_common_table_header *)profile)->content_revision; + + if (format_revision == 4 && content_revision == 1) { + param->ulMaxVddc = le32_to_cpu(profile->maxvddc); + param->ulMinVddc = le32_to_cpu(profile->minvddc); + param->ulMeanNsigmaAcontant0 = + le32_to_cpu(profile->avfs_meannsigma_acontant0); + param->ulMeanNsigmaAcontant1 = + le32_to_cpu(profile->avfs_meannsigma_acontant1); + param->ulMeanNsigmaAcontant2 = + le32_to_cpu(profile->avfs_meannsigma_acontant2); + param->usMeanNsigmaDcTolSigma = + le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma); + param->usMeanNsigmaPlatformMean = + le16_to_cpu(profile->avfs_meannsigma_platform_mean); + param->usMeanNsigmaPlatformSigma = + le16_to_cpu(profile->avfs_meannsigma_platform_sigma); + param->ulGbVdroopTableCksoffA0 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a0); + param->ulGbVdroopTableCksoffA1 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a1); + param->ulGbVdroopTableCksoffA2 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a2); + param->ulGbVdroopTableCksonA0 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a0); + param->ulGbVdroopTableCksonA1 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a1); + param->ulGbVdroopTableCksonA2 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a2); + param->ulGbFuseTableCksoffM1 = + 
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1); + param->ulGbFuseTableCksoffM2 = + le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2); + param->ulGbFuseTableCksoffB = + le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b); + param->ulGbFuseTableCksonM1 = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1); + param->ulGbFuseTableCksonM2 = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2); + param->ulGbFuseTableCksonB = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_b); + + param->ucEnableGbVdroopTableCkson = + profile->enable_gb_vdroop_table_ckson; + param->ucEnableGbFuseTableCkson = + profile->enable_gb_fuse_table_ckson; + param->usPsmAgeComfactor = + le16_to_cpu(profile->psm_age_comfactor); + + param->ulDispclk2GfxclkM1 = + le32_to_cpu(profile->dispclk2gfxclk_a); + param->ulDispclk2GfxclkM2 = + le32_to_cpu(profile->dispclk2gfxclk_b); + param->ulDispclk2GfxclkB = + le32_to_cpu(profile->dispclk2gfxclk_c); + param->ulDcefclk2GfxclkM1 = + le32_to_cpu(profile->dcefclk2gfxclk_a); + param->ulDcefclk2GfxclkM2 = + le32_to_cpu(profile->dcefclk2gfxclk_b); + param->ulDcefclk2GfxclkB = + le32_to_cpu(profile->dcefclk2gfxclk_c); + param->ulPixelclk2GfxclkM1 = + le32_to_cpu(profile->pixclk2gfxclk_a); + param->ulPixelclk2GfxclkM2 = + le32_to_cpu(profile->pixclk2gfxclk_b); + param->ulPixelclk2GfxclkB = + le32_to_cpu(profile->pixclk2gfxclk_c); + param->ulPhyclk2GfxclkM1 = + le32_to_cpu(profile->phyclk2gfxclk_a); + param->ulPhyclk2GfxclkM2 = + le32_to_cpu(profile->phyclk2gfxclk_b); + param->ulPhyclk2GfxclkB = + le32_to_cpu(profile->phyclk2gfxclk_c); + param->ulAcgGbVdroopTableA0 = 0; + param->ulAcgGbVdroopTableA1 = 0; + param->ulAcgGbVdroopTableA2 = 0; + param->ulAcgGbFuseTableM1 = 0; + param->ulAcgGbFuseTableM2 = 0; + param->ulAcgGbFuseTableB = 0; + param->ucAcgEnableGbVdroopTable = 0; + param->ucAcgEnableGbFuseTable = 0; + } else if (format_revision == 4 && content_revision == 2) { + profile_v4_2 = (struct atom_asic_profiling_info_v4_2 *)profile; + param->ulMaxVddc = le32_to_cpu(profile_v4_2->maxvddc); + param->ulMinVddc = le32_to_cpu(profile_v4_2->minvddc); + param->ulMeanNsigmaAcontant0 = + le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant0); + param->ulMeanNsigmaAcontant1 = + le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant1); + param->ulMeanNsigmaAcontant2 = + le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant2); + param->usMeanNsigmaDcTolSigma = + le16_to_cpu(profile_v4_2->avfs_meannsigma_dc_tol_sigma); + param->usMeanNsigmaPlatformMean = + le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_mean); + param->usMeanNsigmaPlatformSigma = + le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_sigma); + param->ulGbVdroopTableCksoffA0 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a0); + param->ulGbVdroopTableCksoffA1 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a1); + param->ulGbVdroopTableCksoffA2 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a2); + param->ulGbVdroopTableCksonA0 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a0); + param->ulGbVdroopTableCksonA1 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a1); + param->ulGbVdroopTableCksonA2 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a2); + param->ulGbFuseTableCksoffM1 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m1); + param->ulGbFuseTableCksoffM2 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m2); + param->ulGbFuseTableCksoffB = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_b); + param->ulGbFuseTableCksonM1 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m1); + 
param->ulGbFuseTableCksonM2 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m2); + param->ulGbFuseTableCksonB = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_b); + + param->ucEnableGbVdroopTableCkson = + profile_v4_2->enable_gb_vdroop_table_ckson; + param->ucEnableGbFuseTableCkson = + profile_v4_2->enable_gb_fuse_table_ckson; + param->usPsmAgeComfactor = + le16_to_cpu(profile_v4_2->psm_age_comfactor); + + param->ulDispclk2GfxclkM1 = + le32_to_cpu(profile_v4_2->dispclk2gfxclk_a); + param->ulDispclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->dispclk2gfxclk_b); + param->ulDispclk2GfxclkB = + le32_to_cpu(profile_v4_2->dispclk2gfxclk_c); + param->ulDcefclk2GfxclkM1 = + le32_to_cpu(profile_v4_2->dcefclk2gfxclk_a); + param->ulDcefclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->dcefclk2gfxclk_b); + param->ulDcefclk2GfxclkB = + le32_to_cpu(profile_v4_2->dcefclk2gfxclk_c); + param->ulPixelclk2GfxclkM1 = + le32_to_cpu(profile_v4_2->pixclk2gfxclk_a); + param->ulPixelclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->pixclk2gfxclk_b); + param->ulPixelclk2GfxclkB = + le32_to_cpu(profile_v4_2->pixclk2gfxclk_c); + param->ulPhyclk2GfxclkM1 = + le32_to_cpu(profile_v4_2->phyclk2gfxclk_a); + param->ulPhyclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->phyclk2gfxclk_b); + param->ulPhyclk2GfxclkB = + le32_to_cpu(profile_v4_2->phyclk2gfxclk_c); + param->ulAcgGbVdroopTableA0 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a0); + param->ulAcgGbVdroopTableA1 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a1); + param->ulAcgGbVdroopTableA2 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a2); + param->ulAcgGbFuseTableM1 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m1); + param->ulAcgGbFuseTableM2 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m2); + param->ulAcgGbFuseTableB = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_b); + param->ucAcgEnableGbVdroopTable = le32_to_cpu(profile_v4_2->enable_acg_gb_vdroop_table); + param->ucAcgEnableGbFuseTable = le32_to_cpu(profile_v4_2->enable_acg_gb_fuse_table); + } else { + pr_info("Invalid VBIOS AVFS ProfilingInfo Revision!\n"); + return -EINVAL; + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index 81908b5cfd5f..8e6b1f0ddebc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -109,6 +109,14 @@ struct pp_atomfwctrl_avfs_parameters { uint32_t ulPhyclk2GfxclkM1; uint32_t ulPhyclk2GfxclkM2; uint32_t ulPhyclk2GfxclkB; + uint32_t ulAcgGbVdroopTableA0; + uint32_t ulAcgGbVdroopTableA1; + uint32_t ulAcgGbVdroopTableA2; + uint32_t ulAcgGbFuseTableM1; + uint32_t ulAcgGbFuseTableM2; + uint32_t ulAcgGbFuseTableB; + uint32_t ucAcgEnableGbVdroopTable; + uint32_t ucAcgEnableGbFuseTable; }; struct pp_atomfwctrl_gpio_parameters { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 1f01020ce3a9..f01cda93f178 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4630,6 +4630,15 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) { + struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + + if (smu_data == NULL) + return -EINVAL; + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return 0; + if (enable) { if
(!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d6f097f44b6c..01ff5054041b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -78,6 +78,8 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask); const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); @@ -146,6 +148,19 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.vr1hot_enabled = 1; data->registry_data.regulator_hot_gpio_support = 1; + data->registry_data.didt_support = 1; + if (data->registry_data.didt_support) { + data->registry_data.didt_mode = 6; + data->registry_data.sq_ramping_support = 1; + data->registry_data.db_ramping_support = 0; + data->registry_data.td_ramping_support = 0; + data->registry_data.tcp_ramping_support = 0; + data->registry_data.dbr_ramping_support = 0; + data->registry_data.edc_didt_support = 1; + data->registry_data.gc_didt_support = 0; + data->registry_data.psm_didt_support = 0; + } + data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT; data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; @@ -223,6 +238,8 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtSupport); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); @@ -230,6 +247,34 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_TDRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtEDCEnable); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GCEDC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PSM); + + if (data->registry_data.didt_support) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport); + if (data->registry_data.sq_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); + if (data->registry_data.db_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); + if (data->registry_data.td_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping); + if (data->registry_data.tcp_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); + if (data->registry_data.dbr_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping); + if (data->registry_data.edc_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, 
PHM_PlatformCaps_DiDtEDCEnable); + if (data->registry_data.gc_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC); + if (data->registry_data.psm_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM); + } if (data->registry_data.power_containment_support) phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -321,8 +366,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) FEATURE_LED_DISPLAY_BIT; data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = FEATURE_FAN_CONTROL_BIT; - data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id = - FEATURE_VOLTAGE_CONTROLLER_BIT; + data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT; + data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT; if (!data->registry_data.prefetcher_dpm_key_disabled) data->smu_features[GNLD_DPM_PREFETCHER].supported = true; @@ -386,6 +431,15 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (data->registry_data.vr0hot_enabled) data->smu_features[GNLD_VR0HOT].supported = true; + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion); + vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version)); + /* ACG firmware has major version 5 */ + if ((data->smu_version & 0xff000000) == 0x5000000) + data->smu_features[GNLD_ACG].supported = true; + + if (data->registry_data.didt_support) + data->smu_features[GNLD_DIDT].supported = true; + } #ifdef PPLIB_VEGA10_EVV_SUPPORT @@ -2128,15 +2182,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) pp_table->AvfsGbCksOff.m2_shift = 12; pp_table->AvfsGbCksOff.b_shift = 0; - for (i = 0; i < dep_table->count; i++) { - if (dep_table->entries[i].sclk_offset == 0) - pp_table->StaticVoltageOffsetVid[i] = 248; - else - pp_table->StaticVoltageOffsetVid[i] = - (uint8_t)(dep_table->entries[i].sclk_offset * - VOLTAGE_VID_OFFSET_SCALE2 / - VOLTAGE_VID_OFFSET_SCALE1); - } + for (i = 0; i < dep_table->count; i++) + pp_table->StaticVoltageOffsetVid[i] = + convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset)); if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != data->disp_clk_quad_eqn_a) && @@ -2228,6 +2276,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24; pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12; pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12; + + pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0; + pp_table->AcgBtcGbVdroopTable.a0_shift = 20; + pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1; + pp_table->AcgBtcGbVdroopTable.a1_shift = 20; + pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2; + pp_table->AcgBtcGbVdroopTable.a2_shift = 20; + + pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1; + pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2; + pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB; + pp_table->AcgAvfsGb.m1_shift = 0; + pp_table->AcgAvfsGb.m2_shift = 0; + pp_table->AcgAvfsGb.b_shift = 0; + } else { data->smu_features[GNLD_AVFS].supported = false; } @@ -2236,6 +2299,55 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) return 0; } +static int vega10_acg_enable(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t agc_btc_response; + + if (data->smu_features[GNLD_ACG].supported) { + if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + 
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) + data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc); + vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response); + + if (1 == agc_btc_response) { + if (1 == data->acg_loop_state) + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop); + else if (2 == data->acg_loop_state) + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop); + if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + data->smu_features[GNLD_ACG].smu_feature_bitmap)) + data->smu_features[GNLD_ACG].enabled = true; + } else { + pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n"); + data->smu_features[GNLD_ACG].enabled = false; + } + } + + return 0; +} + +static int vega10_acg_disable(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_ACG].supported) { + if (data->smu_features[GNLD_ACG].enabled) { + if (0 == vega10_enable_smc_features(hwmgr->smumgr, false, + data->smu_features[GNLD_ACG].smu_feature_bitmap)) + data->smu_features[GNLD_ACG].enabled = false; + } + } + + return 0; +} + static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = @@ -2506,7 +2618,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) result = vega10_avfs_enable(hwmgr, true); PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!", return result); - + vega10_acg_enable(hwmgr); vega10_save_default_power_profile(hwmgr); return 0; @@ -2838,6 +2950,11 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!tmp_result, "Failed to start DPM!", result = tmp_result); + /* enable didt, do not abort if didt enabling fails */ + tmp_result = vega10_enable_didt_config(hwmgr); + PP_ASSERT(!tmp_result, + "Failed to enable didt config!"); + tmp_result = vega10_enable_power_containment(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to enable power containment!", @@ -4103,34 +4220,30 @@ static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr) return 0; } -static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) +static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, + uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) { - int ret = 0; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = vega10_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = vega10_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = vega10_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; + if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL && + table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL && + table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) { + *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL; + *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL; + *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL; } - hwmgr->dpm_level = level; - - return ret; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + *sclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + *mclk_mask = 0; + } else if (level == 
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; + *soc_mask = table_info->vdd_dep_on_socclk->count - 1; + *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; + } + return 0; } static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) @@ -4157,6 +4270,86 @@ static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) return result; } +static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + uint32_t sclk_mask = 0; + uint32_t mclk_mask = 0; + uint32_t soc_mask = 0; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + + if (level == hwmgr->dpm_level) + return ret; + + if (!(hwmgr->dpm_level & profile_mode_mask)) { + /* enter profile mode, save current level, disable gfx cg*/ + if (level & profile_mode_mask) { + hwmgr->saved_dpm_level = hwmgr->dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + } + } else { + /* exit profile mode, restore level, enable gfx cg*/ + if (!(level & profile_mode_mask)) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) + level = hwmgr->saved_dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + } + } + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = vega10_force_dpm_highest(hwmgr); + if (ret) + return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = vega10_force_dpm_lowest(hwmgr); + if (ret) + return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = vega10_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); + if (ret) + return ret; + hwmgr->dpm_level = level; + vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); + vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); + + return 0; +} + static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); @@ -4402,7 +4595,9 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); int i; - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | + AMD_DPM_FORCED_LEVEL_LOW | + AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; switch (type) { @@ -4667,6 +4862,10 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable power containment!", result = tmp_result); + tmp_result = vega10_disable_didt_config(hwmgr); + 
PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable didt config!", result = tmp_result); + tmp_result = vega10_avfs_enable(hwmgr, false); PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable AVFS!", result = tmp_result); @@ -4683,6 +4882,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable ulv!", result = tmp_result); + tmp_result = vega10_acg_disable(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable acg!", result = tmp_result); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 6e5c5b99593b..676cd7735883 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -64,7 +64,9 @@ enum { GNLD_FW_CTF, GNLD_LED_DISPLAY, GNLD_FAN_CONTROL, - GNLD_VOLTAGE_CONTROLLER, + GNLD_FEATURE_FAST_PPT_BIT, + GNLD_DIDT, + GNLD_ACG, GNLD_FEATURES_MAX }; @@ -230,7 +232,9 @@ struct vega10_registry_data { uint8_t cac_support; uint8_t clock_stretcher_support; uint8_t db_ramping_support; + uint8_t didt_mode; uint8_t didt_support; + uint8_t edc_didt_support; uint8_t dynamic_state_patching_support; uint8_t enable_pkg_pwr_tracking_feature; uint8_t enable_tdc_limit_feature; @@ -263,6 +267,9 @@ struct vega10_registry_data { uint8_t tcp_ramping_support; uint8_t tdc_support; uint8_t td_ramping_support; + uint8_t dbr_ramping_support; + uint8_t gc_didt_support; + uint8_t psm_didt_support; uint8_t thermal_out_gpio_support; uint8_t thermal_support; uint8_t fw_ctf_enabled; @@ -381,6 +388,8 @@ struct vega10_hwmgr { struct vega10_smc_state_table smc_state_table; uint32_t config_telemetry; + uint32_t smu_version; + uint32_t acg_loop_state; }; #define VEGA10_DPM2_NEAR_TDP_DEC 10 @@ -425,6 +434,10 @@ struct vega10_hwmgr { #define PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ #define PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define VEGA10_UMD_PSTATE_GFXCLK_LEVEL 0x3 +#define VEGA10_UMD_PSTATE_SOCCLK_LEVEL 0x3 +#define VEGA10_UMD_PSTATE_MCLK_LEVEL 0x2 + extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 3f72268e99bb..fbafc849ea71 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -26,7 +26,1298 @@ #include "vega10_powertune.h" #include "vega10_smumgr.h" #include "vega10_ppsmc.h" +#include "vega10_inc.h" #include "pp_debug.h" +#include "pp_soc15.h" + +static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, 
DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 }, + + /* DIDT_TD */ + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde }, + + /* DIDT_TCP */ + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde }, + + /* DIDT_DB */ + { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde }, + { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /*DIDT_SQ_CTRL3 */ + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + /*DIDT_TCP_CTRL3 */ + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, 
DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + /*DIDT_TD_CTRL3 */ + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + /*DIDT_DB_CTRL3 */ + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, 
DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 }, + + /* DIDT_TD */ + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 }, + + /* DIDT_TCP */ + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 }, + + /* DIDT_DB */ + { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde }, + { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff }, + /* DIDT_TD */ + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff }, + /* DIDT_TCP */ + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff }, + /* DIDT_DB */ + { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff }, + + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] = +{ +/* 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A }, + { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 }, + { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 }, + + /* DIDT_TD */ + { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F }, + { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 }, + { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 }, + + /* DIDT_TCP */ + { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D }, + { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 }, + + /* DIDT_DB */ + { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F }, + { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 }, + { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + /* DIDT_TD */ + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, 
DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + /* DIDT_TCP */ + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + /* DIDT_DB */ + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] = +{ +/* 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + /* DIDT_TD */ + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + /* DIDT_TCP */ + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + /* DIDT_DB */ + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 }, + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 }, + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a }, + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ_STALL_PATTERN_1_2 */ + { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* 
DIDT_SQ_STALL_PATTERN_3_4 */ + { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_SQ_STALL_PATTERN_5_6 */ + { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_SQ_STALL_PATTERN_7 */ + { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + /* DIDT_TCP_STALL_PATTERN_1_2 */ + { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* DIDT_TCP_STALL_PATTERN_3_4 */ + { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_TCP_STALL_PATTERN_5_6 */ + { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_TCP_STALL_PATTERN_7 */ + { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + /* DIDT_TD_STALL_PATTERN_1_2 */ + { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* DIDT_TD_STALL_PATTERN_3_4 */ + { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_TD_STALL_PATTERN_5_6 */ + { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_TD_STALL_PATTERN_7 */ + { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + /* DIDT_DB_STALL_PATTERN_1_2 */ + { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_DB_STALL_PATTERN_1_2, 
DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* DIDT_DB_STALL_PATTERN_3_4 */ + { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_DB_STALL_PATTERN_5_6 */ + { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_DB_STALL_PATTERN_7 */ + { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SELCacConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 }, + /* TD */ + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 }, + /* TCP */ + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 }, + /* DB */ + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 }, + + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F }, + { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F }, + /* TD */ + { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 
0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + /* TCP */ + { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + /* DB */ + { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + /* TD */ + { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 }, + { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 }, + /* TD */ + { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 }, + /* TCP */ + { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 }, + /* DB */ + { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E }, + { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF }, + { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF }, + { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 }, + { ixDIDT_SQ_EDC_CTRL, 
DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + + /* TD */ + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 
}, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC STALL PATTERNs */ + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 }, + { 
ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 }, + + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 }, + + { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC STALL DELAYs */ + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 }, + + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7__SHIFT, 0x0000 }, + + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11__SHIFT, 0x0000 }, + + { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, 
DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC THRESHOLD */ + { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC CTRL */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC CTRL */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { 
ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, 
GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[]= +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F }, + { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 }, + { 0x16A06, 0x00000001, 0x0, 0x02000000 }, + { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 }, + { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 }, + { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 }, + { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 }, + { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 }, + { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 }, + { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static int vega10_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs, enum vega10_didt_config_reg_type reg_type) +{ + uint32_t data; + + PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega10_program_didt_config_registers] Invalid config register table!", return -EINVAL); + + while (config_regs->offset != 0xFFFFFFFF) { + switch (reg_type) { + case VEGA10_CONFIGREG_DIDT: + data = 
cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data); + break; + case VEGA10_CONFIGREG_GCCAC: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); + break; + case VEGA10_CONFIGREG_SECAC: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data); + break; + default: + return -EINVAL; + } + + config_regs++; + } + + return 0; +} + +static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs) +{ + uint32_t data; + + while (config_regs->offset != 0xFFFFFFFF) { + data = cgs_read_register(hwmgr->device, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_register(hwmgr->device, config_regs->offset, data); + config_regs++; + } + + return 0; +} + +static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) +{ + uint32_t data; + int result; + uint32_t en = (enable ? 1 : 0); + uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + didt_block_info &= ~SQ_Enable_MASK; + didt_block_info |= en << SQ_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + didt_block_info &= ~DB_Enable_MASK; + didt_block_info |= en << DB_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + didt_block_info &= ~TD_Enable_MASK; + didt_block_info |= en << TD_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + didt_block_info 
&= ~TCP_Enable_MASK; + didt_block_info |= en << TCP_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0); + data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); + data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); + data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); + data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); + data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); + data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data); + } + } + + if (enable) { + /* For Vega10, SMC does not support any mask yet. 
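The didt_block_info word assembled above is therefore handed to the SMC as-is; its per-block Enable bits tell firmware which DiDt blocks to act on.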
*/ + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); + PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!"); + } +} + +static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0, count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, &sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + + result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SELCacConfig_Vega10, VEGA10_CONFIGREG_SECAC); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT); + + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) +{ + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0, count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, &sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + + result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT); + 
result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT); + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) + vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); + + return 0; +} + +static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) +{ + uint32_t data; + + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + data = 0x00000000; + cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); + + return 0; +} + +static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0, count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, &sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); + + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) +{ + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0; + uint32_t count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, 
&sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); + + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10); + vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); + + return 0; +} + +static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) +{ + uint32_t data; + + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + data = 0x00000000; + cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); + + return 0; +} + +static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + int result; + + cgs_enter_safe_mode(hwmgr->device, true); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT); + if (0 != result) + return result; + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) +{ + int result; + + result = vega10_disable_se_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result); + + return 0; +} + +int vega10_enable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DIDT].supported) {
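+ /* registry_data.didt_mode selects the flow below: 0 = CAC-driven SE DiDt, 2 = PSM GC DiDt, 3 = SE EDC, 1/4/5 = PSM GC EDC, 6 = SE EDC force stall. */
+ if 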
(data->smu_features[GNLD_DIDT].enabled) + PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n"); + + switch (data->registry_data.didt_mode) { + case 0: + result = vega10_enable_cac_driving_se_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result); + break; + case 2: + result = vega10_enable_psm_gc_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result); + break; + case 3: + result = vega10_enable_se_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result); + break; + case 1: + case 4: + case 5: + result = vega10_enable_psm_gc_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 5 Failed!", return result); + break; + case 6: + result = vega10_enable_se_edc_force_stall_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result); + break; + default: + result = -EINVAL; + break; + } + + if (0 == result) { + PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result); + data->smu_features[GNLD_DIDT].enabled = true; + } + } + + return result; +} + +int vega10_disable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DIDT].supported) { + if (!data->smu_features[GNLD_DIDT].enabled) + PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n"); + + switch (data->registry_data.didt_mode) { + case 0: + result = vega10_disable_cac_driving_se_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result); + break; + case 2: + result = vega10_disable_psm_gc_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result); + break; + case 3: + result = vega10_disable_se_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result); + break; + case 1: + case 4: + case 5: + result = vega10_disable_psm_gc_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 5 Failed!", return result); + break; + case 6: + result = vega10_disable_se_edc_force_stall_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result); + break; + default: + result = -EINVAL; + break; + } + + if (0 == result) { + PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result); + data->smu_features[GNLD_DIDT].enabled = false; + } + } + + return result; +} void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h index 9ecaa27c0bb5..b95771ab89cd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h @@ -31,6 +31,12 @@ enum vega10_pt_config_reg_type { VEGA10_CONFIGREG_MAX }; +enum vega10_didt_config_reg_type { + 
VEGA10_CONFIGREG_DIDT = 0, + VEGA10_CONFIGREG_GCCAC, + VEGA10_CONFIGREG_SECAC +}; + /* PowerContainment Features */ #define POWERCONTAINMENT_FEATURE_DTE 0x00000001 #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 @@ -44,6 +50,13 @@ struct vega10_pt_config_reg { enum vega10_pt_config_reg_type type; }; +struct vega10_didt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; +}; + struct vega10_pt_defaults { uint8_t SviLoadLineEn; uint8_t SviLoadLineVddC; @@ -62,5 +75,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int vega10_power_control_set_level(struct pp_hwmgr *hwmgr); int vega10_disable_power_containment(struct pp_hwmgr *hwmgr); +int vega10_enable_didt_config(struct pp_hwmgr *hwmgr); +int vega10_disable_didt_config(struct pp_hwmgr *hwmgr); + #endif /* _VEGA10_POWERTUNE_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 1623644ea49a..e343df190375 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -31,6 +31,8 @@ #include "cgs_common.h" #include "vega10_pptable.h" +#define NUM_DSPCLK_LEVELS 8 + static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, enum phm_platform_caps cap) { @@ -644,11 +646,11 @@ static int get_gfxclk_voltage_dependency_table( return 0; } -static int get_dcefclk_voltage_dependency_table( +static int get_pix_clk_voltage_dependency_table( struct pp_hwmgr *hwmgr, struct phm_ppt_v1_clock_voltage_dependency_table **pp_vega10_clk_dep_table, - const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table) + const ATOM_Vega10_PIXCLK_Dependency_Table *clk_dep_table) { uint32_t table_size, i; struct phm_ppt_v1_clock_voltage_dependency_table @@ -681,6 +683,76 @@ static int get_dcefclk_voltage_dependency_table( return 0; } +static int get_dcefclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table + **pp_vega10_clk_dep_table, + const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table) +{ + uint32_t table_size, i; + uint8_t num_entries; + struct phm_ppt_v1_clock_voltage_dependency_table + *clk_table; + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + uint32_t rev_id; + + PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0), + "Invalid PowerPlay Table!", return -1); + +/* + * workaround needed to add another DPM level for pioneer cards + * as VBIOS is locked down. + * This DPM level was added to support 3DPM monitors @ 4K120Hz + * + */ + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; + cgs_query_system_info(hwmgr->device, &sys_info); + rev_id = (uint32_t)sys_info.value; + + if (dev_id == 0x6863 && rev_id == 0 && + clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000) + num_entries = clk_dep_table->ucNumEntries + 1 > NUM_DSPCLK_LEVELS ? 
+ NUM_DSPCLK_LEVELS : clk_dep_table->ucNumEntries + 1; + else + num_entries = clk_dep_table->ucNumEntries; + + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + num_entries; + + clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!clk_table) + return -ENOMEM; + + clk_table->count = (uint32_t)num_entries; + + for (i = 0; i < clk_dep_table->ucNumEntries; i++) { + clk_table->entries[i].vddInd = + clk_dep_table->entries[i].ucVddInd; + clk_table->entries[i].clk = + le32_to_cpu(clk_dep_table->entries[i].ulClk); + } + + if (i < num_entries) { + clk_table->entries[i].vddInd = clk_dep_table->entries[i-1].ucVddInd; + clk_table->entries[i].clk = 90000; + } + + *pp_vega10_clk_dep_table = clk_table; + + return 0; +} + static int get_pcie_table(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_pcie_table **vega10_pcie_table, const Vega10_PPTable_Generic_SubTable_Header *table) @@ -862,21 +934,21 @@ static int init_powerplay_extended_tables( gfxclk_dep_table); if (!result && powerplay_table->usPixclkDependencyTableOffset) - result = get_dcefclk_voltage_dependency_table(hwmgr, + result = get_pix_clk_voltage_dependency_table(hwmgr, &pp_table_info->vdd_dep_on_pixclk, - (const ATOM_Vega10_DCEFCLK_Dependency_Table*) + (const ATOM_Vega10_PIXCLK_Dependency_Table*) pixclk_dep_table); if (!result && powerplay_table->usPhyClkDependencyTableOffset) - result = get_dcefclk_voltage_dependency_table(hwmgr, + result = get_pix_clk_voltage_dependency_table(hwmgr, &pp_table_info->vdd_dep_on_phyclk, - (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + (const ATOM_Vega10_PIXCLK_Dependency_Table *) phyclk_dep_table); if (!result && powerplay_table->usDispClkDependencyTableOffset) - result = get_dcefclk_voltage_dependency_table(hwmgr, + result = get_pix_clk_voltage_dependency_table(hwmgr, &pp_table_info->vdd_dep_on_dispclk, - (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + (const ATOM_Vega10_PIXCLK_Dependency_Table *) dispclk_dep_table); if (!result && powerplay_table->usDcefclkDependencyTableOffset) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index a1ebe1014492..a4c8b09b6f14 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -164,9 +164,14 @@ enum phm_platform_caps { PHM_PlatformCaps_EnablePlatformPowerManagement, /* indicates that Platform Power Management feature is supported */ PHM_PlatformCaps_SurpriseRemoval, /* indicates that surprise removal feature is requested */ PHM_PlatformCaps_NewCACVoltage, /* indicates new CAC voltage table support */ + PHM_PlatformCaps_DiDtSupport, /* for dI/dT feature */ PHM_PlatformCaps_DBRamping, /* for dI/dT feature */ PHM_PlatformCaps_TDRamping, /* for dI/dT feature */ PHM_PlatformCaps_TCPRamping, /* for dI/dT feature */ + PHM_PlatformCaps_DBRRamping, /* for dI/dT feature */ + PHM_PlatformCaps_DiDtEDCEnable, /* for dI/dT feature */ + PHM_PlatformCaps_GCEDC, /* for dI/dT feature */ + PHM_PlatformCaps_PSM, /* for dI/dT feature */ PHM_PlatformCaps_EnableSMU7ThermalManagement, /* SMC will manage thermal events */ PHM_PlatformCaps_FPS, /* FPS support */ PHM_PlatformCaps_ACP, /* ACP support */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h index f3f9ebb631a5..822cd8b5bf90 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h @@ -42,6 +42,12 @@ } \ } 
while (0) +#define PP_ASSERT(cond, msg) \ + do { \ + if (!(cond)) { \ + pr_warn("%s\n", msg); \ + } \ + } while (0) #define PP_DBG_LOG(fmt, ...) \ do { \ diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h index 227d999b6bd1..a511611ec7e0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h @@ -41,6 +41,8 @@ inline static uint32_t soc15_get_register_offset( reg = MP1_BASE.instance[inst].segment[segment] + offset; else if (hw_id == DF_HWID) reg = DF_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == GC_HWID) + reg = GC_BASE.instance[inst].segment[segment] + offset; return reg; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/powerplay/inc/smu9.h index 9ef2490c7c2e..550ed675027a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9.h @@ -55,9 +55,9 @@ #define FEATURE_FW_CTF_BIT 23 #define FEATURE_LED_DISPLAY_BIT 24 #define FEATURE_FAN_CONTROL_BIT 25 -#define FEATURE_VOLTAGE_CONTROLLER_BIT 26 -#define FEATURE_SPARE_27_BIT 27 -#define FEATURE_SPARE_28_BIT 28 +#define FEATURE_FAST_PPT_BIT 26 +#define FEATURE_GFX_EDC_BIT 27 +#define FEATURE_ACG_BIT 28 #define FEATURE_SPARE_29_BIT 29 #define FEATURE_SPARE_30_BIT 30 #define FEATURE_SPARE_31_BIT 31 @@ -90,9 +90,10 @@ #define FFEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) #define FFEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) #define FFEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) -#define FFEATURE_VOLTAGE_CONTROLLER_MASK (1 << FEATURE_VOLTAGE_CONTROLLER_BIT ) -#define FFEATURE_SPARE_27_MASK (1 << FEATURE_SPARE_27_BIT ) -#define FFEATURE_SPARE_28_MASK (1 << FEATURE_SPARE_28_BIT ) + +#define FEATURE_FAST_PPT_MASK (1 << FEATURE_FAST_PPT_BIT ) +#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) +#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT ) #define FFEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT ) #define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT ) #define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT ) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h index 532186b6f941..f6d6c61f796a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -312,7 +312,10 @@ typedef struct { PllSetting_t GfxBoostState; - uint32_t Reserved[14]; + uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; + GbVdroopTable_t AcgBtcGbVdroopTable; + QuadraticInt_t AcgAvfsGb; + uint32_t Reserved[4]; /* Padding - ignore */ uint32_t MmHubPadding[7]; /* SMU internal use */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 976e942ec694..5d61cc9d4554 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -131,6 +131,7 @@ struct pp_smumgr_func { bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); + bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr); }; struct pp_smumgr { @@ -202,6 +203,8 @@ extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); +extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr); + #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK 
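For reference, every vega10_didt_config_reg table in this patch is consumed by the same read-modify-write rule shown in vega10_program_didt_config_registers() above. A minimal standalone sketch of that rule follows; the reg_read()/reg_write() helpers are hypothetical stand-ins for the cgs indirect-register accessors, and only the masking logic is taken from the patch:

#include <stdint.h>

struct didt_config_reg {
	uint32_t offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t value;
};

/* Hypothetical MMIO accessors standing in for cgs_read_ind_register()
 * and cgs_write_ind_register(). */
extern uint32_t reg_read(uint32_t offset);
extern void reg_write(uint32_t offset, uint32_t data);

/* Walk a 0xFFFFFFFF-terminated table: clear the field named by mask,
 * then OR in the shifted value, clipped by the same mask so the
 * neighbouring fields of the register are never disturbed. */
static void program_config_table(const struct didt_config_reg *entry)
{
	uint32_t data;

	while (entry->offset != 0xFFFFFFFF) {
		data = reg_read(entry->offset);
		data &= ~entry->mask;
		data |= (entry->value << entry->shift) & entry->mask;
		reg_write(entry->offset, data);
		entry++;
	}
}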
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h index b4af9e85dfa5..cb070ebc7de1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h @@ -124,6 +124,10 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_NumOfDisplays 0x56 #define PPSMC_MSG_ReadSerialNumTop32 0x58 #define PPSMC_MSG_ReadSerialNumBottom32 0x59 +#define PPSMC_MSG_RunAcgBtc 0x5C +#define PPSMC_MSG_RunAcgInClosedLoop 0x5D +#define PPSMC_MSG_RunAcgInOpenLoop 0x5E +#define PPSMC_MSG_InitializeAcg 0x5F #define PPSMC_MSG_GetCurrPkgPwr 0x61 #define PPSMC_Message_Count 0x62 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 6a320b27aefd..8712f093d6d9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -2129,6 +2129,25 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) return 0; } + +int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) +{ + int ret; + struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + + if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) + return 0; + + ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs); + + if (!ret) + /* If this param is not changed, this function could fire unnecessarily */ + smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY; + + return ret; +} + static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h index 0e9e1f2d7238..d9c72d992e30 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h @@ -48,5 +48,6 @@ int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr); int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); +int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index a1cb78552cf6..6ae948fc524f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -161,56 +161,47 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) { - int i, result = -1; + int i; + int result = -EINVAL; uint32_t reg, data; - const PWR_Command_Table *virus = PwrVirusTable; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS; - for (i = 0; (i < PWR_VIRUS_TABLE_SIZE); i++) { - switch (virus->command) { + const PWR_Command_Table *pvirus = PwrVirusTable; + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + + for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { + switch (pvirus->command) { case PwrCmdWrite: - reg = virus->reg; - data = virus->data; + reg = pvirus->reg; + data = pvirus->data; cgs_write_register(smumgr->device, reg, data); break; + case PwrCmdEnd: - priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_LOADED; result = 0; break; + default: - pr_err("Table Exit with Invalid Command!"); - priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL; - result = -1; + 
pr_info("Table Exit with Invalid Command!"); + smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; + result = -EINVAL; break; } - virus++; + pvirus++; } + return result; } static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) { int result = 0; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); - priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED; - if (priv->avfs.AvfsBtcParam) { - if (!smum_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { - if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED; - result = 0; - } else { - pr_err("[AVFS][fiji_start_avfs_btc] Attempt" - " to Enable AVFS Failed!"); - smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); - result = -1; - } - } else { - pr_err("[AVFS][fiji_start_avfs_btc] " - "PerformBTC SMU msg failed"); - result = -1; + if (0 != smu_data->avfs.avfs_btc_param) { + if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { + pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); + result = -EINVAL; } } /* Soft-Reset to reset the engine before loading uCode */ @@ -224,42 +215,6 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) return result; } -static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) -{ - int result = 0; - uint32_t table_start; - uint32_t charz_freq_addr, inversion_voltage_addr, charz_freq; - uint16_t inversion_voltage; - - charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */ - inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */ - - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, - PmFuseTable), &table_start, 0x40000), - "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate " - "starting address of PmFuse structure", - return -1;); - - charz_freq_addr = table_start + - offsetof(struct SMU73_Discrete_PmFuses, PsmCharzFreq); - inversion_voltage_addr = table_start + - offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage); - - result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr, - (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000); - PP_ASSERT_WITH_CODE(0 == result, - "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not " - "be populated.", return -1;); - - result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr, - (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000); - PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] " - "charz_freq could not be populated.", return -1;); - - return result; -} - static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) { int32_t vr_config; @@ -298,93 +253,41 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) return 0; } -/* Work in Progress */ -static int fiji_restore_vft_table(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - - if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) { - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; - return 0; - } else - return -EINVAL; -} - -/* Work in Progress */ -static int fiji_save_vft_table(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - - if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) { - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; - return 0; - } else - 
return -EINVAL; -} - static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) { - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); - switch (priv->avfs.AvfsBtcStatus) { - case AVFS_BTC_COMPLETED_SAVED: /*S3 State - Pre SMU Start */ - priv->avfs.AvfsBtcStatus = AVFS_BTC_RESTOREVFT_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_restore_vft_table(smumgr), - "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics " - "Level table over to SMU", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; - break; - case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ - priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, - 0x666), - "[AVFS][fiji_avfs_event_mgr] SMU did not respond " - "correctly to VftTableIsValid Msg", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, - PPSMC_MSG_EnableAvfs), - "[AVFS][fiji_avfs_event_mgr] SMU did not respond " - "correctly to EnableAvfs Message Msg", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED; + switch (smu_data->avfs.avfs_btc_status) { + case AVFS_BTC_COMPLETED_PREVIOUSLY: break; + case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/ if (!smu_started) break; - priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_setup_pm_fuse_for_avfs(smumgr), - "[AVFS][fiji_avfs_event_mgr] Failure at " - "fiji_setup_pm_fuse_for_avfs", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_DPMTABLESETUP_FAILED; + smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr), "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" " table over to SMU", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL; + return -EINVAL;); + smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr), "[AVFS][fiji_avfs_event_mgr] Could not setup " "Pwr Virus for AVFS ", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED; + return -EINVAL;); + smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr), "[AVFS][fiji_avfs_event_mgr] Failure at " "fiji_start_avfs_btc. AVFS Disabled", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_SAVEVFT_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_save_vft_table(smumgr), - "[AVFS][fiji_avfs_event_mgr] Could not save VFT Table", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED; + return -EINVAL;); + + smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; break; case AVFS_BTC_DISABLED: /* Do nothing */ - break; case AVFS_BTC_NOTSUPPORTED: /* Do nothing */ + case AVFS_BTC_ENABLEAVFS: break; default: - pr_err("[AVFS] Something is broken. 
See log!"); + pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status); break; } return 0; @@ -477,19 +380,6 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) if (smu7_init(smumgr)) return -EINVAL; - fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; - if (fiji_is_hw_avfs_present(smumgr)) - /* AVFS Parameter - * 0 - BTC DC disabled, BTC AC disabled - * 1 - BTC DC enabled, BTC AC disabled - * 2 - BTC DC disabled, BTC AC enabled - * 3 - BTC DC enabled, BTC AC enabled - * Default is 0 - BTC DC disabled, BTC AC disabled - */ - fiji_priv->avfs.AvfsBtcParam = 0; - else - fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; - for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) fiji_priv->activity_target[i] = 30; @@ -514,10 +404,12 @@ const struct pp_smumgr_func fiji_smu_funcs = { .init_smc_table = fiji_init_smc_table, .update_sclk_threshold = fiji_update_sclk_threshold, .thermal_setup_fan_table = fiji_thermal_setup_fan_table, + .thermal_avfs_enable = fiji_thermal_avfs_enable, .populate_all_graphic_levels = fiji_populate_all_graphic_levels, .populate_all_memory_levels = fiji_populate_all_memory_levels, .get_mac_definition = fiji_get_mac_definition, .initialize_mc_reg_table = fiji_initialize_mc_reg_table, .is_dpm_running = fiji_is_dpm_running, .populate_requested_graphic_levels = fiji_populate_requested_graphic_levels, + .is_hw_avfs_present = fiji_is_hw_avfs_present, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h index adcbdfb209be..175bf9f8ef9c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h @@ -28,17 +28,8 @@ #include "smu7_smumgr.h" - -struct fiji_smu_avfs { - enum AVFS_BTC_STATUS AvfsBtcStatus; - uint32_t AvfsBtcParam; -}; - - struct fiji_smumgr { struct smu7_smumgr smu7_data; - - struct fiji_smu_avfs avfs; struct SMU73_Discrete_DpmTable smc_state_table; struct SMU73_Discrete_Ulv ulv_setting; struct SMU73_Discrete_PmFuses power_tune_table; @@ -47,7 +38,5 @@ struct fiji_smumgr { }; - - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index f68e759e8be2..99a00bd39256 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -1498,7 +1498,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) table_info->vdd_dep_on_sclk; - if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) return result; result = atomctrl_get_avfs_information(hwmgr, &avfs_params); @@ -1889,7 +1889,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 9616cedc139c..75f43dadc56b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -60,16 +60,14 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { static const 
SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; - static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) { int i; - int result = -1; + int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = pwr_virus_table; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { @@ -86,7 +84,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) default: pr_info("Table Exit with Invalid Command!"); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - result = -1; + result = -EINVAL; break; } pvirus++; @@ -98,7 +96,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) static int polaris10_perform_btc(struct pp_smumgr *smumgr) { int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); if (0 != smu_data->avfs.avfs_btc_param) { if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { @@ -172,10 +170,11 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) return 0; } + static int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -185,30 +184,31 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED; PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr), - "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU", - return -1); + "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU", + return -EINVAL); if (smu_data->avfs.avfs_btc_param > 1) { pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting."); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr), "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ", - return -1); + return -EINVAL); } smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr), "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled", - return -1); - + return -EINVAL); + smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; break; case AVFS_BTC_DISABLED: + case AVFS_BTC_ENABLEAVFS: case AVFS_BTC_NOTSUPPORTED: break; default: - pr_info("[AVFS] Something is broken. 
See log!"); + pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status); break; } @@ -376,11 +376,6 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) if (smu7_init(smumgr)) return -EINVAL; - if (polaris10_is_hw_avfs_present(smumgr)) - smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; - else - smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; - for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++) smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT; @@ -410,4 +405,5 @@ const struct pp_smumgr_func polaris10_smu_funcs = { .get_mac_definition = polaris10_get_mac_definition, .is_dpm_running = polaris10_is_dpm_running, .populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels, + .is_hw_avfs_present = polaris10_is_hw_avfs_present, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h index 49ebf1d5a53c..5e19c24b0561 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h @@ -32,11 +32,6 @@ #define SMC_RAM_END 0x40000 -struct polaris10_avfs { - enum AVFS_BTC_STATUS avfs_btc_status; - uint32_t avfs_btc_param; -}; - struct polaris10_pt_defaults { uint8_t SviLoadLineEn; uint8_t SviLoadLineVddC; @@ -51,8 +46,6 @@ struct polaris10_pt_defaults { uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; }; - - struct polaris10_range_table { uint32_t trans_lower_frequency; /* in 10khz */ uint32_t trans_upper_frequency; @@ -61,14 +54,13 @@ struct polaris10_range_table { struct polaris10_smumgr { struct smu7_smumgr smu7_data; uint8_t protected_mode; - struct polaris10_avfs avfs; SMU74_Discrete_DpmTable smc_state_table; struct SMU74_Discrete_Ulv ulv_setting; struct SMU74_Discrete_PmFuses power_tune_table; struct polaris10_range_table range_table[NUM_SCLK_RANGE]; const struct polaris10_pt_defaults *power_tune_defaults; - uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; - uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; + uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; + uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 35ac27681415..76347ff6d655 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -540,7 +540,6 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) return result; } - int smu7_init(struct pp_smumgr *smumgr) { struct smu7_smumgr *smu_data; @@ -596,6 +595,11 @@ int smu7_init(struct pp_smumgr *smumgr) (cgs_handle_t)smu_data->smu_buffer.handle); return -EINVAL); + if (smum_is_hw_avfs_present(smumgr)) + smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; + else + smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 919be435b49c..ee5e32d2921e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -37,6 +37,11 @@ struct smu7_buffer_entry { unsigned long handle; }; +struct smu7_avfs { + enum AVFS_BTC_STATUS avfs_btc_status; + uint32_t avfs_btc_param; +}; + struct smu7_smumgr { uint8_t *header; uint8_t *mec_image; @@ -50,7 +55,8 @@ struct smu7_smumgr { uint32_t arb_table_start; uint32_t ulv_setting_starts; uint8_t security_hard_key; - uint32_t acpi_optimization; + 
uint32_t acpi_optimization; + struct smu7_avfs avfs; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index bcc61ffd13cb..3bdf6478de7f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -43,7 +43,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); - +MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); +MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); int smum_early_init(struct pp_instance *handle) { @@ -403,3 +404,11 @@ int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, return 0; } + +bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr) +{ + if (smumgr->smumgr_funcs->is_hw_avfs_present) + return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr); + + return false; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 269678443862..408514c965a0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -356,6 +356,9 @@ int vega10_set_tools_address(struct pp_smumgr *smumgr) static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) { uint32_t smc_driver_if_version; + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + uint32_t rev_id; PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr, PPSMC_MSG_GetDriverIfVersion), @@ -363,12 +366,27 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) return -EINVAL); vega10_read_arg_from_smc(smumgr, &smc_driver_if_version); - if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) { - pr_err("Your firmware(0x%x) doesn't match \ - SMU9_DRIVER_IF_VERSION(0x%x). \ - Please update your firmware!\n", - smc_driver_if_version, SMU9_DRIVER_IF_VERSION); - return -EINVAL; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(smumgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; + cgs_query_system_info(smumgr->device, &sys_info); + rev_id = (uint32_t)sys_info.value; + + if (!((dev_id == 0x687f) && + ((rev_id == 0xc0) || + (rev_id == 0xc1) || + (rev_id == 0xc3)))) { + if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) { + pr_err("Your firmware(0x%x) doesn't match \ + SMU9_DRIVER_IF_VERSION(0x%x). 
\ + Please update your firmware!\n", + smc_driver_if_version, SMU9_DRIVER_IF_VERSION); + return -EINVAL; + } } return 0; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index dbd4fd3a810b..8bd38102b58e 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -16,16 +16,16 @@ TRACE_EVENT(amd_sched_job, TP_ARGS(sched_job), TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) - __field(struct amd_sched_job *, sched_job) __field(struct dma_fence *, fence) __field(const char *, name) + __field(uint64_t, id) __field(u32, job_count) __field(int, hw_job_count) ), TP_fast_assign( __entry->entity = sched_job->s_entity; - __entry->sched_job = sched_job; + __entry->id = sched_job->id; __entry->fence = &sched_job->s_fence->finished; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( @@ -33,8 +33,9 @@ TRACE_EVENT(amd_sched_job, __entry->hw_job_count = atomic_read( &sched_job->sched->hw_rq_count); ), - TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->sched_job, __entry->fence, __entry->name, + TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->id, + __entry->fence, __entry->name, __entry->job_count, __entry->hw_job_count) ); diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 799416651f2f..16903dc7fe0d 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -69,12 +69,13 @@ static enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc, { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); long rate, clk_rate = mode->clock * 1000; + long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */ rate = clk_round_rate(arcpgu->clk, clk_rate); - if (rate != clk_rate) - return MODE_NOCLOCK; + if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0)) + return MODE_OK; - return MODE_OK; + return MODE_NOCLOCK; } static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index f75c341566ac..289eda54e5aa 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -48,29 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm) drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs; } -static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags)); - return 0; -} - -static const struct file_operations arcpgu_drm_ops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .poll = drm_poll, - .read = drm_read, - .llseek = no_llseek, - .mmap = arcpgu_gem_mmap, -}; +DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops); static void arcpgu_lastclose(struct drm_device *drm) { @@ -142,7 +120,7 @@ static int arcpgu_load(struct drm_device *drm) return -ENODEV; } - platform_set_drvdata(pdev, arcpgu); + platform_set_drvdata(pdev, drm); return 0; } @@ -160,11 +138,37 @@ static int arcpgu_unload(struct drm_device *drm) return 0; } +#ifdef CONFIG_DEBUG_FS +static int arcpgu_show_pxlclock(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device 
*drm = node->minor->dev; + struct arcpgu_drm_private *arcpgu = drm->dev_private; + unsigned long clkrate = clk_get_rate(arcpgu->clk); + unsigned long mode_clock = arcpgu->crtc.mode.crtc_clock * 1000; + + seq_printf(m, "hw : %lu\n", clkrate); + seq_printf(m, "mode: %lu\n", mode_clock); + return 0; +} + +static struct drm_info_list arcpgu_debugfs_list[] = { + { "clocks", arcpgu_show_pxlclock, 0 }, + { "fb", drm_fb_cma_debugfs_show, 0 }, +}; + +static int arcpgu_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(arcpgu_debugfs_list, + ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor); +} +#endif + static struct drm_driver arcpgu_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, .lastclose = arcpgu_lastclose, - .name = "drm-arcpgu", + .name = "arcpgu", .desc = "ARC PGU Controller", .date = "20160219", .major = 1, @@ -183,6 +187,9 @@ static struct drm_driver arcpgu_drm_driver = { .gem_prime_vmap = drm_gem_cma_prime_vmap, .gem_prime_vunmap = drm_gem_cma_prime_vunmap, .gem_prime_mmap = drm_gem_cma_prime_mmap, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = arcpgu_debugfs_init, +#endif }; static int arcpgu_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 76f07f38b941..749646ae365f 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -4,16 +4,11 @@ #include "ast_drv.h" MODULE_FIRMWARE("ast_dp501_fw.bin"); -int ast_load_dp501_microcode(struct drm_device *dev) +static int ast_load_dp501_microcode(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; - static char *fw_name = "ast_dp501_fw.bin"; - int err; - err = request_firmware(&ast->dp501_fw, fw_name, dev->dev); - if (err) - return err; - return 0; + return request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev); } static void send_ack(struct ast_private *ast) @@ -187,7 +182,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) return false; } -bool ast_launch_m68k(struct drm_device *dev) +static bool ast_launch_m68k(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; u32 i, data, len = 0; @@ -201,7 +196,11 @@ bool ast_launch_m68k(struct drm_device *dev) if (ast->dp501_fw_addr) { fw_addr = ast->dp501_fw_addr; len = 32*1024; - } else if (ast->dp501_fw) { + } else { + if (!ast->dp501_fw && + ast_load_dp501_microcode(dev) < 0) + return false; + fw_addr = (u8 *)ast->dp501_fw->data; len = ast->dp501_fw->size; } @@ -432,3 +431,11 @@ void ast_init_3rdtx(struct drm_device *dev) } } } + +void ast_release_firmware(struct drm_device *dev) +{ + struct ast_private *ast = dev->dev_private; + + release_firmware(ast->dp501_fw); + ast->dp501_fw = NULL; +} diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 569a1484d523..e6c4cd3dc50e 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -400,11 +400,10 @@ void ast_post_gpu(struct drm_device *dev); u32 ast_mindwm(struct ast_private *ast, u32 r); void ast_moutdwm(struct ast_private *ast, u32 r, u32 v); /* ast dp501 */ -int ast_load_dp501_microcode(struct drm_device *dev); void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); -bool ast_launch_m68k(struct drm_device *dev); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); u8 ast_get_dp501_max_clk(struct drm_device *dev); void ast_init_3rdtx(struct drm_device *dev); +void 
ast_release_firmware(struct drm_device *dev); #endif diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index cb05e8e79eb9..dac355812adc 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -389,7 +389,7 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) drm_gem_object_put_unlocked(ast_fb->obj); drm_framebuffer_cleanup(fb); - kfree(fb); + kfree(ast_fb); } static const struct drm_framebuffer_funcs ast_fb_funcs = { @@ -576,6 +576,7 @@ void ast_driver_unload(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; + ast_release_firmware(dev); kfree(ast->dp501_fw_addr); ast_mode_fini(dev); ast_fbdev_fini(dev); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index fea790b0edfa..6f3849ec0c1d 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -616,7 +616,23 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc, static void ast_crtc_disable(struct drm_crtc *crtc) { + int ret; + + DRM_DEBUG_KMS("\n"); + ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb); + struct drm_gem_object *obj = ast_fb->obj; + struct ast_bo *bo = gem_to_ast_bo(obj); + + ret = ast_bo_reserve(bo, false); + if (ret) + return; + ast_bo_push_sysram(bo); + ast_bo_unreserve(bo); + } + crtc->primary->fb = NULL; } static void ast_crtc_prepare(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 58084985e6cf..696a15dc2f3f 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -323,10 +323,8 @@ int ast_bo_create(struct drm_device *dev, int size, int align, return -ENOMEM; ret = drm_gem_object_init(dev, &astbo->gem, size); - if (ret) { - kfree(astbo); - return ret; - } + if (ret) + goto error; astbo->bo.bdev = &ast->ttm.bdev; @@ -340,10 +338,13 @@ int ast_bo_create(struct drm_device *dev, int size, int align, align >> PAGE_SHIFT, false, NULL, acc_size, NULL, NULL, ast_bo_ttm_destroy); if (ret) - return ret; + goto error; *pastbo = astbo; return 0; +error: + kfree(astbo); + return ret; } static inline u64 ast_bo_gpu_offset(struct ast_bo *bo) @@ -376,7 +377,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr) int ast_bo_unpin(struct ast_bo *bo) { - int i, ret; + int i; if (!bo->pin_count) { DRM_ERROR("unpin bad %p\n", bo); return 0; @@ -387,11 +388,7 @@ int ast_bo_unpin(struct ast_bo *bo) for (i = 0; i < bo->placement.num_placement ; i++) bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); - if (ret) - return ret; - - return 0; + return ttm_bo_validate(&bo->bo, &bo->placement, false, false); } int ast_bo_push_sysram(struct ast_bo *bo) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 503252d6a74d..8571cfd877c5 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1254,7 +1254,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) /* port@2 is the output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); - if (ret) + if (ret && ret != -ENODEV) return ret; /* Shut down GPIO is optional */ diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 213fb837e1c4..08af8d6b844b 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -544,7 +544,7 @@ void 
drm_dp_downstream_debug(struct seq_file *m, DP_DETAILED_CAP_INFO_AVAILABLE; int clk; int bpc; - char id[6]; + char id[7]; int len; uint8_t rev[2]; int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; @@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_puts(m, "\t\tType: N/A\n"); } + memset(id, 0, sizeof(id)); drm_dp_downstream_id(aux, id); seq_printf(m, "\t\tID: %s\n", id); @@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_printf(m, "\t\tHW: %d.%d\n", (rev[0] & 0xf0) >> 4, rev[0] & 0xf); - len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2); + len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); if (len > 0) seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 5bd93169dac2..6463fc2c736f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, if (ret) return ret; - if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { - DRM_ERROR("relocation %u outside object", i); + if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) { + DRM_ERROR("relocation %u outside object\n", i); return -EINVAL; } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 1d185347c64c..305dc3d4ff77 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -75,6 +75,7 @@ config DRM_EXYNOS_DP config DRM_EXYNOS_HDMI bool "HDMI" depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON + select CEC_CORE if CEC_NOTIFIER help Choose this option if you want to use Exynos HDMI for DRM. diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index d99b4295540f..b1f7299600f0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) struct component_match *match; pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls); match = exynos_drm_match_add(&pdev->dev); if (IS_ERR(match)) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 4ea7cc7cb3de..c399dc9b325f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1649,8 +1649,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) return ret; dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); - if (!dsi->bridge_node) - return -EINVAL; return 0; } @@ -1685,9 +1683,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, return ret; } - bridge = of_drm_find_bridge(dsi->bridge_node); - if (bridge) - drm_bridge_attach(encoder, bridge, NULL); + if (dsi->bridge_node) { + bridge = of_drm_find_bridge(dsi->bridge_node); + if (bridge) + drm_bridge_attach(encoder, bridge, NULL); + } return mipi_dsi_host_register(&dsi->dsi_host); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index ed1a648d518c..6592f50d460a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -145,13 +145,19 @@ static struct drm_framebuffer * exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { + const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd); struct 
exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; struct drm_gem_object *obj; struct drm_framebuffer *fb; int i; int ret; - for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { + for (i = 0; i < info->num_planes; i++) { + unsigned int height = (i == 0) ? mode_cmd->height : + DIV_ROUND_UP(mode_cmd->height, info->vsub); + unsigned long size = height * mode_cmd->pitches[i] + + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); if (!obj) { DRM_ERROR("failed to lookup gem object\n"); @@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, } exynos_gem[i] = to_exynos_gem(obj); + + if (size > exynos_gem[i]->size) { + i++; + ret = -EINVAL; + goto err; + } } fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index e45720543a45..16bbee897e0d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master, void *data) { struct exynos_mic *mic = dev_get_drvdata(dev); - int ret; - mic->bridge.funcs = &mic_bridge_funcs; - mic->bridge.of_node = dev->of_node; mic->bridge.driver_private = mic; - ret = drm_bridge_add(&mic->bridge); - if (ret) - DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); - return ret; + return 0; } static void exynos_mic_unbind(struct device *dev, struct device *master, @@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master, already_disabled: mutex_unlock(&mic_mutex); - - drm_bridge_remove(&mic->bridge); } static const struct component_ops exynos_mic_component_ops = { @@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mic); + mic->bridge.funcs = &mic_bridge_funcs; + mic->bridge.of_node = dev->of_node; + + ret = drm_bridge_add(&mic->bridge); + if (ret) { + DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); + return ret; + } + pm_runtime_enable(dev); ret = component_add(dev, &exynos_mic_component_ops); @@ -479,8 +480,13 @@ err: static int exynos_mic_remove(struct platform_device *pdev) { + struct exynos_mic *mic = platform_get_drvdata(pdev); + component_del(&pdev->dev, &exynos_mic_component_ops); pm_runtime_disable(&pdev->dev); + + drm_bridge_remove(&mic->bridge); + return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 0e2a472c3021..d70eeb8c5f75 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1500,8 +1500,6 @@ static void hdmi_disable(struct drm_encoder *encoder) */ cancel_delayed_work(&hdata->hotplug_work); cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); - - hdmiphy_disable(hdata); } static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { @@ -1675,7 +1673,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata) return hdmi_bridge_init(hdata); } -static struct of_device_id hdmi_match_types[] = { +static const struct of_device_id hdmi_match_types[] = { { .compatible = "samsung,exynos4210-hdmi", .data = &exynos4210_hdmi_driver_data, @@ -1933,8 +1931,7 @@ static int hdmi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int exynos_hdmi_suspend(struct device *dev) +static int __maybe_unused exynos_hdmi_suspend(struct device *dev) { struct hdmi_context *hdata = dev_get_drvdata(dev); 
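For reference, the size check added in the exynos_drm_fb.c hunk above can be restated in plain C: plane 0 uses the full framebuffer height, while subsequent planes are divided by the format's vertical subsampling before multiplying by the pitch. This is a minimal userspace sketch, not driver code; struct fb_cmd and the NV12-like geometry are made up for illustration:

#include <stdio.h>

struct fb_cmd {
	unsigned int width, height;
	unsigned int pitches[4], offsets[4];
};

static unsigned long plane_size(const struct fb_cmd *cmd, int i,
				unsigned int vsub)
{
	/* Plane 0 uses the full height; later planes are divided by
	 * the vertical subsampling factor, rounded up (DIV_ROUND_UP). */
	unsigned int height = (i == 0) ? cmd->height
				       : (cmd->height + vsub - 1) / vsub;

	return (unsigned long)height * cmd->pitches[i] + cmd->offsets[i];
}

int main(void)
{
	/* NV12-like layout, 1920x1080: plane 1 is subsampled by 2 */
	struct fb_cmd cmd = { 1920, 1080, { 1920, 1920 }, { 0, 0 } };

	printf("plane 0 needs %lu bytes\n", plane_size(&cmd, 0, 1));
	printf("plane 1 needs %lu bytes\n", plane_size(&cmd, 1, 2));
	/* Each value must not exceed the backing GEM object's size,
	 * or the framebuffer would reference memory past its end --
	 * exactly the condition the new "size > exynos_gem[i]->size"
	 * test rejects. */
	return 0;
}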
@@ -1943,7 +1940,7 @@ static int exynos_hdmi_suspend(struct device *dev) return 0; } -static int exynos_hdmi_resume(struct device *dev) +static int __maybe_unused exynos_hdmi_resume(struct device *dev) { struct hdmi_context *hdata = dev_get_drvdata(dev); int ret; @@ -1954,7 +1951,6 @@ static int exynos_hdmi_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops exynos_hdmi_pm_ops = { SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 6bed4f3ffcd6..a998a8dd783c 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = { .atomic_check = mixer_atomic_check, }; -static struct mixer_drv_data exynos5420_mxr_drv_data = { +static const struct mixer_drv_data exynos5420_mxr_drv_data = { .version = MXR_VER_128_0_0_184, .is_vp_enabled = 0, }; -static struct mixer_drv_data exynos5250_mxr_drv_data = { +static const struct mixer_drv_data exynos5250_mxr_drv_data = { .version = MXR_VER_16_0_33_0, .is_vp_enabled = 0, }; -static struct mixer_drv_data exynos4212_mxr_drv_data = { +static const struct mixer_drv_data exynos4212_mxr_drv_data = { .version = MXR_VER_0_0_0_16, .is_vp_enabled = 1, }; -static struct mixer_drv_data exynos4210_mxr_drv_data = { +static const struct mixer_drv_data exynos4210_mxr_drv_data = { .version = MXR_VER_0_0_0_16, .is_vp_enabled = 1, .has_sclk = 1, }; -static struct of_device_id mixer_match_types[] = { +static const struct of_device_id mixer_match_types[] = { { .compatible = "samsung,exynos4210-mixer", .data = &exynos4210_mxr_drv_data, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 2deb05f618fb..7cb0818a13de 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt) { struct intel_gvt_irq *irq = &gvt->irq; struct intel_vgpu *vgpu; - bool have_enabled_pipe = false; int pipe, id; if (WARN_ON(!mutex_is_locked(&gvt->lock))) return; - hrtimer_cancel(&irq->vblank_timer.timer); - for_each_active_vgpu(gvt, vgpu, id) { for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { - have_enabled_pipe = - pipe_is_enabled(vgpu, pipe); - if (have_enabled_pipe) - break; + if (pipe_is_enabled(vgpu, pipe)) + goto out; } } - if (have_enabled_pipe) - hrtimer_start(&irq->vblank_timer.timer, - ktime_add_ns(ktime_get(), irq->vblank_timer.period), - HRTIMER_MODE_ABS); + /* all the pipes are disabled */ + hrtimer_cancel(&irq->vblank_timer.timer); + return; + +out: + hrtimer_start(&irq->vblank_timer.timer, + ktime_add_ns(ktime_get(), irq->vblank_timer.period), + HRTIMER_MODE_ABS); + } static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 700050556242..1648887d3f55 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -46,6 +46,8 @@ #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) +static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask); + static int context_switch_events[] = { [RCS] = RCS_AS_CONTEXT_SWITCH, [BCS] = BCS_AS_CONTEXT_SWITCH, @@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) static int complete_execlist_workload(struct intel_vgpu_workload *workload) 
{ struct intel_vgpu *vgpu = workload->vgpu; - struct intel_vgpu_execlist *execlist = - &vgpu->execlist[workload->ring_id]; + int ring_id = workload->ring_id; + struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; struct intel_vgpu_workload *next_workload; - struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next; + struct list_head *next = workload_q_head(vgpu, ring_id)->next; bool lite_restore = false; int ret; @@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) release_shadow_batch_buffer(workload); release_shadow_wa_ctx(&workload->wa_ctx); - if (workload->status || vgpu->resetting) + if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { + /* if workload->status is not successful means HW GPU + * has occurred GPU hang or something wrong with i915/GVT, + * and GVT won't inject context switch interrupt to guest. + * So this error is a vGPU hang actually to the guest. + * According to this we should emunlate a vGPU hang. If + * there are pending workloads which are already submitted + * from guest, we should clean them up like HW GPU does. + * + * if it is in middle of engine resetting, the pending + * workloads won't be submitted to HW GPU and will be + * cleaned up during the resetting process later, so doing + * the workload clean up here doesn't have any impact. + **/ + clean_workloads(vgpu, ENGINE_MASK(ring_id)); goto out; + } - if (!list_empty(workload_q_head(vgpu, workload->ring_id))) { + if (!list_empty(workload_q_head(vgpu, ring_id))) { struct execlist_ctx_descriptor_format *this_desc, *next_desc; next_workload = container_of(next, diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 5dad9298b2d5..a26c1705430e 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; struct intel_gvt_mmio_info *e; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; + int num = gvt->mmio.num_mmio_block; struct gvt_firmware_header *h; void *firmware; void *p; unsigned long size, crc32_start; - int i; + int i, j; int ret; size = sizeof(*h) + info->mmio_size + info->cfg_space_size; @@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) hash_for_each(gvt->mmio.mmio_info_table, i, e, node) *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); + for (i = 0; i < num; i++, block++) { + for (j = 0; j < block->size; j += 4) + *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) = + I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET( + block->offset) + j)); + } + memcpy(gvt->firmware.mmio, p, info->mmio_size); crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 3a74e79eac2f..2964a4d01a66 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -149,7 +149,7 @@ struct intel_vgpu { bool active; bool pv_notified; bool failsafe; - bool resetting; + unsigned int resetting_eng; void *sched_data; struct vgpu_sched_ctl sched_ctl; @@ -195,6 +195,15 @@ struct intel_gvt_fence { unsigned long vgpu_allocated_fence_num; }; +/* Special MMIO blocks. 
*/ +struct gvt_mmio_block { + unsigned int device; + i915_reg_t offset; + unsigned int size; + gvt_mmio_func read; + gvt_mmio_func write; +}; + #define INTEL_GVT_MMIO_HASH_BITS 11 struct intel_gvt_mmio { @@ -214,6 +223,9 @@ struct intel_gvt_mmio { /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) + struct gvt_mmio_block *mmio_block; + unsigned int num_mmio_block; + DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); unsigned int num_tracked_mmio; }; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 17febe830ff6..feed9921b3b3 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) return 0; } -/* Special MMIO blocks. */ -static struct gvt_mmio_block { - unsigned int device; - i915_reg_t offset; - unsigned int size; - gvt_mmio_func read; - gvt_mmio_func write; -} gvt_mmio_blocks[] = { - {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, - {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, - {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, - pvinfo_mmio_read, pvinfo_mmio_write}, - {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, -}; - static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, unsigned int offset) { unsigned long device = intel_gvt_get_device_type(gvt); - struct gvt_mmio_block *block = gvt_mmio_blocks; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; + int num = gvt->mmio.num_mmio_block; int i; - for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) { + for (i = 0; i < num; i++, block++) { if (!(device & block->device)) continue; if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && @@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) gvt->mmio.mmio_attribute = NULL; } +/* Special MMIO blocks. 
*/ +static struct gvt_mmio_block mmio_blocks[] = { + {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, + {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, + {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, + pvinfo_mmio_read, pvinfo_mmio_write}, + {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, + {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, + {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, +}; + /** * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device * @gvt: GVT device @@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) goto err; } + gvt->mmio.mmio_block = mmio_blocks; + gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks); + gvt_dbg_mmio("traced %u virtual mmio registers\n", gvt->mmio.num_tracked_mmio); return 0; @@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, gvt_mmio_func func; int ret; - if (WARN_ON(bytes > 4)) + if (WARN_ON(bytes > 8)) return -EINVAL; /* diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 0e2e36ad6196..c873136add97 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) i915_gem_request_put(fetch_and_zero(&workload->req)); - if (!workload->status && !vgpu->resetting) { + if (!workload->status && !(vgpu->resetting_eng & + ENGINE_MASK(ring_id))) { update_guest_context(workload); for_each_set_bit(event, workload->pending_events, diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 90c14e6e3ea0..3deadcbd5a24 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, { struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", vgpu->id, dmlr, engine_mask); - vgpu->resetting = true; + + vgpu->resetting_eng = resetting_eng; intel_vgpu_stop_schedule(vgpu); /* @@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, mutex_lock(&gvt->lock); } - intel_vgpu_reset_execlist(vgpu, dmlr ? 
ALL_ENGINES : engine_mask); + intel_vgpu_reset_execlist(vgpu, resetting_eng); /* full GPU reset or device model level reset */ if (engine_mask == ALL_ENGINES || dmlr) { @@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, } } - vgpu->resetting = false; + vgpu->resetting_eng = 0; gvt_dbg_core("reset vgpu%d done\n", vgpu->id); gvt_dbg_core("------------------------------------------\n"); } diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c index 152f16c11878..348b29a845c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/i915_gem_clflush.c @@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence, return NOTIFY_DONE; } -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags) { struct clflush *clflush; @@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, */ if (!i915_gem_object_has_struct_page(obj)) { obj->cache_dirty = false; - return; + return false; } /* If the GPU is snooping the contents of the CPU cache, @@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, * tracking. */ if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) - return; + return false; trace_i915_gem_object_clflush(obj); @@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, } obj->cache_dirty = false; + return true; } diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h index 2455a7820937..f390247561b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.h +++ b/drivers/gpu/drm/i915/i915_gem_clflush.h @@ -28,7 +28,7 @@ struct drm_i915_private; struct drm_i915_gem_object; -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags); #define I915_CLFLUSH_FORCE BIT(0) #define I915_CLFLUSH_SYNC BIT(1) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 929f275e67aa..5fa44767c29e 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, eb->args->flags |= __EXEC_HAS_RELOC; } - entry->flags |= __EXEC_OBJECT_HAS_PIN; - GEM_BUG_ON(eb_vma_misplaced(entry, vma)); - if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { err = i915_vma_get_fence(vma); if (unlikely(err)) { @@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, entry->flags |= __EXEC_OBJECT_HAS_FENCE; } + entry->flags |= __EXEC_OBJECT_HAS_PIN; + GEM_BUG_ON(eb_vma_misplaced(entry, vma)); + return 0; } @@ -1459,7 +1459,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * to read. However, if the array is not writable the user loses * the updated relocation values. 
*/ - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) + if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) return -EFAULT; do { @@ -1776,7 +1776,7 @@ out: } } - return err ?: have_copy; + return err; } static int eb_relocate(struct i915_execbuffer *eb) @@ -1826,7 +1826,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) int err; for (i = 0; i < count; i++) { - const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; + struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct i915_vma *vma = exec_to_vma(entry); struct drm_i915_gem_object *obj = vma->obj; @@ -1842,12 +1842,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) eb->request->capture_list = capture; } + if (unlikely(obj->cache_dirty && !obj->cache_coherent)) { + if (i915_gem_clflush_object(obj, 0)) + entry->flags &= ~EXEC_OBJECT_ASYNC; + } + if (entry->flags & EXEC_OBJECT_ASYNC) goto skip_flushes; - if (unlikely(obj->cache_dirty && !obj->cache_coherent)) - i915_gem_clflush_object(obj, 0); - err = i915_gem_request_await_object (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); if (err) @@ -2210,7 +2212,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_rpm; err = eb_relocate(&eb); - if (err) + if (err) { /* * If the user expects the execobject.offset and * reloc.presumed_offset to be an exact match, @@ -2219,8 +2221,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, * relocation. */ args->flags &= ~__EXEC_HAS_RELOC; - if (err < 0) goto err_vma; + } if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 1032f98add11..77fb39808131 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock) return true; case MUTEX_TRYLOCK_FAILED: + *unlock = false; + preempt_disable(); do { cpu_relax(); if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - case MUTEX_TRYLOCK_SUCCESS: *unlock = true; - return true; + break; } } while (!need_resched()); + preempt_enable(); + return *unlock; - return false; + case MUTEX_TRYLOCK_SUCCESS: + *unlock = true; + return true; } BUG(); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d9f77a4d85db..ed396f7b7dca 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req) u32 *cs; int i; - cs = intel_ring_begin(req, n_flex_regs * 2 + 4); + cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); + *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4a673fc1a432..20cf272c97b1 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma) static inline void __i915_vma_unpin(struct i915_vma *vma) { - GEM_BUG_ON(!i915_vma_is_pinned(vma)); vma->flags--; } static inline void i915_vma_unpin(struct i915_vma *vma) { + GEM_BUG_ON(!i915_vma_is_pinned(vma)); 
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); __i915_vma_unpin(vma); } diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index f85d57555957..1813d84989c9 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset) } /* Program the max register to clamp values > 1.0. */ + i = lut_size - 1; I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), drm_color_lut_extract(lut[i].red, 16)); I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index efb13582dc73..f4fbb396054b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2010,8 +2010,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level) val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); val &= ~LOADGEN_SELECT; - if (((rate < 600000) && (width == 4) && (ln >= 1)) || - ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) { + if ((rate <= 600000 && width == 4 && ln >= 1) || + (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 684d653bfddb..f47ab0ef14bb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv) intel_finish_page_flip_cs(dev_priv, crtc->pipe); } -static void intel_update_primary_planes(struct drm_device *dev) -{ - struct drm_crtc *crtc; - - for_each_crtc(dev, crtc) { - struct intel_plane *plane = to_intel_plane(crtc->primary); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - - if (plane_state->base.visible) { - trace_intel_update_plane(&plane->base, - to_intel_crtc(crtc)); - - plane->update_plane(plane, - to_intel_crtc_state(crtc->state), - plane_state); - } - } -} - static int __intel_display_resume(struct drm_device *dev, struct drm_atomic_state *state, @@ -3499,6 +3479,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) struct drm_atomic_state *state; int ret; + + /* reset doesn't touch the display */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) + return; + /* * Need mode_config.mutex so that we don't * trample ongoing ->detect() and whatnot. @@ -3512,12 +3498,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) drm_modeset_backoff(ctx); } - - /* reset doesn't touch the display, but flips might get nuked anyway, */ - if (!i915.force_reset_modeset_test && - !gpu_reset_clobbers_display(dev_priv)) - return; - /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. 
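The one-line intel_color.c change above is easy to miss: when the gamma programming loop terminates, i equals lut_size, so the subsequent max-register writes indexed lut[i] one entry past the array. A minimal standalone sketch of the failure mode and the fix (the array contents here are made up):

#include <stdio.h>

int main(void)
{
	int lut[4] = { 10, 20, 30, 40 };
	int lut_size = 4, i;

	for (i = 0; i < lut_size; i++)
		; /* program PREC_PAL_DATA from lut[i] ... */

	/* Here i == lut_size, so reading lut[i] is out of bounds.
	 * The fix pins i back to the last valid entry before the
	 * max-register writes that clamp values > 1.0. */
	i = lut_size - 1;
	printf("max register gets lut[%d] = %d\n", i, lut[i]);
	return 0;
}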
@@ -3547,6 +3527,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) struct drm_atomic_state *state = dev_priv->modeset_restore_state; int ret; + /* reset doesn't touch the display */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) + return; + + if (!state) + goto unlock; + /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3558,22 +3546,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) /* reset doesn't touch the display */ if (!gpu_reset_clobbers_display(dev_priv)) { - if (!state) { - /* - * Flips in the rings have been nuked by the reset, - * so update the base address of all primary - * planes to the the last fb to make sure we're - * showing the correct fb after a reset. - * - * FIXME: Atomic will make this obsolete since we won't schedule - * CS-based flips (which might get lost in gpu resets) any more. - */ - intel_update_primary_planes(dev); - } else { - ret = __intel_display_resume(dev, state, ctx); + /* for testing only restore the display */ + ret = __intel_display_resume(dev, state, ctx); if (ret) DRM_ERROR("Restoring old state failed with %i\n", ret); - } } else { /* * The display has been reset as well, @@ -3597,8 +3573,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_hpd_init(dev_priv); } - if (state) - drm_atomic_state_put(state); + drm_atomic_state_put(state); +unlock: drm_modeset_drop_locks(ctx); drm_modeset_acquire_fini(ctx); mutex_unlock(&dev->mode_config.mutex); @@ -9132,6 +9108,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, u64 power_domain_mask; bool active; + if (INTEL_GEN(dev_priv) >= 9) { + intel_crtc_init_scalers(crtc, pipe_config); + + pipe_config->scaler_state.scaler_id = -1; + pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); + } + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; @@ -9160,13 +9143,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (INTEL_GEN(dev_priv) >= 9) { - intel_crtc_init_scalers(crtc, pipe_config); - - pipe_config->scaler_state.scaler_id = -1; - pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); - } - power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { power_domain_mask |= BIT_ULL(power_domain); @@ -9555,7 +9531,16 @@ static void i9xx_update_cursor(struct intel_plane *plane, * On some platforms writing CURCNTR first will also * cause CURPOS to be armed by the CURBASE write. * Without the CURCNTR write the CURPOS write would - * arm itself. + * arm itself. Thus we always start the full update + * with a CURCNTR write. + * + * On other platforms CURPOS always requires the + * CURBASE write to arm the update. Additonally + * a write to any of the cursor register will cancel + * an already armed cursor update. Thus leaving out + * the CURBASE write after CURPOS could lead to a + * cursor that doesn't appear to move, or even change + * shape. Thus we always write CURBASE. * * CURCNTR and CUR_FBC_CTL are always * armed by the CURBASE write only. 
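The reworked intel_finish_reset() above funnels the no-state case through a single unlock label, so the new early exits cannot leave the modeset locks held. A minimal sketch of that structure, assuming a pthread mutex in place of the DRM locking and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Illustrative only: every path taken after the lock is acquired
 * exits through the unlock label instead of duplicating cleanup. */
static int finish(void *state)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!state) {
		ret = -1;
		goto unlock;	/* skip the restore, but still unlock */
	}
	/* ... restore display state ... */
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	int dummy;

	printf("no state: %d\n", finish(NULL));
	printf("with state: %d\n", finish(&dummy));
	return 0;
}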
@@ -9574,6 +9559,7 @@ static void i9xx_update_cursor(struct intel_plane *plane, plane->cursor.cntl = cntl; } else { I915_WRITE_FW(CURPOS(pipe), pos); + I915_WRITE_FW(CURBASE(pipe), base); } POSTING_READ_FW(CURBASE(pipe)); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 52d5b82790d9..c17ed0e62b67 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return true; if (IS_SKYLAKE(dev_priv)) return true; - if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D) + if (IS_KABYLAKE(dev_priv)) return true; return false; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 96c2cbd81869..593349be8b9d 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, if (i915.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { - return panel->backlight.max - val; + return panel->backlight.max - val + panel->backlight.min; } return val; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ee2a349cfe68..48785ef75d33 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4459,8 +4459,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && (plane_bytes_per_line / 512 < 1)) selected_result = method2; - else if ((ddb_allocation && ddb_allocation / - fixed16_to_u32_round_up(plane_blocks_per_line)) >= 1) + else if (ddb_allocation >= + fixed16_to_u32_round_up(plane_blocks_per_line)) selected_result = min_fixed16(method1, method2); else if (latency >= linetime_us) selected_result = min_fixed16(method1, method2); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 47613d20bba8..2f1844c50e7d 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -207,7 +207,7 @@ struct drm_i915_private *mock_gem_device(void) mkwrite_device_info(i915)->ring_mask = BIT(0); i915->engine[RCS] = mock_engine(i915, "mock"); if (!i915->engine[RCS]) - goto err_dependencies; + goto err_priorities; i915->kernel_context = mock_context(i915, NULL); if (!i915->kernel_context) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index b638d192ce5e..99d39b2aefa6 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -5,7 +5,7 @@ config DRM_MSM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on OF && COMMON_CLK depends on MMU - select QCOM_MDT_LOADER + select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b4b54f1c24bc..f9eae03aa1dc 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -15,7 +15,7 @@ #include <linux/cpumask.h> #include <linux/qcom_scm.h> #include <linux/dma-mapping.h> -#include <linux/of_reserved_mem.h> +#include <linux/of_address.h> #include <linux/soc/qcom/mdt_loader.h> #include "msm_gem.h" #include "msm_mmu.h" @@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu); #define GPU_PAS_ID 13 -#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) - static int zap_shader_load_mdt(struct device *dev, const char 
*fwname) { const struct firmware *fw; + struct device_node *np; + struct resource r; phys_addr_t mem_phys; ssize_t mem_size; void *mem_region = NULL; int ret; + if (!IS_ENABLED(CONFIG_ARCH_QCOM)) + return -EINVAL; + + np = of_get_child_by_name(dev->of_node, "zap-shader"); + if (!np) + return -ENODEV; + + np = of_parse_phandle(np, "memory-region", 0); + if (!np) + return -EINVAL; + + ret = of_address_to_resource(np, 0, &r); + if (ret) + return ret; + + mem_phys = r.start; + mem_size = resource_size(&r); + /* Request the MDT file for the firmware */ ret = request_firmware(&fw, fwname, dev); if (ret) { @@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) } /* Allocate memory for the firmware image */ - mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL); + mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); if (!mem_region) { ret = -ENOMEM; goto out; @@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); out: + if (mem_region) + memunmap(mem_region); + release_firmware(fw); return ret; } -#else -static int zap_shader_load_mdt(struct device *dev, const char *fwname) -{ - return -ENODEV; -} -#endif static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) @@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, gpu->funcs->flush(gpu); } -struct a5xx_hwcg { +static const struct { u32 offset; u32 value; -}; - -static const struct a5xx_hwcg a530_hwcg[] = { +} a5xx_hwcg[] = { {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, @@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = { {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} }; -static const struct { - int (*test)(struct adreno_gpu *gpu); - const struct a5xx_hwcg *regs; - unsigned int count; -} a5xx_hwcg_regs[] = { - { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, -}; - -static void _a5xx_enable_hwcg(struct msm_gpu *gpu, - const struct a5xx_hwcg *regs, unsigned int count) +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) { unsigned int i; - for (i = 0; i < count; i++) - gpu_write(gpu, regs[i].offset, regs[i].value); + for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) + gpu_write(gpu, a5xx_hwcg[i].offset, + state ? a5xx_hwcg[i].value : 0); - gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); - gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); -} - -static void a5xx_enable_hwcg(struct msm_gpu *gpu) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { - if (a5xx_hwcg_regs[i].test(adreno_gpu)) { - _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, - a5xx_hwcg_regs[i].count); - return; - } - } + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 
0x182 : 0x180); } static int a5xx_me_init(struct msm_gpu *gpu) @@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu) return ret; } -/* Set up a child device to "own" the zap shader */ -static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev) -{ - struct device_node *node; - int ret; - - if (dev->parent) - return 0; - - /* Find the sub-node for the zap shader */ - node = of_get_child_by_name(parent->of_node, "zap-shader"); - if (!node) { - DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n"); - return -ENODEV; - } - - dev->parent = parent; - dev->of_node = node; - dev_set_name(dev, "adreno_zap_shader"); - - ret = device_register(dev); - if (ret) { - DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n"); - goto out; - } - - ret = of_reserved_mem_device_init(dev); - if (ret) { - DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n"); - device_unregister(dev); - } - -out: - if (ret) - dev->parent = NULL; - - return ret; -} - static int a5xx_zap_shader_init(struct msm_gpu *gpu) { static bool loaded; @@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) return -ENODEV; } - ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev); - - if (!ret) - ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev, - adreno_gpu->info->zapfw); + ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw); loaded = !ret; @@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); /* Enable HWCG */ - a5xx_enable_hwcg(gpu); + a5xx_set_hwcg(gpu, true); gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); @@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu) DBG("%s", gpu->name); - if (a5xx_gpu->zap_dev.parent) - device_unregister(&a5xx_gpu->zap_dev); - if (a5xx_gpu->pm4_bo) { if (a5xx_gpu->pm4_iova) msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); @@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = { 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, - 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, - 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, - 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, - 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, - 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, - 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, - 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, - 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, - 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, - 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, - 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, - 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, - 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, - 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, - 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, - 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, - 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, - 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, - 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, - 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, - 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, - 
0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, - 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, - 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, - ~0 + 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841, + 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28, + 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, + 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98, + 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585, + 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, + 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, + 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545, + 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0, + 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, + 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, + 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, + 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201, + 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A, + 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F, + 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, + 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947, + 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, + 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68, + 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, + 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, + 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3, + 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F, + 0xB9A0, 0xB9BF, ~0 }; static void a5xx_dump(struct msm_gpu *gpu) @@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) { seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + + /* + * Temporarily disable hardware clock gating before going into + * adreno_show to avoid issues while reading the registers + */ + a5xx_set_hwcg(gpu, false); adreno_show(gpu, m); + a5xx_set_hwcg(gpu, true); } #endif diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 6638bc85645d..1137092241d5 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -36,8 +36,6 @@ struct a5xx_gpu { uint32_t gpmu_dwords; uint32_t lm_leakage; - - struct device zap_dev; }; #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) @@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, } bool a5xx_idle(struct msm_gpu *gpu); +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); #endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f1ab2703674a..7414c6bbd582 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) *value = adreno_gpu->base.fast_rate; return 0; case MSM_PARAM_TIMESTAMP: - if (adreno_gpu->funcs->get_timestamp) - return adreno_gpu->funcs->get_timestamp(gpu, value); + if (adreno_gpu->funcs->get_timestamp) { + int ret; + + pm_runtime_get_sync(&gpu->pdev->dev); + ret = adreno_gpu->funcs->get_timestamp(gpu, value); + pm_runtime_put_autosuspend(&gpu->pdev->dev); + + return ret; + } return -EINVAL; default: DBG("%s: invalid param: %u", gpu->name, param); diff --git 
a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 9e9c5696bc03..c7b612c3d771 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; + + ret = dsi_calc_clk_rate(msm_host); + if (ret) { + pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); + return; + } clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; clk_req->escclk_rate = msm_host->esc_clk_rate; @@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - int ret; if (msm_host->mode) { drm_mode_destroy(msm_host->dev, msm_host->mode); @@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, return -ENOMEM; } - ret = dsi_calc_clk_rate(msm_host); - if (ret) { - pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 5e3bc7224eee..3a81e26629c7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_ctl *ctl = mdp5_cstate->ctl; uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; - enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; - enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; + enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; + enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; int i, plane_cnt = 0; bool bg_alpha_enabled = false; u32 mixer_op_mode = 0; @@ -755,6 +755,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DBG("Cursor off"); cursor_enable = false; + mdp5_enable(mdp5_kms); goto set_cursor; } @@ -778,6 +779,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, get_roi(crtc, &roi_w, &roi_h); + mdp5_enable(mdp5_kms); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); @@ -806,6 +809,7 @@ set_cursor: crtc_flush(crtc, flush_mask); end: + mdp5_disable(mdp5_kms); if (old_bo) { drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); /* enable vblank to complete cursor work: */ @@ -838,6 +842,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) get_roi(crtc, &roi_w, &roi_h); + mdp5_enable(mdp5_kms); + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | @@ -849,6 +855,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) crtc_flush(crtc, flush_mask); + mdp5_disable(mdp5_kms); + return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 97f3294fbfc6..70bef51245af 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) struct mdp5_interface *intf = mdp5_encoder->intf; if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - mdp5_cmd_encoder_disable(encoder); + mdp5_cmd_encoder_enable(encoder); else 
mdp5_vid_encoder_enable(encoder); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5d13fa5381ee..1c603aef3c59 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp, const char *name, bool mandatory) { struct device *dev = &pdev->dev; - struct clk *clk = devm_clk_get(dev, name); + struct clk *clk = msm_clk_get(pdev, name); if (IS_ERR(clk) && mandatory) { dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); return PTR_ERR(clk); @@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) } /* mandatory clocks: */ - ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); + ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); + ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); + ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); + ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); if (ret) goto fail; /* optional clocks: */ - get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); + get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); /* we need to set a default rate before enabling. Set a safe * rate first, then figure out hw revision, and then set a diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 818244ac4a4b..4b22ac3413a1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -888,8 +888,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, struct mdp5_hw_pipe *right_hwpipe; const struct mdp_format *format; uint32_t nplanes, config = 0; - struct phase_step step = { 0 }; - struct pixel_ext pe = { 0 }; + struct phase_step step = { { 0 } }; + struct pixel_ext pe = { { 0 } }; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; unsigned int rotation; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 65f35544c1ec..a0c60e738db8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj, struct page **pages; vma = add_vma(obj, aspace); - if (IS_ERR(vma)) - return PTR_ERR(vma); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto unlock; + } pages = get_pages(obj); if (IS_ERR(pages)) { @@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, fail: del_vma(vma); - +unlock: mutex_unlock(&msm_obj->lock); return ret; } @@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, if (use_vram) { struct msm_gem_vma *vma; struct page **pages; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + mutex_lock(&msm_obj->lock); vma = add_vma(obj, NULL); + mutex_unlock(&msm_obj->lock); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto fail; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 6bfca7470141..8a75c0bd8a78 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) { struct msm_gem_submit *submit; - uint64_t sz = sizeof(*submit) + (nr_bos * 
sizeof(submit->bos[0])) + - (nr_cmds * sizeof(submit->cmd[0])); + uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + + ((u64)nr_cmds * sizeof(submit->cmd[0])); if (sz > SIZE_MAX) return NULL; @@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out; - if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit); if (ret) goto out; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index c36321bc8714..d34e331554f3 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -42,7 +42,7 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, struct sg_table *sgt) { - if (!vma->iova) + if (!aspace || !vma->iova) return; if (aspace->mmu) { diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 196eb668d30d..5137155bf3c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1147,8 +1147,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg) return -ENODEV; if (WARN_ON(msg->size > 16)) return -E2BIG; - if (msg->size == 0) - return msg->size; ret = nvkm_i2c_aux_acquire(aux); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index b9a109be989c..d66640047913 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -407,7 +407,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; - struct drm_crtc *crtc; if (!suspend) { if (drm_drv_uses_atomic_modeset(dev)) @@ -416,10 +415,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) drm_crtc_force_disable_all(dev); } - /* Make sure that drm and hw vblank irqs get properly disabled. */ - drm_for_each_crtc(crtc, dev) - drm_crtc_vblank_off(crtc); - /* disable flip completion events */ nvif_notify_put(&drm->flip); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 6dee4071bb3f..f7b4326a4641 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3658,15 +3658,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) drm_mode_connector_attach_encoder(connector, encoder); if (dcbe->type == DCB_OUTPUT_DP) { + struct nv50_disp *disp = nv50_disp(encoder->dev); struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbe->i2c_index); if (aux) { - nv_encoder->i2c = &nv_connector->aux.ddc; + if (disp->disp->oclass < GF110_DISP) { + /* HW has no support for address-only + * transactions, so we're required to + * use custom I2C-over-AUX code. + */ + nv_encoder->i2c = &aux->i2c; + } else { + nv_encoder->i2c = &nv_connector->aux.ddc; + } nv_encoder->aux = aux; } /*TODO: Use DP Info Table to check for support. 
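The nv50_sor_create() hunk above routes pre-GF110 DP encoders through custom I2C-over-AUX code because that hardware cannot issue address-only transactions. The general shape of such a fix is a per-generation capability flag consulted at a common entry point before a request reaches the backend. A minimal sketch of that pattern, using invented names (aux_ops, aux_channel, aux_do_xfer) rather than nouveau's:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct aux_channel;

struct aux_ops {
	bool address_only;	/* backend can issue zero-byte transfers */
	int (*xfer)(struct aux_channel *aux, unsigned int addr,
		    void *buf, size_t *size);
};

struct aux_channel {
	const struct aux_ops *ops;
};

/* common entry point: reject what the backend cannot honour */
static int aux_do_xfer(struct aux_channel *aux, unsigned int addr,
		       void *buf, size_t *size)
{
	if (*size == 0 && !aux->ops->address_only)
		return -ENOSYS;

	return aux->ops->xfer(aux, addr, buf, size);
}

The same idea appears verbatim further down in this commit, where nvkm_i2c_aux_xfer() checks an address_only flag in nvkm_i2c_aux_func.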
*/ - if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) { + if (disp->disp->oclass >= GF110_DISP) { ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, nv_connector->base.base.id, &nv_encoder->dp.mstm); @@ -3888,7 +3897,7 @@ static void nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct drm_crtc_state *new_crtc_state; + struct drm_crtc_state *new_crtc_state, *old_crtc_state; struct drm_crtc *crtc; struct drm_plane_state *new_plane_state; struct drm_plane *plane; @@ -3909,12 +3918,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) mutex_lock(&disp->mutex); /* Disable head(s). */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); struct nv50_head *head = nv50_head(crtc); NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, asyh->clr.mask, asyh->set.mask); + if (old_crtc_state->active && !new_crtc_state->active) + drm_crtc_vblank_off(crtc); if (asyh->clr.mask) { nv50_head_flush_clr(head, asyh, atom->flush_disable); @@ -3989,7 +4000,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } /* Update head(s). */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); struct nv50_head *head = nv50_head(crtc); @@ -4000,11 +4011,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) nv50_head_flush_set(head, asyh); interlock_core = 1; } - } - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - if (new_crtc_state->event) - drm_crtc_vblank_get(crtc); + if (new_crtc_state->active) { + if (!old_crtc_state->active) + drm_crtc_vblank_on(crtc); + if (new_crtc_state->event) + drm_crtc_vblank_get(crtc); + } } /* Update plane(s). */ @@ -4051,12 +4064,15 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) if (new_crtc_state->event) { unsigned long flags; /* Get correct count/ts if racing with vblank irq */ - drm_crtc_accurate_vblank_count(crtc); + if (new_crtc_state->active) + drm_crtc_accurate_vblank_count(crtc); spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, new_crtc_state->event); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + new_crtc_state->event = NULL; - drm_crtc_vblank_put(crtc); + if (new_crtc_state->active) + drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index c7c84d34d97e..88582af8bd89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c @@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) /* Create output path objects for each VBIOS display path. */ i = -1; while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { + if (ver < 0x40) /* No support for chipsets prior to NV50. 
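The commit-tail rework above switches to iterating old and new CRTC states together so that vblank reporting is only toggled on an actual transition. Reduced to its control flow, the pattern is edge-triggered: off on an active-to-inactive edge, on for the reverse, untouched otherwise. A small standalone sketch; crtc_state, vblank_on and vblank_off are stand-ins for the DRM types and helpers:

#include <stdbool.h>
#include <stdio.h>

struct crtc_state {
	bool active;
};

static void vblank_on(int crtc)  { printf("vblank on:  crtc %d\n", crtc); }
static void vblank_off(int crtc) { printf("vblank off: crtc %d\n", crtc); }

static void update_vblank(int crtc, const struct crtc_state *old_s,
			  const struct crtc_state *new_s)
{
	if (old_s->active && !new_s->active)
		vblank_off(crtc);	/* head being disabled */
	else if (!old_s->active && new_s->active)
		vblank_on(crtc);	/* head coming back up */
	/* no transition: leave the vblank reference counts alone */
}

int main(void)
{
	struct crtc_state off = { .active = false }, on = { .active = true };

	update_vblank(0, &on, &off);	/* disable path */
	update_vblank(0, &off, &on);	/* enable path */
	return 0;
}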
*/ + break; if (dcbE.type == DCB_OUTPUT_UNUSED) continue; if (dcbE.type == DCB_OUTPUT_EOL) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h index a24312fb0228..a1e8bf48b778 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h @@ -22,6 +22,7 @@ struct nvkm_ior { unsigned proto_evo:4; enum nvkm_ior_proto { CRT, + TV, TMDS, LVDS, DP, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index 19c635663399..6ea19466f436 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h @@ -22,7 +22,7 @@ struct nv50_disp { u8 type[3]; } pior; - struct nv50_disp_chan *chan[17]; + struct nv50_disp_chan *chan[21]; }; void nv50_disp_super_1(struct nv50_disp *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index 85aff85394ac..be9e7f8c3b23 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type) case 0: switch (outp->info.type) { case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; + case DCB_OUTPUT_TV : *type = DAC; return TV; case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; case DCB_OUTPUT_DP : *type = SOR; return DP; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index c794b2c2d21e..6d8f21290aa2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base) if (bar->bar[0].mem) { addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; - nvkm_wr32(device, 0x001714, 0xc0000000 | addr); + nvkm_wr32(device, 0x001714, 0x80000000 | addr); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild index 48f01e40b8fc..b768e66a472b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild @@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o nvkm-y += nvkm/subdev/i2c/aux.o nvkm-y += nvkm/subdev/i2c/auxg94.o +nvkm-y += nvkm/subdev/i2c/auxgf119.o nvkm-y += nvkm/subdev/i2c/auxgm200.o nvkm-y += nvkm/subdev/i2c/anx9805.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index d172e42dd228..4c1f547da463 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -117,6 +117,10 @@ int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, u32 addr, u8 *data, u8 *size) { + if (!*size && !aux->func->address_only) { + AUX_ERR(aux, "address-only transaction dropped"); + return -ENOSYS; + } return aux->func->xfer(aux, retry, type, addr, data, size); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h index 27a4a39c87f0..9587ab456d9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h @@ -3,6 +3,7 @@ #include "pad.h" struct nvkm_i2c_aux_func { + bool address_only; int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, @@ -17,7 +18,12 @@ void 
nvkm_i2c_aux_del(struct nvkm_i2c_aux **); int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); +int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, + int, u8, struct nvkm_i2c_aux **); + int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); +int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *); +int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); #define AUX_MSG(b,l,f,a...) do { \ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index ab8cb196c34e..c8ab1b5741a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux) return 0; } -static int +int g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, u8 type, u32 addr, u8 *data, u8 *size) { @@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, } ctrl = nvkm_rd32(device, 0x00e4e4 + base); - ctrl &= ~0x0001f0ff; + ctrl &= ~0x0001f1ff; ctrl |= type << 12; - ctrl |= *size - 1; + ctrl |= (*size ? (*size - 1) : 0x00000100); nvkm_wr32(device, 0x00e4e0 + base, addr); /* (maybe) retry transaction a number of times on failure... */ @@ -160,14 +160,10 @@ out: return ret < 0 ? ret : (stat & 0x000f0000) >> 16; } -static const struct nvkm_i2c_aux_func -g94_i2c_aux_func = { - .xfer = g94_i2c_aux_xfer, -}; - int -g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, - struct nvkm_i2c_aux **paux) +g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func, + struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) { struct g94_i2c_aux *aux; @@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, return -ENOMEM; *paux = &aux->base; - nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base); + nvkm_i2c_aux_ctor(func, pad, index, &aux->base); aux->ch = drive; aux->base.intr = 1 << aux->ch; return 0; } + +static const struct nvkm_i2c_aux_func +g94_i2c_aux = { + .xfer = g94_i2c_aux_xfer, +}; + +int +g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) +{ + return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c new file mode 100644 index 000000000000..dab40cd8fe3a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c @@ -0,0 +1,35 @@ +/* + * Copyright 2017 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "aux.h" + +static const struct nvkm_i2c_aux_func +gf119_i2c_aux = { + .address_only = true, + .xfer = g94_i2c_aux_xfer, +}; + +int +gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) +{ + return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index ee091fa79628..7ef60895f43a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, } ctrl = nvkm_rd32(device, 0x00d954 + base); - ctrl &= ~0x0001f0ff; + ctrl &= ~0x0001f1ff; ctrl |= type << 12; - ctrl |= *size - 1; + ctrl |= (*size ? (*size - 1) : 0x00000100); nvkm_wr32(device, 0x00d950 + base, addr); /* (maybe) retry transaction a number of times on failure... */ @@ -162,6 +162,7 @@ out: static const struct nvkm_i2c_aux_func gm200_i2c_aux_func = { + .address_only = true, .xfer = gm200_i2c_aux_xfer, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c index d53212f1aa52..3bc4d0310076 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c @@ -28,7 +28,7 @@ static const struct nvkm_i2c_pad_func gf119_i2c_pad_s_func = { .bus_new_4 = gf119_i2c_bus_new, - .aux_new_6 = g94_i2c_aux_new, + .aux_new_6 = gf119_i2c_aux_new, .mode = g94_i2c_pad_mode, }; @@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad) static const struct nvkm_i2c_pad_func gf119_i2c_pad_x_func = { .bus_new_4 = gf119_i2c_bus_new, - .aux_new_6 = g94_i2c_aux_new, + .aux_new_6 = gf119_i2c_aux_new, }; int diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8b7d7a0d3ca8..ee274c6e374d 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1373,12 +1373,12 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = { .output_poll_changed = radeon_output_poll_changed }; -static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = +static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = { { 0, "driver" }, { 1, "bios" }, }; -static struct drm_prop_enum_list radeon_tv_std_enum_list[] = +static const struct drm_prop_enum_list radeon_tv_std_enum_list[] = { { TV_STD_NTSC, "ntsc" }, { TV_STD_PAL, "pal" }, { TV_STD_PAL_M, "pal-m" }, @@ -1389,25 +1389,25 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] = { TV_STD_SECAM, "secam" }, }; -static struct drm_prop_enum_list radeon_underscan_enum_list[] = +static const struct drm_prop_enum_list radeon_underscan_enum_list[] = { { UNDERSCAN_OFF, "off" }, { UNDERSCAN_ON, "on" }, { UNDERSCAN_AUTO, "auto" }, }; -static struct drm_prop_enum_list radeon_audio_enum_list[] = +static const struct drm_prop_enum_list radeon_audio_enum_list[] = { { RADEON_AUDIO_DISABLE, "off" }, { RADEON_AUDIO_ENABLE, "on" }, { RADEON_AUDIO_AUTO, "auto" }, }; /* XXX support different dither options? spatial, temporal, both, etc. 
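The radeon_display.c hunks here only add const to static property tables, which moves them into read-only data and lets the compiler reject accidental stores. A trivial illustration with an invented enum_entry type:

struct enum_entry {
	int value;
	const char *name;
};

static const struct enum_entry tv_std[] = {
	{ 0, "ntsc" },
	{ 1, "pal" },
};

static const char *tv_std_name(int value)
{
	unsigned int i;

	for (i = 0; i < sizeof(tv_std) / sizeof(tv_std[0]); i++)
		if (tv_std[i].value == value)
			return tv_std[i].name;

	return "unknown";	/* tv_std[0].value = 0; would not compile */
}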
*/ -static struct drm_prop_enum_list radeon_dither_enum_list[] = +static const struct drm_prop_enum_list radeon_dither_enum_list[] = { { RADEON_FMT_DITHER_DISABLE, "off" }, { RADEON_FMT_DITHER_ENABLE, "on" }, }; -static struct drm_prop_enum_list radeon_output_csc_enum_list[] = +static const struct drm_prop_enum_list radeon_output_csc_enum_list[] = { { RADEON_OUTPUT_CSC_BYPASS, "bypass" }, { RADEON_OUTPUT_CSC_TVRGB, "tvrgb" }, { RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" }, diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 4b8f072d480a..af6ee7d9b465 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -344,9 +344,12 @@ int radeon_fbdev_init(struct radeon_device *rdev) if (list_empty(&rdev->ddev->mode_config.connector_list)) return 0; - /* select 8 bpp console on RN50 or 16MB cards */ - if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) + /* select 8 bpp console on 8MB cards, or 16 bpp on RN50 or 32MB */ + if (rdev->mc.real_vram_size <= (8*1024*1024)) bpp_sel = 8; + else if (ASIC_IS_RN50(rdev) || + rdev->mc.real_vram_size <= (32*1024*1024)) + bpp_sel = 16; rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL); if (!rfbdev) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index fff0d11b0600..afaf10db47cc 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -283,6 +283,10 @@ int radeon_irq_kms_init(struct radeon_device *rdev) int r = 0; spin_lock_init(&rdev->irq.lock); + + /* Disable vblank irqs aggressively for power-saving */ + rdev->ddev->vblank_disable_immediate = true; + r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { return r; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index faa021396da3..2804b4a15896 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -178,7 +178,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { - static struct ttm_place placements = { + static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c index fce214482e72..b0a43b68776d 100644 --- a/drivers/gpu/drm/radeon/vce_v2_0.c +++ b/drivers/gpu/drm/radeon/vce_v2_0.c @@ -104,6 +104,10 @@ static void vce_v2_0_disable_cg(struct radeon_device *rdev) WREG32(VCE_CGTT_CLK_OVERRIDE, 7); } +/* + * Local variable sw_cg is used for debugging purposes, in case we + * ran into problems with dynamic clock gating. Don't remove it. 
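The radeon_fb.c change above turns console depth selection into a ladder keyed on VRAM size: 8 bpp on 8 MB cards, 16 bpp on RN50 or 32 MB cards, full depth otherwise. The same policy as a standalone helper; console_bpp and is_rn50 are invented names mirroring the hunk:

#include <stdbool.h>
#include <stdint.h>

static int console_bpp(uint64_t vram_bytes, bool is_rn50)
{
	if (vram_bytes <= 8 * 1024 * 1024)
		return 8;	/* tiny 8 MB cards */
	if (is_rn50 || vram_bytes <= 32 * 1024 * 1024)
		return 16;	/* RN50, or up to 32 MB */
	return 32;		/* everything else keeps full depth */
}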
+ */ void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable) { bool sw_cg = false; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index f131fc68cc46..301ea1a8018e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -13,6 +13,7 @@ #include <linux/clk.h> #include <linux/mutex.h> +#include <linux/sys_soc.h> #include <drm/drmP.h> #include <drm/drm_atomic.h> @@ -129,10 +130,8 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc, for (fdpll = 1; fdpll < 32; fdpll++) { unsigned long output; - /* 1/2 (FRQSEL=1) for duty rate 50% */ output = input * (n + 1) / (m + 1) - / (fdpll + 1) / 2; - + / (fdpll + 1); if (output >= 400000000) continue; @@ -158,6 +157,11 @@ done: best_diff); } +static const struct soc_device_attribute rcar_du_r8a7795_es1[] = { + { .soc_id = "r8a7795", .revision = "ES1.*" }, + { /* sentinel */ } +}; + static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) { const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; @@ -168,7 +172,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) u32 escr; u32 div; - /* Compute the clock divisor and select the internal or external dot + /* + * Compute the clock divisor and select the internal or external dot * clock based on the requested frequency. */ clk = clk_get_rate(rcrtc->clock); @@ -185,7 +190,20 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) extclk = clk_get_rate(rcrtc->extclock); if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { - rcar_du_dpll_divider(rcrtc, &dpll, extclk, mode_clock); + unsigned long target = mode_clock; + + /* + * The H3 ES1.x exhibits dot clock duty cycle stability + * issues. We can work around them by configuring the + * DPLL to twice the desired frequency, coupled with a + * /2 post-divider. This isn't needed on other SoCs and + * breaks HDMI output on M3-W for a currently unknown + * reason, so restrict the workaround to H3 ES1.x. + */ + if (soc_device_match(rcar_du_r8a7795_es1)) + target *= 2; + + rcar_du_dpll_divider(rcrtc, &dpll, extclk, target); extclk = dpll.output; } @@ -197,8 +215,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) if (abs((long)extrate - (long)mode_clock) < abs((long)rate - (long)mode_clock)) { - dev_dbg(rcrtc->group->dev->dev, - "crtc%u: using external clock\n", rcrtc->index); if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE @@ -215,12 +231,14 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr); - - escr = ESCR_DCLKSEL_DCLKIN | 1; - } else { - escr = ESCR_DCLKSEL_DCLKIN | extdiv; } + + escr = ESCR_DCLKSEL_DCLKIN | extdiv; } + + dev_dbg(rcrtc->group->dev->dev, + "mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n", + mode_clock, extrate, rate, escr); } rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR, @@ -261,12 +279,14 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc, struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_device *rcdu = rcrtc->group->dev; - /* Store the route from the CRTC output to the DU output. The DU will be + /* + * Store the route from the CRTC output to the DU output. The DU will be * configured when starting the CRTC. 
*/ rcrtc->outputs |= BIT(output); - /* Store RGB routing to DPAD0, the hardware will be configured when + /* + * Store RGB routing to DPAD0, the hardware will be configured when * starting the CRTC. */ if (output == RCAR_DU_OUTPUT_DPAD0) @@ -342,7 +362,8 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) } } - /* Update the planes to display timing and dot clock generator + /* + * Update the planes to display timing and dot clock generator * associations. * * Updating the DPTSR register requires restarting the CRTC group, @@ -431,14 +452,8 @@ static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc) * Start/Stop and Suspend/Resume */ -static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) +static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc) { - struct drm_crtc *crtc = &rcrtc->crtc; - bool interlaced; - - if (rcrtc->started) - return; - /* Set display off and background to black */ rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0)); rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0)); @@ -450,7 +465,20 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) /* Start with all planes disabled. */ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); - /* Select master sync mode. This enables display operation in master + /* Enable the VSP compositor. */ + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + rcar_du_vsp_enable(rcrtc); + + /* Turn vertical blanking interrupt reporting on. */ + drm_crtc_vblank_on(&rcrtc->crtc); +} + +static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) +{ + bool interlaced; + + /* + * Select master sync mode. This enables display operation in master * sync mode (with the HSYNC and VSYNC signals configured as outputs and * actively driven). */ @@ -460,38 +488,56 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) DSYSR_TVM_MASTER); rcar_du_group_start_stop(rcrtc->group, true); +} - /* Enable the VSP compositor. */ - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) - rcar_du_vsp_enable(rcrtc); +static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc) +{ + struct rcar_du_device *rcdu = rcrtc->group->dev; + struct drm_crtc *crtc = &rcrtc->crtc; + u32 status; - /* Turn vertical blanking interrupt reporting back on. */ - drm_crtc_vblank_on(crtc); + /* Make sure vblank interrupts are enabled. */ + drm_crtc_vblank_get(crtc); - rcrtc->started = true; + /* + * Disable planes and calculate how many vertical blanking interrupts we + * have to wait for. If a vertical blanking interrupt has been triggered + * but not processed yet, we don't know whether it occurred before or + * after the planes got disabled. We thus have to wait for two vblank + * interrupts in that case. + */ + spin_lock_irq(&rcrtc->vblank_lock); + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); + status = rcar_du_crtc_read(rcrtc, DSSR); + rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1; + spin_unlock_irq(&rcrtc->vblank_lock); + + if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0, + msecs_to_jiffies(100))) + dev_warn(rcdu->dev, "vertical blanking timeout\n"); + + drm_crtc_vblank_put(crtc); } static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) { struct drm_crtc *crtc = &rcrtc->crtc; - if (!rcrtc->started) - return; - - /* Disable all planes and wait for the change to take effect. This is - * required as the DSnPR registers are updated on vblank, and no vblank - * will occur once the CRTC is stopped. 
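rcar_du_crtc_disable_planes() above encodes a subtle ordering problem: after the planes are disabled one vblank must elapse before the change latches, but a vblank that was already pending may have fired on either side of the register write, so two must be waited for in that case. A reduced sketch of the synchronization, assuming demo_crtc stands in for rcar_du_crtc and that the lock and wait queue were initialized at probe time:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_crtc {
	spinlock_t vblank_lock;		/* spin_lock_init() at probe */
	wait_queue_head_t vblank_wait;	/* init_waitqueue_head() at probe */
	unsigned int vblank_count;
};

/* interrupt handler: one step per vertical blanking period */
static void demo_crtc_vblank_irq(struct demo_crtc *c)
{
	spin_lock(&c->vblank_lock);
	if (c->vblank_count && --c->vblank_count == 0)
		wake_up(&c->vblank_wait);
	spin_unlock(&c->vblank_lock);
}

static void demo_crtc_disable_planes(struct demo_crtc *c, bool vblank_pending)
{
	/*
	 * A latched vblank may have fired before or after the planes
	 * were disabled; wait for two periods in that case, one
	 * otherwise.
	 */
	spin_lock_irq(&c->vblank_lock);
	/* ... plane-disable register write goes here ... */
	c->vblank_count = vblank_pending ? 2 : 1;
	spin_unlock_irq(&c->vblank_lock);

	if (!wait_event_timeout(c->vblank_wait, c->vblank_count == 0,
				msecs_to_jiffies(100)))
		pr_warn("vertical blanking timeout\n");
}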
Disabling planes when starting - * the CRTC thus wouldn't be enough as it would start scanning out - * immediately from old frame buffers until the next vblank. + /* + * Disable all planes and wait for the change to take effect. This is + * required as the plane enable registers are updated on vblank, and no + * vblank will occur once the CRTC is stopped. Disabling planes when + * starting the CRTC thus wouldn't be enough as it would start scanning + * out immediately from old frame buffers until the next vblank. * * This increases the CRTC stop delay, especially when multiple CRTCs * are stopped in one operation as we now wait for one vblank per CRTC. * Whether this can be improved needs to be researched. */ - rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); - drm_crtc_wait_one_vblank(crtc); + rcar_du_crtc_disable_planes(rcrtc); - /* Disable vertical blanking interrupt reporting. We first need to wait + /* + * Disable vertical blanking interrupt reporting. We first need to wait * for page flip completion before stopping the CRTC as userspace * expects page flips to eventually complete. */ @@ -502,14 +548,13 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_disable(rcrtc); - /* Select switch sync mode. This stops display operation and configures + /* + * Select switch sync mode. This stops display operation and configures * the HSYNC and VSYNC signals as inputs. */ rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); rcar_du_group_start_stop(rcrtc->group, false); - - rcrtc->started = false; } void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) @@ -529,12 +574,10 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) return; rcar_du_crtc_get(rcrtc); - rcar_du_crtc_start(rcrtc); + rcar_du_crtc_setup(rcrtc); /* Commit the planes state. */ - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) { - rcar_du_vsp_enable(rcrtc); - } else { + if (!rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) { for (i = 0; i < rcrtc->group->num_planes; ++i) { struct rcar_du_plane *plane = &rcrtc->group->planes[i]; @@ -546,6 +589,7 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) } rcar_du_crtc_update_planes(rcrtc); + rcar_du_crtc_start(rcrtc); } /* ----------------------------------------------------------------------------- @@ -557,7 +601,16 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - rcar_du_crtc_get(rcrtc); + /* + * If the CRTC has already been setup by the .atomic_begin() handler we + * can skip the setup stage. + */ + if (!rcrtc->initialized) { + rcar_du_crtc_get(rcrtc); + rcar_du_crtc_setup(rcrtc); + rcrtc->initialized = true; + } + rcar_du_crtc_start(rcrtc); } @@ -576,6 +629,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, } spin_unlock_irq(&crtc->dev->event_lock); + rcrtc->initialized = false; rcrtc->outputs = 0; } @@ -584,6 +638,19 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + WARN_ON(!crtc->state->enable); + + /* + * If a mode set is in progress we can be called with the CRTC disabled. + * We then need to first setup the CRTC in order to configure planes. + * The .atomic_enable() handler will notice and skip the CRTC setup. 
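The .atomic_begin()/.atomic_enable() interplay described above reduces to a lazy-init guard: whichever hook runs first performs the one-time CRTC setup and records it, and the other hook skips it. A compact sketch with illustrative names (lazy_crtc, crtc_ensure_setup):

#include <stdbool.h>

struct lazy_crtc {
	bool initialized;
};

static void crtc_setup(struct lazy_crtc *c)
{
	/* enable clocks, program timings, turn vblank reporting on */
	(void)c;
}

/* called first from both .atomic_begin() and .atomic_enable() */
static void crtc_ensure_setup(struct lazy_crtc *c)
{
	if (c->initialized)
		return;		/* the other hook already did the work */

	crtc_setup(c);
	c->initialized = true;
}

As in the patch, the flag must be cleared again on disable so the next enable repeats the setup.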
+ */ + if (!rcrtc->initialized) { + rcar_du_crtc_get(rcrtc); + rcar_du_crtc_setup(rcrtc); + rcrtc->initialized = true; + } + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_atomic_begin(rcrtc); } @@ -623,6 +690,7 @@ static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc) rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL); rcar_du_crtc_set(rcrtc, DIER, DIER_VBE); + rcrtc->vblank_enable = true; return 0; } @@ -632,6 +700,7 @@ static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc) struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE); + rcrtc->vblank_enable = false; } static const struct drm_crtc_funcs crtc_funcs = { @@ -656,14 +725,30 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) irqreturn_t ret = IRQ_NONE; u32 status; + spin_lock(&rcrtc->vblank_lock); + status = rcar_du_crtc_read(rcrtc, DSSR); rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); - if (status & DSSR_FRM) { - drm_crtc_handle_vblank(&rcrtc->crtc); + if (status & DSSR_VBK) { + /* + * Wake up the vblank wait if the counter reaches 0. This must + * be protected by the vblank_lock to avoid races in + * rcar_du_crtc_disable_planes(). + */ + if (rcrtc->vblank_count) { + if (--rcrtc->vblank_count == 0) + wake_up(&rcrtc->vblank_wait); + } + } - if (rcdu->info->gen < 3) + spin_unlock(&rcrtc->vblank_lock); + + if (status & DSSR_VBK) { + if (rcdu->info->gen < 3) { + drm_crtc_handle_vblank(&rcrtc->crtc); rcar_du_crtc_finish_page_flip(rcrtc); + } ret = IRQ_HANDLED; } @@ -717,13 +802,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) } init_waitqueue_head(&rcrtc->flip_wait); + init_waitqueue_head(&rcrtc->vblank_wait); + spin_lock_init(&rcrtc->vblank_lock); rcrtc->group = rgrp; rcrtc->mmio_offset = mmio_offsets[index]; rcrtc->index = index; if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) - primary = &rcrtc->vsp->planes[0].plane; + primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane; else primary = &rgrp->planes[index % 2].plane; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index b199ed5adf36..fdc2bf99bda1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -15,6 +15,7 @@ #define __RCAR_DU_CRTC_H__ #include <linux/mutex.h> +#include <linux/spinlock.h> #include <linux/wait.h> #include <drm/drmP.h> @@ -30,11 +31,17 @@ struct rcar_du_vsp; * @extclock: external pixel dot clock (optional) * @mmio_offset: offset of the CRTC registers in the DU MMIO block * @index: CRTC software and hardware index - * @started: whether the CRTC has been started and is running + * @initialized: whether the CRTC has been initialized and clocks enabled + * @vblank_enable: whether vblank events are enabled on this CRTC * @event: event to post when the pending page flip completes * @flip_wait: wait queue used to signal page flip completion + * @vblank_lock: protects vblank_wait and vblank_count + * @vblank_wait: wait queue used to signal vertical blanking + * @vblank_count: number of vertical blanking interrupts to wait for * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC * @group: CRTC group this CRTC belongs to + * @vsp: VSP feeding video to this CRTC + * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC */ struct rcar_du_crtc { struct drm_crtc crtc; @@ -43,15 +50,21 @@ struct rcar_du_crtc { struct clk *extclock; unsigned int mmio_offset; unsigned int index; - bool started; + bool initialized; + bool vblank_enable; 
struct drm_pending_vblank_event *event; wait_queue_head_t flip_wait; + spinlock_t vblank_lock; + wait_queue_head_t vblank_wait; + unsigned int vblank_count; + unsigned int outputs; struct rcar_du_group *group; struct rcar_du_vsp *vsp; + unsigned int vsp_pipe; }; #define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index c09cf847a657..d2f29e6b1112 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -39,7 +39,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { .features = 0, .num_crtcs = 2, .routes = { - /* R8A7779 has two RGB outputs and one (currently unsupported) + /* + * R8A7779 has two RGB outputs and one (currently unsupported) * TCON output. */ [RCAR_DU_OUTPUT_DPAD0] = { @@ -61,7 +62,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES, .num_crtcs = 3, .routes = { - /* R8A7790 has one RGB output, two LVDS outputs and one + /* + * R8A7790 has one RGB output, two LVDS outputs and one * (currently unsupported) TCON output. */ [RCAR_DU_OUTPUT_DPAD0] = { @@ -87,7 +89,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = { | RCAR_DU_FEATURE_EXT_CTRL_REGS, .num_crtcs = 2, .routes = { - /* R8A779[13] has one RGB output, one LVDS output and one + /* + * R8A779[13] has one RGB output, one LVDS output and one * (currently unsupported) TCON output. */ [RCAR_DU_OUTPUT_DPAD0] = { @@ -127,7 +130,8 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = { | RCAR_DU_FEATURE_EXT_CTRL_REGS, .num_crtcs = 2, .routes = { - /* R8A7794 has two RGB outputs and one (currently unsupported) + /* + * R8A7794 has two RGB outputs and one (currently unsupported) * TCON output. */ [RCAR_DU_OUTPUT_DPAD0] = { @@ -149,7 +153,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = { | RCAR_DU_FEATURE_VSP1_SOURCE, .num_crtcs = 4, .routes = { - /* R8A7795 has one RGB output, two HDMI outputs and one + /* + * R8A7795 has one RGB output, two HDMI outputs and one * LVDS output. */ [RCAR_DU_OUTPUT_DPAD0] = { @@ -180,19 +185,25 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = { | RCAR_DU_FEATURE_VSP1_SOURCE, .num_crtcs = 3, .routes = { - /* R8A7796 has one RGB output, one LVDS output and one - * (currently unsupported) HDMI output. + /* + * R8A7796 has one RGB output, one LVDS output and one HDMI + * output. */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(2), .port = 0, }, + [RCAR_DU_OUTPUT_HDMI0] = { + .possible_crtcs = BIT(1), + .port = 1, + }, [RCAR_DU_OUTPUT_LVDS0] = { .possible_crtcs = BIT(0), .port = 2, }, }, .num_lvds = 1, + .dpll_ch = BIT(1), }; static const struct of_device_id rcar_du_of_table[] = { @@ -339,7 +350,8 @@ static int rcar_du_probe(struct platform_device *pdev) ddev->irq_enabled = 1; - /* Register the DRM device with the core and the connectors with + /* + * Register the DRM device with the core and the connectors with * sysfs. 
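The r8a7796 entry above gains an HDMI route and a dpll_ch mask. The underlying structure is a per-SoC capability table whose route entries map each output to the CRTCs able to drive it. A trimmed-down sketch; all names (demo_route, demo_soc_info) are illustrative:

#include <stdint.h>

enum demo_output { OUT_DPAD0, OUT_HDMI0, OUT_LVDS0, OUT_MAX };

struct demo_route {
	uint32_t possible_crtcs;	/* bitmask of CRTC indices */
	unsigned int port;		/* DT port number */
};

struct demo_soc_info {
	unsigned int num_crtcs;
	uint32_t dpll_ch;		/* CRTCs whose dot clock needs the DPLL */
	struct demo_route routes[OUT_MAX];
};

/* shaped like the r8a7796 entry above */
static const struct demo_soc_info r8a7796_like = {
	.num_crtcs = 3,
	.dpll_ch = 1u << 1,
	.routes = {
		[OUT_DPAD0] = { .possible_crtcs = 1u << 2, .port = 0 },
		[OUT_HDMI0] = { .possible_crtcs = 1u << 1, .port = 1 },
		[OUT_LVDS0] = { .possible_crtcs = 1u << 0, .port = 2 },
	},
};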
*/ ret = drm_dev_register(ddev, 0); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 64738fca96d0..2f37ea901873 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -64,7 +64,8 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp) if (rcdu->info->gen < 3) { defr8 |= DEFR8_DEFE8; - /* On Gen2 the DEFR8 register for the first group also controls + /* + * On Gen2 the DEFR8 register for the first group also controls * RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for * DU instances that support it. */ @@ -75,7 +76,8 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp) defr8 |= DEFR8_VSCS; } } else { - /* On Gen3 VSPD routing can't be configured, but DPAD routing + /* + * On Gen3 VSPD routing can't be configured, but DPAD routing * needs to be set despite having a single option available. */ u32 crtc = ffs(possible_crtcs) - 1; @@ -124,7 +126,8 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) if (rcdu->info->gen >= 3) rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10); - /* Use DS1PR and DS2PR to configure planes priorities and connects the + /* + * Use DS1PR and DS2PR to configure planes priorities and connects the * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. */ rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS); @@ -177,7 +180,8 @@ static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) { - /* Many of the configuration bits are only updated when the display + /* + * Many of the configuration bits are only updated when the display * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some * of those bits could be pre-configured, but others (especially the * bits related to plane assignment to display timing controllers) need @@ -208,23 +212,32 @@ void rcar_du_group_restart(struct rcar_du_group *rgrp) int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu) { + struct rcar_du_group *rgrp; + struct rcar_du_crtc *crtc; + unsigned int index; int ret; if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS)) return 0; - /* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are - * configured in the DEFR8 register of the first group. As this function - * can be called with the DU0 and DU1 CRTCs disabled, we need to enable - * the first group clock before accessing the register. + /* + * RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are + * configured in the DEFR8 register of the first group on Gen2 and the + * last group on Gen3. As this function can be called with the DU + * channels of the corresponding CRTCs disabled, we need to enable the + * group clock before accessing the register. */ - ret = clk_prepare_enable(rcdu->crtcs[0].clock); + index = rcdu->info->gen < 3 ? 0 : DIV_ROUND_UP(rcdu->num_crtcs, 2) - 1; + rgrp = &rcdu->groups[index]; + crtc = &rcdu->crtcs[index * 2]; + + ret = clk_prepare_enable(crtc->clock); if (ret < 0) return ret; - rcar_du_group_setup_defr8(&rcdu->groups[0]); + rcar_du_group_setup_defr8(rgrp); - clk_disable_unprepare(rcdu->crtcs[0].clock); + clk_disable_unprepare(crtc->clock); return 0; } @@ -236,7 +249,8 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp) dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); - /* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and + /* + * Set the DPAD1 pins sources. 
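rcar_du_set_dpad0_vsp1_routing() above picks the first group on Gen2 but the last group on Gen3, computed as DIV_ROUND_UP(num_crtcs, 2) - 1 since each group serves two CRTCs. A quick standalone check of that arithmetic (defr8_group is an invented wrapper around the same expression):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int defr8_group(unsigned int gen, unsigned int num_crtcs)
{
	return gen < 3 ? 0 : DIV_ROUND_UP(num_crtcs, 2) - 1;
}

int main(void)
{
	/* Gen3 with 3 CRTCs -> groups {0,1}, DEFR8 lives in group 1 */
	printf("%u\n", defr8_group(3, 3));	/* prints 1 */
	/* Gen2 always uses group 0 */
	printf("%u\n", defr8_group(2, 4));	/* prints 0 */
	return 0;
}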
Select CRTC 0 if explicitly requested and * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1 * by default. */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 5d681ea53be6..7278b9703c15 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -96,7 +96,8 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, - /* The following formats are not supported on Gen2 and thus have no + /* + * The following formats are not supported on Gen2 and thus have no * associated .pnmr or .edf settings. */ { @@ -153,7 +154,8 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); unsigned int align; - /* The R8A7779 DU requires a 16 pixels pitch alignment as documented, + /* + * The R8A7779 DU requires a 16 pixels pitch alignment as documented, * but the R8A7790 DU seems to require a 128 bytes pitch alignment. */ if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) @@ -255,12 +257,12 @@ static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state) /* Apply the atomic update. */ drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_modeset_enables(dev, old_state); drm_atomic_helper_commit_planes(dev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY); + drm_atomic_helper_commit_modeset_enables(dev, old_state); drm_atomic_helper_commit_hw_done(old_state); - drm_atomic_helper_wait_for_vblanks(dev, old_state); + drm_atomic_helper_wait_for_flip_done(dev, old_state); drm_atomic_helper_cleanup_planes(dev, old_state); } @@ -309,7 +311,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, return -ENODEV; } - entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0); + entity_ep_node = of_graph_get_remote_endpoint(ep->local_node); for_each_endpoint_of_node(entity, ep_node) { if (ep_node == entity_ep_node) @@ -419,7 +421,8 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu) if (rcdu->props.alpha == NULL) return -ENOMEM; - /* The color key is expressed as an RGB888 triplet stored in a 32-bit + /* + * The color key is expressed as an RGB888 triplet stored in a 32-bit * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0) * or enable source color keying (1). */ @@ -432,6 +435,81 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu) return 0; } +static int rcar_du_vsps_init(struct rcar_du_device *rcdu) +{ + const struct device_node *np = rcdu->dev->of_node; + struct of_phandle_args args; + struct { + struct device_node *np; + unsigned int crtcs_mask; + } vsps[RCAR_DU_MAX_VSPS] = { { 0, }, }; + unsigned int vsps_count = 0; + unsigned int cells; + unsigned int i; + int ret; + + /* + * First parse the DT vsps property to populate the list of VSPs. Each + * entry contains a pointer to the VSP DT node and a bitmask of the + * connected DU CRTCs. + */ + cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1; + if (cells > 1) + return -EINVAL; + + for (i = 0; i < rcdu->num_crtcs; ++i) { + unsigned int j; + + ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i, + &args); + if (ret < 0) + goto error; + + /* + * Add the VSP to the list or update the corresponding existing + * entry if the VSP has already been added. 
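+		 * For example (a hypothetical bindings snippet, not part of
+		 * this patch), a three-CRTC DU described with one pipe cell
+		 * per entry:
+		 *
+		 *   vsps = <&vspd0 0>, <&vspd1 0>, <&vspd0 1>;
+		 *
+		 * would leave two entries after the loop below, &vspd0 with
+		 * crtcs_mask BIT(0) | BIT(2) and &vspd1 with BIT(1), while
+		 * CRTCs 0, 1 and 2 get vsp_pipe 0, 0 and 1 respectively.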
+ */ + for (j = 0; j < vsps_count; ++j) { + if (vsps[j].np == args.np) + break; + } + + if (j < vsps_count) + of_node_put(args.np); + else + vsps[vsps_count++].np = args.np; + + vsps[j].crtcs_mask |= BIT(i); + + /* Store the VSP pointer and pipe index in the CRTC. */ + rcdu->crtcs[i].vsp = &rcdu->vsps[j]; + rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0; + } + + /* + * Then initialize all the VSPs from the node pointers and CRTC bitmasks + * computed previously. + */ + for (i = 0; i < vsps_count; ++i) { + struct rcar_du_vsp *vsp = &rcdu->vsps[i]; + + vsp->index = i; + vsp->dev = rcdu; + + ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask); + if (ret < 0) + goto error; + } + + return 0; + +error: + for (i = 0; i < ARRAY_SIZE(vsps); ++i) + of_node_put(vsps[i].np); + + return ret; +} + int rcar_du_modeset_init(struct rcar_du_device *rcdu) { static const unsigned int mmio_offsets[] = { @@ -461,7 +539,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) if (ret < 0) return ret; - /* Initialize vertical blanking interrupts handling. Start with vblank + /* + * Initialize vertical blanking interrupts handling. Start with vblank * disabled for all CRTCs. */ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); @@ -481,7 +560,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) rgrp->index = i; rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U); - /* If we have more than one CRTCs in this group pre-associate + /* + * If we have more than one CRTC in this group, pre-associate * the low-order planes with CRTC 0 and the high-order planes * with CRTC 1 to minimize flicker occurring when the * association is changed. @@ -499,17 +579,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) /* Initialize the compositors. */ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) { - for (i = 0; i < rcdu->num_crtcs; ++i) { - struct rcar_du_vsp *vsp = &rcdu->vsps[i]; - - vsp->index = i; - vsp->dev = rcdu; - rcdu->crtcs[i].vsp = vsp; - - ret = rcar_du_vsp_init(vsp); - if (ret < 0) - return ret; - } + ret = rcar_du_vsps_init(rcdu); + if (ret < 0) + return ret; } /* Create the CRTCs. */ @@ -537,7 +609,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) num_encoders = ret; - /* Set the possible CRTCs and possible clones. There's always at least + /* + * Set the possible CRTCs and possible clones. There's always at least * one way for all encoders to clone each other, set all bits in the * possible clones field. */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index 1661f6201210..12d22f3db1af 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -59,7 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, rcar_lvds_write(lvds, LVDPLLCR, pllcr); - /* Select the input, hardcode mode 0, enable LVDS operation and turn + /* + * Select the input, hardcode mode 0, enable LVDS operation and turn * bias circuitry on. */ lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN; @@ -73,7 +74,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) | LVDCR1_CLKSTBY_GEN2); - /* Turn the PLL on, wait for the startup delay, and turn the output + /* + * Turn the PLL on, wait for the startup delay, and turn the output * on.
*/ lvdcr0 |= LVDCR0_PLLON; @@ -140,7 +142,8 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, if (ret < 0) return ret; - /* Hardcode the channels and control signals routing for now. + /* + * Hardcode the channels and control signals routing for now. * * HSYNC -> CTRL0 * VSYNC -> CTRL1 @@ -202,7 +205,8 @@ void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds, { struct rcar_du_device *rcdu = lvds->dev; - /* The internal LVDS encoder has a restricted clock frequency operating + /* + * The internal LVDS encoder has a restricted clock frequency operating * range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp * the clock accordingly. */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 5a7967498628..61833cc1c699 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -50,23 +50,21 @@ * automatically when the core swaps the old and new states. */ -static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane, - struct rcar_du_plane_state *new_state) +static bool rcar_du_plane_needs_realloc( + const struct rcar_du_plane_state *old_state, + const struct rcar_du_plane_state *new_state) { - struct rcar_du_plane_state *cur_state; - - cur_state = to_rcar_plane_state(plane->plane.state); - - /* Lowering the number of planes doesn't strictly require reallocation + /* + * Lowering the number of planes doesn't strictly require reallocation * as the extra hardware plane will be freed when committing, but doing * so could lead to more fragmentation. */ - if (!cur_state->format || - cur_state->format->planes != new_state->format->planes) + if (!old_state->format || + old_state->format->planes != new_state->format->planes) return true; /* Reallocate hardware planes if the source has changed. */ - if (cur_state->source != new_state->source) + if (old_state->source != new_state->source) return true; return false; @@ -141,37 +139,43 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, unsigned int groups = 0; unsigned int i; struct drm_plane *drm_plane; - struct drm_plane_state *drm_plane_state; + struct drm_plane_state *old_drm_plane_state; + struct drm_plane_state *new_drm_plane_state; /* Check if hardware planes need to be reallocated. */ - for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { - struct rcar_du_plane_state *plane_state; + for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state, + new_drm_plane_state, i) { + struct rcar_du_plane_state *old_plane_state; + struct rcar_du_plane_state *new_plane_state; struct rcar_du_plane *plane; unsigned int index; plane = to_rcar_plane(drm_plane); - plane_state = to_rcar_plane_state(drm_plane_state); + old_plane_state = to_rcar_plane_state(old_drm_plane_state); + new_plane_state = to_rcar_plane_state(new_drm_plane_state); dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); - /* If the plane is being disabled we don't need to go through + /* + * If the plane is being disabled we don't need to go through * the full reallocation procedure. Just mark the hardware * plane(s) as freed. 
*/ - if (!plane_state->format) { + if (!new_plane_state->format) { dev_dbg(rcdu->dev, "%s: plane is being disabled\n", __func__); index = plane - plane->group->planes; group_freed_planes[plane->group->index] |= 1 << index; - plane_state->hwindex = -1; + new_plane_state->hwindex = -1; continue; } - /* If the plane needs to be reallocated mark it as such, and + /* + * If the plane needs to be reallocated mark it as such, and * mark the hardware plane(s) as free. */ - if (rcar_du_plane_needs_realloc(plane, plane_state)) { + if (rcar_du_plane_needs_realloc(old_plane_state, new_plane_state)) { dev_dbg(rcdu->dev, "%s: plane needs reallocation\n", __func__); groups |= 1 << plane->group->index; @@ -179,14 +183,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, index = plane - plane->group->planes; group_freed_planes[plane->group->index] |= 1 << index; - plane_state->hwindex = -1; + new_plane_state->hwindex = -1; } } if (!needs_realloc) return 0; - /* Grab all plane states for the groups that need reallocation to ensure + /* + * Grab all plane states for the groups that need reallocation to ensure * locking and avoid racy updates. This serializes the update operation, * but there's not much we can do about it as that's the hardware * design. @@ -204,14 +209,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, for (i = 0; i < group->num_planes; ++i) { struct rcar_du_plane *plane = &group->planes[i]; - struct rcar_du_plane_state *plane_state; + struct rcar_du_plane_state *new_plane_state; struct drm_plane_state *s; s = drm_atomic_get_plane_state(state, &plane->plane); if (IS_ERR(s)) return PTR_ERR(s); - /* If the plane has been freed in the above loop its + /* + * If the plane has been freed in the above loop its * hardware planes must not be added to the used planes * bitmask. However, the current state doesn't reflect * the free state yet, as we've modified the new state @@ -226,16 +232,16 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, continue; } - plane_state = to_rcar_plane_state(plane->plane.state); - used_planes |= rcar_du_plane_hwmask(plane_state); + new_plane_state = to_rcar_plane_state(s); + used_planes |= rcar_du_plane_hwmask(new_plane_state); dev_dbg(rcdu->dev, "%s: plane (%u,%tu) uses %u hwplanes (index %d)\n", __func__, plane->group->index, plane - plane->group->planes, - plane_state->format ? - plane_state->format->planes : 0, - plane_state->hwindex); + new_plane_state->format ? + new_plane_state->format->planes : 0, + new_plane_state->hwindex); } group_free_planes[index] = 0xff & ~used_planes; @@ -246,40 +252,45 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, } /* Reallocate hardware planes for each plane that needs it. 
*/ - for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { - struct rcar_du_plane_state *plane_state; + for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state, + new_drm_plane_state, i) { + struct rcar_du_plane_state *old_plane_state; + struct rcar_du_plane_state *new_plane_state; struct rcar_du_plane *plane; unsigned int crtc_planes; unsigned int free; int idx; plane = to_rcar_plane(drm_plane); - plane_state = to_rcar_plane_state(drm_plane_state); + old_plane_state = to_rcar_plane_state(old_drm_plane_state); + new_plane_state = to_rcar_plane_state(new_drm_plane_state); dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); - /* Skip planes that are being disabled or don't need to be + /* + * Skip planes that are being disabled or don't need to be * reallocated. */ - if (!plane_state->format || - !rcar_du_plane_needs_realloc(plane, plane_state)) + if (!new_plane_state->format || + !rcar_du_plane_needs_realloc(old_plane_state, new_plane_state)) continue; - /* Try to allocate the plane from the free planes currently + /* + * Try to allocate the plane from the free planes currently * associated with the target CRTC to avoid restarting the CRTC * group and thus minimize flicker. If it fails fall back to * allocating from all free planes. */ - crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2 + crtc_planes = to_rcar_crtc(new_plane_state->state.crtc)->index % 2 ? plane->group->dptsr_planes : ~plane->group->dptsr_planes; free = group_free_planes[plane->group->index]; - idx = rcar_du_plane_hwalloc(plane, plane_state, + idx = rcar_du_plane_hwalloc(plane, new_plane_state, free & crtc_planes); if (idx < 0) - idx = rcar_du_plane_hwalloc(plane, plane_state, + idx = rcar_du_plane_hwalloc(plane, new_plane_state, free); if (idx < 0) { dev_dbg(rcdu->dev, "%s: no available hardware plane\n", @@ -288,12 +299,12 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, } dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n", - __func__, plane_state->format->planes, idx); + __func__, new_plane_state->format->planes, idx); - plane_state->hwindex = idx; + new_plane_state->hwindex = idx; group_free_planes[plane->group->index] &= - ~rcar_du_plane_hwmask(plane_state); + ~rcar_du_plane_hwmask(new_plane_state); dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n", __func__, plane->group->index, @@ -351,14 +362,16 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp, dma[1] = 0; } - /* Memory pitch (expressed in pixels). Must be doubled for interlaced + /* + * Memory pitch (expressed in pixels). Must be doubled for interlaced * operation with 32bpp formats. */ rcar_du_plane_write(rgrp, index, PnMWR, (interlaced && state->format->bpp == 32) ? pitch * 2 : pitch); - /* The Y position is expressed in raster line units and must be doubled + /* + * The Y position is expressed in raster line units and must be doubled * for 32bpp formats, according to the R8A7790 datasheet. No mention of * doubling the Y position is found in the R8A7779 datasheet, but the * rule seems to apply there as well. @@ -396,7 +409,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp, u32 colorkey; u32 pnmr; - /* The PnALPHAR register controls alpha-blending in 16bpp formats + /* + * The PnALPHAR register controls alpha-blending in 16bpp formats * (ARGB1555 and XRGB1555). 
* * For ARGB, set the alpha value to 0, and enable alpha-blending when @@ -413,7 +427,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp, pnmr = PnMR_BM_MD | state->format->pnmr; - /* Disable color keying when requested. YUV formats have the + /* + * Disable color keying when requested. YUV formats have the * PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying * automatically. */ @@ -457,7 +472,8 @@ static void rcar_du_plane_setup_format_gen2(struct rcar_du_group *rgrp, u32 ddcr2 = PnDDCR2_CODE; u32 ddcr4; - /* Data format + /* + * Data format * * The data format is selected by the DDDF field in PnMR and the EDF * field in DDCR4. @@ -589,7 +605,8 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane, rcar_du_plane_setup(rplane); - /* Check whether the source has changed from memory to live source or + /* + * Check whether the source has changed from memory to live source or * from live source to memory. The source has been configured by the * VSPS bit in the PnDDCR4 register. Although the datasheet states that * the bit is updated during vertical blanking, it seems that updates @@ -725,7 +742,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) unsigned int i; int ret; - /* Create one primary plane per CRTC in this group and seven overlay + /* + * Create one primary plane per CRTC in this group and seven overlay * planes. */ rgrp->num_planes = rgrp->num_crtcs + 7; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h index 8b91dd3a46e4..f62e09f195de 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h @@ -20,7 +20,8 @@ struct rcar_du_format_info; struct rcar_du_group; -/* The RCAR DU has 8 hardware planes, shared between primary and overlay planes. +/* + * The RCAR DU has 8 hardware planes, shared between primary and overlay planes. * As using overlay planes requires at least one of the CRTCs being enabled, no * more than 7 overlay planes can be available. We thus create 1 primary plane * per CRTC and 7 overlay planes, for a total of up to 9 KMS planes. diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 4e46e479e961..2c96147bc444 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -19,6 +19,7 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_plane_helper.h> +#include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of_platform.h> #include <linux/scatterlist.h> @@ -30,11 +31,15 @@ #include "rcar_du_kms.h" #include "rcar_du_vsp.h" -static void rcar_du_vsp_complete(void *private) +static void rcar_du_vsp_complete(void *private, bool completed) { struct rcar_du_crtc *crtc = private; - rcar_du_crtc_finish_page_flip(crtc); + if (crtc->vblank_enable) + drm_crtc_handle_vblank(&crtc->crtc); + + if (completed) + rcar_du_crtc_finish_page_flip(crtc); } void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) @@ -73,7 +78,8 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) __rcar_du_plane_setup(crtc->group, &state); - /* Ensure that the plane source configuration takes effect by requesting + /* + * Ensure that the plane source configuration takes effect by requesting * a restart of the group. See rcar_du_plane_atomic_update() for a more * detailed explanation. 
* @@ -81,22 +87,22 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) */ crtc->group->need_restart = true; - vsp1_du_setup_lif(crtc->vsp->vsp, &cfg); + vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, &cfg); } void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { - vsp1_du_setup_lif(crtc->vsp->vsp, NULL); + vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL); } void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { - vsp1_du_atomic_begin(crtc->vsp->vsp); + vsp1_du_atomic_begin(crtc->vsp->vsp, crtc->vsp_pipe); } void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { - vsp1_du_atomic_flush(crtc->vsp->vsp); + vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe); } /* Keep the two tables in sync. */ @@ -162,6 +168,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) { struct rcar_du_vsp_plane_state *state = to_rcar_vsp_plane_state(plane->plane.state); + struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc); struct drm_framebuffer *fb = plane->plane.state->fb; struct vsp1_du_atomic_config cfg = { .pixelformat = 0, @@ -192,7 +199,8 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) } } - vsp1_du_atomic_update(plane->vsp->vsp, plane->index, &cfg); + vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe, + plane->index, &cfg); } static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, @@ -288,11 +296,13 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane); + struct rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc); if (plane->state->crtc) rcar_du_vsp_plane_setup(rplane); else - vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, NULL); + vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe, + rplane->index, NULL); } static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = { @@ -390,23 +400,17 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = { .atomic_get_property = rcar_du_vsp_plane_atomic_get_property, }; -int rcar_du_vsp_init(struct rcar_du_vsp *vsp) +int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, + unsigned int crtcs) { struct rcar_du_device *rcdu = vsp->dev; struct platform_device *pdev; - struct device_node *np; + unsigned int num_crtcs = hweight32(crtcs); unsigned int i; int ret; /* Find the VSP device and initialize it. */ - np = of_parse_phandle(rcdu->dev->of_node, "vsps", vsp->index); - if (!np) { - dev_err(rcdu->dev, "vsps node not found\n"); - return -ENXIO; - } - pdev = of_find_device_by_node(np); - of_node_put(np); if (!pdev) return -ENXIO; @@ -416,7 +420,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp) if (ret < 0) return ret; - /* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to + /* + * The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to * 4 RPFs. */ vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4; @@ -427,15 +432,15 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp) return -ENOMEM; for (i = 0; i < vsp->num_planes; ++i) { - enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY - : DRM_PLANE_TYPE_PRIMARY; + enum drm_plane_type type = i < num_crtcs + ? 
DRM_PLANE_TYPE_PRIMARY + : DRM_PLANE_TYPE_OVERLAY; struct rcar_du_vsp_plane *plane = &vsp->planes[i]; plane->vsp = vsp; plane->index = i; - ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, - 1 << vsp->index, + ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, &rcar_du_vsp_plane_funcs, formats_kms, ARRAY_SIZE(formats_kms), diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h index 8861661590ff..f876c512163c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h @@ -64,13 +64,19 @@ to_rcar_vsp_plane_state(struct drm_plane_state *state) } #ifdef CONFIG_DRM_RCAR_VSP -int rcar_du_vsp_init(struct rcar_du_vsp *vsp); +int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, + unsigned int crtcs); void rcar_du_vsp_enable(struct rcar_du_crtc *crtc); void rcar_du_vsp_disable(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc); #else -static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return -ENXIO; }; +static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp, + struct device_node *np, + unsigned int crtcs) +{ + return -ENXIO; +} static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { }; diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c index 7539626b8ebd..dc85b53d58ef 100644 --- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c +++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c @@ -45,7 +45,7 @@ static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, { const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params; - for (; params && params->mpixelclock != ~0UL; ++params) { + for (; params->mpixelclock != ~0UL; ++params) { if (mpixelclock <= params->mpixelclock) break; } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 50c41c0a50ef..dcc539ba85d6 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -5,6 +5,10 @@ config DRM_ROCKCHIP select DRM_KMS_HELPER select DRM_PANEL select VIDEOMODE_HELPERS + select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP + select DRM_DW_HDMI if ROCKCHIP_DW_HDMI + select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI + select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC help Choose this option if you have a Rockchip soc chipset. This driver provides kernel mode setting and buffer @@ -12,10 +16,10 @@ config DRM_ROCKCHIP 2D or 3D acceleration; acceleration is performed by other IP found on the SoC. +if DRM_ROCKCHIP + config ROCKCHIP_ANALOGIX_DP bool "Rockchip specific extensions for Analogix DP driver" - depends on DRM_ROCKCHIP - select DRM_ANALOGIX_DP help This selects support for Rockchip SoC specific extensions for the Analogix Core DP driver. If you want to enable DP @@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP config ROCKCHIP_CDN_DP bool "Rockchip cdn DP" - depends on DRM_ROCKCHIP - depends on EXTCON - select SND_SOC_HDMI_CODEC if SND_SOC + depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) help This selects support for Rockchip SoC specific extensions for the cdn DP driver. 
If you want to enable DP on @@ -34,8 +36,6 @@ config ROCKCHIP_DW_HDMI bool "Rockchip specific extensions for Synopsys DW HDMI" - depends on DRM_ROCKCHIP - select DRM_DW_HDMI help This selects support for Rockchip SoC specific extensions for the Synopsys DesignWare HDMI driver. If you want to @@ -44,8 +44,6 @@ config ROCKCHIP_DW_MIPI_DSI bool "Rockchip specific extensions for Synopsys DW MIPI DSI" - depends on DRM_ROCKCHIP - select DRM_MIPI_DSI help This selects support for Rockchip SoC specific extensions for the Synopsys DesignWare MIPI DSI driver. If you want to @@ -54,8 +52,9 @@ config ROCKCHIP_INNO_HDMI bool "Rockchip specific extensions for Innosilicon HDMI" - depends on DRM_ROCKCHIP help This selects support for Rockchip SoC specific extensions for the Innosilicon HDMI driver. If you want to enable HDMI on RK3036 based SoC, you should select this option. + +endif diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index b442d12f2f7d..a01e5c90fd87 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -294,10 +294,87 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma) vma->vm_private_data = NULL; } +static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, + unsigned long offset, + void *buf, int len, int write) +{ + unsigned long page = offset >> PAGE_SHIFT; + unsigned long bytes_left = len; + int ret; + + /* Copy a page at a time, that way no extra virtual address + * mapping is needed + */ + offset -= page << PAGE_SHIFT; + do { + unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); + struct ttm_bo_kmap_obj map; + void *ptr; + bool is_iomem; + + ret = ttm_bo_kmap(bo, page, 1, &map); + if (ret) + return ret; + + ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; + WARN_ON_ONCE(is_iomem); + if (write) + memcpy(ptr, buf, bytes); + else + memcpy(buf, ptr, bytes); + ttm_bo_kunmap(&map); + + page++; + bytes_left -= bytes; + offset = 0; + } while (bytes_left); + + return len; +} + +static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write) +{ + unsigned long offset = addr - vma->vm_start; + struct ttm_buffer_object *bo = vma->vm_private_data; + int ret; + + if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages) + return -EIO; + + ret = ttm_bo_reserve(bo, true, false, NULL); + if (ret) + return ret; + + switch (bo->mem.mem_type) { + case TTM_PL_SYSTEM: + if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { + ret = ttm_tt_swapin(bo->ttm); + if (unlikely(ret != 0)) + break; + } + /* fall through */ + case TTM_PL_TT: + ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); + break; + default: + if (bo->bdev->driver->access_memory) + ret = bo->bdev->driver->access_memory( + bo, offset, buf, len, write); + else + ret = -EIO; + } + + ttm_bo_unreserve(bo); + + return ret; +} + static const struct vm_operations_struct ttm_bo_vm_ops = { .fault = ttm_bo_vm_fault, .open = ttm_bo_vm_open, - .close = ttm_bo_vm_close + .close = ttm_bo_vm_close, + .access = ttm_bo_vm_access }; static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 35bf781e418e..c7056322211c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -30,49 +30,49 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_page_alloc.h> -static
struct ttm_place vram_placement_flags = { +static const struct ttm_place vram_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED }; -static struct ttm_place vram_ne_placement_flags = { +static const struct ttm_place vram_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place sys_placement_flags = { +static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED }; -static struct ttm_place sys_ne_placement_flags = { +static const struct ttm_place sys_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place gmr_placement_flags = { +static const struct ttm_place gmr_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED }; -static struct ttm_place gmr_ne_placement_flags = { +static const struct ttm_place gmr_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place mob_placement_flags = { +static const struct ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED }; -static struct ttm_place mob_ne_placement_flags = { +static const struct ttm_place mob_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT @@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = { .busy_placement = &vram_placement_flags }; -static struct ttm_place vram_gmr_placement_flags[] = { +static const struct ttm_place vram_gmr_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = { } }; -static struct ttm_place gmr_vram_placement_flags[] = { +static const struct ttm_place gmr_vram_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = { .busy_placement = &gmr_placement_flags }; -static struct ttm_place vram_gmr_ne_placement_flags[] = { +static const struct ttm_place vram_gmr_ne_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = { .busy_placement = &sys_ne_placement_flags }; -static struct ttm_place evictable_placement_flags[] = { +static const struct ttm_place evictable_placement_flags[] = { { .fpfn = 0, .lpfn = 0, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 99a7f4ab7d97..86178796de6c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, if (ret) return ret; - header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, - &header->handle); + header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, + &header->handle); if (!header->cb_header) { ret = -ENOMEM; goto out_no_cb_header; @@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, cb_hdr = header->cb_header; offset = header->node.start << PAGE_SHIFT; header->cmd = man->map + offset; - memset(cb_hdr, 0, sizeof(*cb_hdr)); if (man->using_mob) { cb_hdr->flags = SVGA_CB_FLAG_MOB; cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; @@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) return -ENOMEM; - dheader = 
dma_pool_alloc(man->dheaders, GFP_KERNEL, - &header->handle); + dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, + &header->handle); if (!dheader) return -ENOMEM; @@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, cb_hdr = &dheader->cb_header; header->cb_header = cb_hdr; header->cmd = dheader->cmd; - memset(dheader, 0, sizeof(*dheader)); cb_hdr->status = SVGA_CB_STATUS_NONE; cb_hdr->flags = SVGA_CB_FLAG_NONE; cb_hdr->ptr.pa = (u64)header->handle + diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 1f013d45c9e9..36c7b6c839c0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, int ret; cres = kzalloc(sizeof(*cres), GFP_KERNEL); - if (unlikely(cres == NULL)) + if (unlikely(!cres)) return -ENOMEM; cres->hash.key = user_key | (res_type << 24); @@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) int ret; man = kzalloc(sizeof(*man), GFP_KERNEL); - if (man == NULL) + if (!man) return ERR_PTR(-ENOMEM); man->dev_priv = dev_priv; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index bcc6d4136c87..4212b3e673bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { uctx->cotables[i] = vmw_cotable_alloc(dev_priv, &uctx->res, i); - if (unlikely(uctx->cotables[i] == NULL)) { - ret = -ENOMEM; + if (unlikely(IS_ERR(uctx->cotables[i]))) { + ret = PTR_ERR(uctx->cotables[i]); goto out_cotables; } } @@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data, } ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (unlikely(ctx == NULL)) { + if (unlikely(!ctx)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_context_size); ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 6c026d75c180..d87861bbe971 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, return ERR_PTR(ret); vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); - if (unlikely(vcotbl == NULL)) { + if (unlikely(!vcotbl)) { ret = -ENOMEM; goto out_no_alloc; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 204bf181b69e..8be26509a9aa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { DRM_AUTH | DRM_RENDER_ALLOW), }; -static struct pci_device_id vmw_pci_id_list[] = { +static const struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; @@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) char host_log[100] = {0}; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (unlikely(dev_priv == NULL)) { + if (unlikely(!dev_priv)) { DRM_ERROR("Failed allocating a device private struct.\n"); return -ENOMEM; } @@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) int ret = -ENOMEM; vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); - if (unlikely(vmw_fp == NULL)) + if (unlikely(!vmw_fp)) 
return ret; vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); @@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev, struct vmw_master *vmaster; vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); - if (unlikely(vmaster == NULL)) + if (unlikely(!vmaster)) return -ENOMEM; vmw_master_init(vmaster); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c7b53d987f06..2cfb3c93f42a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, } node = kzalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(node == NULL)) { + if (unlikely(!node)) { DRM_ERROR("Failed to allocate a resource validation " "entry.\n"); return -ENOMEM; @@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list, struct vmw_resource_relocation *rel; rel = kmalloc(sizeof(*rel), GFP_KERNEL); - if (unlikely(rel == NULL)) { + if (unlikely(!rel)) { DRM_ERROR("Failed to allocate a resource relocation.\n"); return -ENOMEM; } @@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - return capable(CAP_SYS_ADMIN) ? : -EINVAL; + return -EINVAL; } static int vmw_cmd_ok(struct vmw_private *dev_priv, @@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, /** * vmw_cmd_dx_ia_set_vertex_buffers - Validate an - * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command. + * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 6b2708b4eafe..b8bc5bc7de7e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) { struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); - if (unlikely(fman == NULL)) + if (unlikely(!fman)) return NULL; fman->dev_priv = dev_priv; @@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman, int ret; fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (unlikely(fence == NULL)) + if (unlikely(!fence)) return -ENOMEM; ret = vmw_fence_obj_init(fman, fence, seqno, @@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv, return ret; ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); - if (unlikely(ufence == NULL)) { + if (unlikely(!ufence)) { ret = -ENOMEM; goto out_no_object; } @@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv, struct vmw_fence_manager *fman = fman_from_fence(fence); eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); - if (unlikely(eaction == NULL)) + if (unlikely(!eaction)) return -ENOMEM; eaction->event = event; @@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, int ret; event = kzalloc(sizeof(*event), GFP_KERNEL); - if (unlikely(event == NULL)) { + if (unlikely(!event)) { DRM_ERROR("Failed to allocate an event.\n"); ret = -ENOMEM; goto out_no_space; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c1900f4390a4..d2b03d4a3c86 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -121,7 +121,7 @@ static int 
vmw_gmrid_man_init(struct ttm_mem_type_manager *man, struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); - if (unlikely(gman == NULL)) + if (unlikely(!gman)) return -ENOMEM; spin_lock_init(&gman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 620180df1303..36dd7930bf5f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; + + if (plane->fb) { + hotspot_x += plane->fb->hot_x; + hotspot_y += plane->fb->hot_y; + } + du->cursor_surface = vps->surf; du->cursor_dmabuf = vps->dmabuf; @@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, vmw_cursor_update_position(dev_priv, true, du->cursor_x + hotspot_x, du->cursor_y + hotspot_y); + + du->core_hotspot_x = hotspot_x - du->hotspot_x; + du->core_hotspot_y = hotspot_y - du->hotspot_y; } else { DRM_ERROR("Failed to update cursor image\n"); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 941bcfd131ff..b17f08fc50d3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv) if (dev_priv->has_dx) { *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); - if (*otables == NULL) + if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); } else { *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), GFP_KERNEL); - if (*otables == NULL) + if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); @@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) { struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); - if (unlikely(mob == NULL)) + if (unlikely(!mob)) return NULL; mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6063c9636d4a..97000996b8dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, reply_len = ebx; reply = kzalloc(reply_len + 1, GFP_KERNEL); - if (reply == NULL) { + if (!reply) { DRM_ERROR("Cannot allocate memory for reply\n"); return -ENOMEM; } @@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param, msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; msg = kzalloc(msg_len, GFP_KERNEL); - if (msg == NULL) { + if (!msg) { DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); return -ENOMEM; } @@ -400,7 +400,7 @@ int vmw_host_log(const char *log) msg_len = strlen(log) + strlen("log ") + 1; msg = kzalloc(msg_len, GFP_KERNEL); - if (msg == NULL) { + if (!msg) { DRM_ERROR("Cannot allocate memory for log message\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 7d591f653dfa..a96f90f017d1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, int ret; user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(user_bo == NULL)) { + if (unlikely(!user_bo)) { DRM_ERROR("Failed to allocate a buffer.\n"); return -ENOMEM; } @@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct 
vmw_resource *res, } backup = kzalloc(sizeof(*backup), GFP_KERNEL); - if (unlikely(backup == NULL)) + if (unlikely(!backup)) return -ENOMEM; ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 68f135c5b0d8..9b832f136813 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, } ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); - if (unlikely(ushader == NULL)) { + if (unlikely(!ushader)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_shader_size); ret = -ENOMEM; @@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, } shader = kzalloc(sizeof(*shader), GFP_KERNEL); - if (unlikely(shader == NULL)) { + if (unlikely(!shader)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_size); ret = -ENOMEM; @@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, /* Allocate and pin a DMA buffer */ buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (unlikely(buf == NULL)) + if (unlikely(!buf)) return -ENOMEM; ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index c4de4ad0543b..ca3afae2db1f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1642,8 +1642,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) * something arbitrarily large and we will reject any layout * that doesn't fit prim_bb_mem later */ - dev->mode_config.max_width = 16384; - dev->mode_config.max_height = 16384; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; } vmw_kms_create_implicit_placement_property(dev_priv, false); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 2c58a390123a..778272514164 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev) return -ENOMEM; err = iommu_attach_device(host->domain, &pdev->dev); - if (err) + if (err == -ENODEV) { + iommu_domain_free(host->domain); + host->domain = NULL; + goto skip_iommu; + } else if (err) { goto fail_free_domain; + } geometry = &host->domain->geometry; @@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev) host->iova_end = geometry->aperture_end; } +skip_iommu: err = host1x_channel_list_init(&host->channel_list, host->info->nb_channels); if (err) { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 6fd01a692197..9017dcc14502 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = { #if IS_ENABLED(CONFIG_HID_ORTEK) { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) }, { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, #endif #if IS_ENABLED(CONFIG_HID_PANTHERLORD) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 3d911bfd91cf..c9ba4c6db74c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -824,6 +824,7 @@ #define USB_VENDOR_ID_ORTEK 0x05a4 #define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 +#define 
USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003 #define USB_VENDOR_ID_PLANTRONICS 0x047f diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c index 6620f15fec22..8783a064cdcf 100644 --- a/drivers/hid/hid-ortek.c +++ b/drivers/hid/hid-ortek.c @@ -5,6 +5,7 @@ * * Ortek PKB-1700 * Ortek WKB-2000 + * iHome IMAC-A210S * Skycable wireless presenter * * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> @@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { - hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n"); + hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n"); rdesc[55] = 0x92; } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { - hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n"); + hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n"); rdesc[53] = 0x65; } return rdesc; @@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, static const struct hid_device_id ortek_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) }, { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, { } }; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 76013eb5cb7f..c008847e0b20 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid) struct usbhid_device *usbhid = hid->driver_data; int res; + set_bit(HID_OPENED, &usbhid->iofl); + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) return 0; res = usb_autopm_get_interface(usbhid->intf); /* the device must be awake to reliably request remote wakeup */ - if (res < 0) + if (res < 0) { + clear_bit(HID_OPENED, &usbhid->iofl); return -EIO; + } usbhid->intf->needs_remote_wakeup = 1; set_bit(HID_RESUME_RUNNING, &usbhid->iofl); - set_bit(HID_OPENED, &usbhid->iofl); set_bit(HID_IN_POLLING, &usbhid->iofl); res = hid_start_in(hid); @@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return; - /* * Make sure we don't restart data acquisition due to * a resumption we no longer care about by avoiding racing * with hid_start_in(). 
*/ spin_lock_irq(&usbhid->lock); - clear_bit(HID_IN_POLLING, &usbhid->iofl); clear_bit(HID_OPENED, &usbhid->iofl); + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) + clear_bit(HID_IN_POLLING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) + return; + hid_cancel_delayed_stuff(usbhid); usb_kill_urb(usbhid->urbin); usbhid->intf->needs_remote_wakeup = 0; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 1006b230b236..65fa29591d21 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -983,7 +983,7 @@ config I2C_UNIPHIER_F config I2C_VERSATILE tristate "ARM Versatile/Realview I2C bus support" - depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST + depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST select I2C_ALGOBIT help Say yes if you want to support the I2C serial bus on ARM's Versatile diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2ea6d0d25a01..143a8fd582b4 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) } acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); + /* Some broken DSDTs use 1MiHz instead of 1MHz */ + if (acpi_speed == 1048576) + acpi_speed = 1000000; /* * Find bus speed from the "clock-frequency" device property, ACPI * or by using fast mode if neither is set. @@ -319,7 +322,8 @@ if (dev->clk_freq != 100000 && dev->clk_freq != 400000 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { dev_err(&pdev->dev, - "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported", + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", + dev->clk_freq); ret = -EINVAL; goto exit_reset; } diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 4842ec3a5451..a9126b3cda61 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap) dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); } +const struct acpi_device_id * +i2c_acpi_match_device(const struct acpi_device_id *matches, + struct i2c_client *client) +{ + if (!(client && matches)) + return NULL; + + return acpi_match_device(matches, &client->dev); +} + static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev) } EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); -static int i2c_acpi_match_adapter(struct device *dev, void *data) +static int i2c_acpi_find_match_adapter(struct device *dev, void *data) { struct i2c_adapter *adapter = i2c_verify_adapter(dev); @@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data) return ACPI_HANDLE(dev) == (acpi_handle)data; } -static int i2c_acpi_match_device(struct device *dev, void *data) +static int i2c_acpi_find_match_device(struct device *dev, void *data) { return ACPI_COMPANION(dev) == data; } @@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) struct device *dev; dev = bus_find_device(&i2c_bus_type, NULL, handle, - i2c_acpi_match_adapter); + i2c_acpi_find_match_adapter); return dev ?
i2c_verify_adapter(dev) : NULL; } @@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { struct device *dev; - dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device); + dev = bus_find_device(&i2c_bus_type, NULL, adev, + i2c_acpi_find_match_device); return dev ? i2c_verify_client(dev) : NULL; } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c89dac7fd2e7..12822a4b8f8f 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev) * Tree match table entry is supplied for the probing device. */ if (!driver->id_table && + !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && !i2c_of_match_device(dev->driver->of_match_table, client)) return -ENODEV; diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 3b63f5e5b89c..3d3d9bf02101 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags); int i2c_check_7bit_addr_validity_strict(unsigned short addr); #ifdef CONFIG_ACPI +const struct acpi_device_id * +i2c_acpi_match_device(const struct acpi_device_id *matches, + struct i2c_client *client); void i2c_acpi_register_devices(struct i2c_adapter *adap); #else /* CONFIG_ACPI */ static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } +static inline const struct acpi_device_id * +i2c_acpi_match_device(const struct acpi_device_id *matches, + struct i2c_client *client) +{ + return NULL; +} #endif /* CONFIG_ACPI */ extern struct notifier_block i2c_acpi_notifier; diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index 2c64d0e0740f..17121329bb79 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL different sets of pins at run-time. This driver can also be built as a module. If so, the module will be - called pinctrl-i2cmux. + called i2c-mux-pinctrl. 
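As an aside to the i2c-core-base.c change above: with i2c_acpi_match_device() in the probe path, a driver that enumerates purely through ACPI no longer needs a dummy id_table. A minimal sketch of such a driver follows; it is illustrative only, and every name in it, including the "ABCD0001" _HID, is hypothetical rather than taken from this series.

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	/* id is NULL when the match came from ACPI instead of an id_table */
	dev_info(&client->dev, "bound via %s match\n",
		 id ? "i2c id table" : "ACPI");
	return 0;
}

static const struct acpi_device_id foo_acpi_match[] = {
	{ "ABCD0001" },		/* hypothetical ACPI _HID */
	{ }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

static struct i2c_driver foo_driver = {
	.driver = {
		.name		  = "foo",
		.acpi_match_table = foo_acpi_match,
	},
	.probe = foo_probe,
};
module_i2c_driver(foo_driver);
MODULE_LICENSE("GPL");

Before the change, i2c_device_probe() returned -ENODEV for a driver like this unless it also carried an id_table or an OF match table.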
config I2C_MUX_REG tristate "Register-based I2C multiplexer" diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 6b5d3be283c4..807299dd45eb 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -193,7 +193,6 @@ struct bmc150_accel_data { struct regmap *regmap; int irq; struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; - atomic_t active_intr; struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; struct mutex mutex; u8 fifo_mode, watermark; @@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, goto out_fix_power_state; } - if (state) - atomic_inc(&data->active_intr); - else - atomic_dec(&data->active_intr); - return 0; out_fix_power_state: @@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev) struct bmc150_accel_data *data = iio_priv(indio_dev); mutex_lock(&data->mutex); - if (atomic_read(&data->active_intr)) - bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); + bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); bmc150_accel_fifo_set_mode(data); mutex_unlock(&data->mutex); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 07d1489cd457..e44f62bf9caa 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x02, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .en_mask = 0x08, }, }, + .sim = { + .addr = 0x24, + .value = BIT(0), + }, .multi_read_bit = false, .bootime = 2, }, @@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int1 = 0x04, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(1), + }, .multi_read_bit = true, .bootime = 2, /* guess */ }, @@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(7), + }, .multi_read_bit = false, .bootime = 2, /* guess */ }, @@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .addr_ihl = 0x22, .mask_ihl = 0x80, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int1 = 0x04, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(1), + }, .multi_read_bit = false, .bootime = 2, }, @@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x02, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index e0ea411a0b2d..c02b23d675cb 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ 
-22,6 +22,7 @@ #include <linux/iio/iio.h> #include <linux/iio/driver.h> +#include <linux/iopoll.h> #define ASPEED_RESOLUTION_BITS 10 #define ASPEED_CLOCKS_PER_SAMPLE 12 @@ -38,11 +39,17 @@ #define ASPEED_ENGINE_ENABLE BIT(0) +#define ASPEED_ADC_CTRL_INIT_RDY BIT(8) + +#define ASPEED_ADC_INIT_POLLING_TIME 500 +#define ASPEED_ADC_INIT_TIMEOUT 500000 + struct aspeed_adc_model_data { const char *model_name; unsigned int min_sampling_rate; // Hz unsigned int max_sampling_rate; // Hz unsigned int vref_voltage; // mV + bool wait_init_sequence; }; struct aspeed_adc_data { @@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev) goto scaler_error; } + model_data = of_device_get_match_data(&pdev->dev); + + if (model_data->wait_init_sequence) { + /* Enable engine in normal mode. */ + writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE, + data->base + ASPEED_REG_ENGINE_CONTROL); + + /* Wait for initial sequence complete. */ + ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL, + adc_engine_control_reg_val, + adc_engine_control_reg_val & + ASPEED_ADC_CTRL_INIT_RDY, + ASPEED_ADC_INIT_POLLING_TIME, + ASPEED_ADC_INIT_TIMEOUT); + if (ret) + goto scaler_error; + } + /* Start all channels in normal mode. */ ret = clk_prepare_enable(data->clk_scaler->clk); if (ret) @@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = { .vref_voltage = 1800, // mV .min_sampling_rate = 1, .max_sampling_rate = 1000000, + .wait_init_sequence = true, }; static const struct of_device_id aspeed_adc_matches[] = { diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 64799ad7ebad..462a99c13e7a 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -28,6 +28,8 @@ #include <linux/iio/driver.h> #define AXP288_ADC_EN_MASK 0xF1 +#define AXP288_ADC_TS_PIN_GPADC 0xF2 +#define AXP288_ADC_TS_PIN_ON 0xF3 enum axp288_adc_id { AXP288_ADC_TS, @@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address, return IIO_VAL_INT; } +static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, + unsigned long address) +{ + int ret; + + /* channels other than GPADC do not need to switch TS pin */ + if (address != AXP288_GP_ADC_H) + return 0; + + ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); + if (ret) + return ret; + + /* When switching to the GPADC pin give things some time to settle */ + if (mode == AXP288_ADC_TS_PIN_GPADC) + usleep_range(6000, 10000); + + return 0; +} + static int axp288_adc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); switch (mask) { case IIO_CHAN_INFO_RAW: + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, + chan->address)) { + dev_err(&indio_dev->dev, "GPADC mode\n"); + ret = -EINVAL; + break; + } ret = axp288_adc_read_channel(val, chan->address, info->regmap); + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, + chan->address)) + dev_err(&indio_dev->dev, "TS pin restore\n"); break; default: ret = -EINVAL; @@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, return ret; } +static int axp288_adc_set_state(struct regmap *regmap) +{ + /* ADC should be always enabled for internal FG to function */ + if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) + return -EIO; + + return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); +} + 
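The aspeed_adc change above gates channel start on an init-ready bit using readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register into a caller-supplied variable until a condition holds, sleeping between reads and returning -ETIMEDOUT once the budget is spent. A sketch of the call shape against a bare control register, with BIT(8) standing in for the ready flag as in the patch:

    #include <linux/io.h>
    #include <linux/iopoll.h>

    /* Poll CTRL until the ready bit comes up: sleep ~500us between
     * reads, give up after 500ms. Returns 0 on success, -ETIMEDOUT
     * on failure; the last value read is left in 'reg' either way.
     */
    static int wait_engine_ready(void __iomem *ctrl)
    {
            u32 reg;

            return readl_poll_timeout(ctrl, reg, reg & BIT(8),
                                      500, 500000);
    }

The same pattern with the _atomic suffix (readl_poll_timeout_atomic) is what the mtk_iommu TLB-sync path further down in this series relies on.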
static const struct iio_info axp288_adc_iio_info = { .read_raw = &axp288_adc_read_raw, .driver_module = THIS_MODULE, @@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev) * Set ADC to enabled state at all time, including system suspend. * otherwise internal fuel gauge functionality may be affected. */ - ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); + ret = axp288_adc_set_state(axp20x->regmap); if (ret) { dev_err(&pdev->dev, "unable to enable ADC device\n"); return ret; diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c index 81d4c39e414a..137f577d9432 100644 --- a/drivers/iio/adc/sun4i-gpadc-iio.c +++ b/drivers/iio/adc/sun4i-gpadc-iio.c @@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val, err: pm_runtime_put_autosuspend(indio_dev->dev.parent); + disable_irq(irq); mutex_unlock(&info->mutex); return ret; @@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id) complete(&info->completion); out: - disable_irq_nosync(info->temp_data_irq); return IRQ_HANDLED; } @@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id) complete(&info->completion); out: - disable_irq_nosync(info->fifo_data_irq); return IRQ_HANDLED; } diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 01fc76f7d660..c168e0db329a 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -77,7 +77,7 @@ #define VF610_ADC_ADSTS_MASK 0x300 #define VF610_ADC_ADLPC_EN 0x80 #define VF610_ADC_ADHSC_EN 0x400 -#define VF610_ADC_REFSEL_VALT 0x100 +#define VF610_ADC_REFSEL_VALT 0x800 #define VF610_ADC_REFSEL_VBG 0x1000 #define VF610_ADC_ADTRG_HARD 0x2000 #define VF610_ADC_AVGS_8 0x4000 diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 79c8c7cd70d5..6e6a1ecc99dd 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -550,6 +550,31 @@ out: } EXPORT_SYMBOL(st_sensors_read_info_raw); +static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, + const struct st_sensor_settings *sensor_settings) +{ + struct st_sensor_data *sdata = iio_priv(indio_dev); + struct device_node *np = sdata->dev->of_node; + struct st_sensors_platform_data *pdata; + + pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data; + if (((np && of_property_read_bool(np, "spi-3wire")) || + (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) { + int err; + + err = sdata->tf->write_byte(&sdata->tb, sdata->dev, + sensor_settings->sim.addr, + sensor_settings->sim.value); + if (err < 0) { + dev_err(&indio_dev->dev, + "failed to init interface mode\n"); + return err; + } + } + + return 0; +} + int st_sensors_check_device_support(struct iio_dev *indio_dev, int num_sensors_list, const struct st_sensor_settings *sensor_settings) @@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev, return -ENODEV; } + err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); + if (err < 0) + return err; + if (sensor_settings[i].wai_addr) { err = sdata->tf->read_byte(&sdata->tb, sdata->dev, sensor_settings[i].wai_addr, &wai); diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index e7d4ea75e007..7599693f7fe9 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c @@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int 
irq, void *private) struct tsl2563_chip *chip = iio_priv(dev_info); iio_push_event(dev_info, - IIO_UNMOD_EVENT_CODE(IIO_LIGHT, + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index aa61ec15c139..f1bce05ffa13 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, - .multi_read_bit = true, + .multi_read_bit = false, .bootime = 2, }, }; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 01236cef7bfb..437522ca97b4 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -61,6 +61,7 @@ struct addr_req { void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context); unsigned long timeout; + struct delayed_work work; int status; u32 seq; }; @@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr, } EXPORT_SYMBOL(rdma_translate_ip); -static void set_timeout(unsigned long time) +static void set_timeout(struct delayed_work *delayed_work, unsigned long time) { unsigned long delay; @@ -303,7 +304,7 @@ static void set_timeout(unsigned long time) if ((long)delay < 0) delay = 0; - mod_delayed_work(addr_wq, &work, delay); + mod_delayed_work(addr_wq, delayed_work, delay); } static void queue_req(struct addr_req *req) @@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req) list_add(&req->list, &temp_req->list); - if (req_list.next == &req->list) - set_timeout(req->timeout); + set_timeout(&req->work, req->timeout); mutex_unlock(&lock); } @@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in, return ret; } +static void process_one_req(struct work_struct *_work) +{ + struct addr_req *req; + struct sockaddr *src_in, *dst_in; + + mutex_lock(&lock); + req = container_of(_work, struct addr_req, work.work); + + if (req->status == -ENODATA) { + src_in = (struct sockaddr *)&req->src_addr; + dst_in = (struct sockaddr *)&req->dst_addr; + req->status = addr_resolve(src_in, dst_in, req->addr, + true, req->seq); + if (req->status && time_after_eq(jiffies, req->timeout)) { + req->status = -ETIMEDOUT; + } else if (req->status == -ENODATA) { + /* requeue the work for retrying again */ + set_timeout(&req->work, req->timeout); + mutex_unlock(&lock); + return; + } + } + list_del(&req->list); + mutex_unlock(&lock); + + req->callback(req->status, (struct sockaddr *)&req->src_addr, + req->addr, req->context); + put_client(req->client); + kfree(req); +} + static void process_req(struct work_struct *work) { struct addr_req *req, *temp_req; @@ -591,20 +622,23 @@ static void process_req(struct work_struct *work) true, req->seq); if (req->status && time_after_eq(jiffies, req->timeout)) req->status = -ETIMEDOUT; - else if (req->status == -ENODATA) + else if (req->status == -ENODATA) { + set_timeout(&req->work, req->timeout); continue; + } } list_move_tail(&req->list, &done_list); } - if (!list_empty(&req_list)) { - req = list_entry(req_list.next, struct addr_req, list); - set_timeout(req->timeout); - } mutex_unlock(&lock); list_for_each_entry_safe(req, temp_req, &done_list, list) { list_del(&req->list); + /* It is safe to cancel other work items from this work item + * because at a time there can be only one work item running + * with this single threaded work queue. 
+ */ + cancel_delayed_work(&req->work); req->callback(req->status, (struct sockaddr *) &req->src_addr, req->addr, req->context); put_client(req->client); @@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client, req->context = context; req->client = client; atomic_inc(&client->refcount); + INIT_DELAYED_WORK(&req->work, process_one_req); req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); @@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) req->status = -ECANCELED; req->timeout = jiffies; list_move(&req->list, &req_list); - set_timeout(req->timeout); + set_timeout(&req->work, req->timeout); break; } } @@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event, if (event == NETEVENT_NEIGH_UPDATE) { struct neighbour *neigh = ctx; - if (neigh->nud_state & NUD_VALID) { - set_timeout(jiffies); - } + if (neigh->nud_state & NUD_VALID) + set_timeout(&work, jiffies); } return 0; } @@ -820,7 +854,7 @@ static struct notifier_block nb = { int addr_init(void) { - addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0); + addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); if (!addr_wq) return -ENOMEM; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 2c98533a0203..c551d2b275fd 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, int out_len) { struct ib_uverbs_resize_cq cmd; - struct ib_uverbs_resize_cq_resp resp; + struct ib_uverbs_resize_cq_resp resp = {}; struct ib_udata udata; struct ib_cq *cq; int ret = -EINVAL; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 3d2609608f58..c023e2c81b8f 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref) if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); + kobject_put(&file->device->kobj); kfree(file); } @@ -917,7 +918,6 @@ err: static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; - struct ib_uverbs_device *dev = file->device; mutex_lock(&file->cleanup_mutex); if (file->ucontext) { @@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) ib_uverbs_release_async_event_file); kref_put(&file->ref, ib_uverbs_release_file); - kobject_put(&dev->kobj); return 0; } diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index fb98ed67d5bc..7f8fe443df46 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -895,7 +895,6 @@ static const struct { } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, - [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .req_param = { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 23fad6d96944..2540b65e242c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) continue; free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); - if (IS_ERR(free_mr->mr_free_qp[i])) { + if (!free_mr->mr_free_qp[i]) { dev_err(dev, "Create loop qp failed!\n"); 
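The hns_roce fix directly above swaps an IS_ERR() test for a NULL test because hns_roce_v1_create_lp_qp() signals failure with NULL rather than ERR_PTR(). The two conventions are not interchangeable: IS_ERR() recognizes only pointer values in the reserved top error range, so a NULL return passes it untouched. A short reminder sketch:

    #include <linux/err.h>

    /* IS_ERR() is true only for ERR_PTR(-errno) style values; a NULL
     * from a "NULL on failure" allocator sails straight through it,
     * which is the bug the hunk above corrects.
     */
    static int check_result(void *p)
    {
            if (IS_ERR(p))          /* ERR_PTR convention */
                    return PTR_ERR(p);
            if (!p)                 /* NULL convention needs its own test */
                    return -ENOMEM;
            /* IS_ERR_OR_NULL(p) covers callees mixing both conventions */
            return 0;
    }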
goto create_lp_qp_failed; } diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index ae0746754008..3d701c7a4c91 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler( if (qp->ibqp.qp_type != IB_QPT_RC) { av = *wqe; - if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) + if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) *wqe += sizeof(struct mlx5_av); else *wqe += sizeof(struct mlx5_base_av); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ff50a7bd66d8..7ac25059c40f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -336,6 +336,7 @@ struct ipoib_dev_priv { unsigned long flags; struct rw_semaphore vlan_rwsem; + struct mutex mcast_mutex; struct rb_root path_tree; struct list_head path_list; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index f87d104837dc..d69410c2ed97 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, case IB_CM_REQ_RECEIVED: return ipoib_cm_req_handler(cm_id, event); case IB_CM_DREQ_RECEIVED: - p = cm_id->context; ib_send_cm_drep(cm_id, NULL, 0); /* Fall through */ case IB_CM_REJ_RECEIVED: diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 7871379342f4..184a22f48027 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = { IPOIB_NETDEV_STAT(tx_bytes), IPOIB_NETDEV_STAT(tx_errors), IPOIB_NETDEV_STAT(rx_dropped), - IPOIB_NETDEV_STAT(tx_dropped) + IPOIB_NETDEV_STAT(tx_dropped), + IPOIB_NETDEV_STAT(multicast), }; #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 57a9655e844d..2e075377242e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; skb->dev = dev; if ((dev->features & NETIF_F_RXCSUM) && @@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev) return pending; } +static void check_qp_movement_and_print(struct ipoib_dev_priv *priv, + struct ib_qp *qp, + enum ib_qp_state new_state) +{ + struct ib_qp_attr qp_attr; + struct ib_qp_init_attr query_init_attr; + int ret; + + ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr); + if (ret) { + ipoib_warn(priv, "%s: Failed to query QP\n", __func__); + return; + } + /* print according to the new-state and the previous state.*/ + if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) + ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n"); + else + ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n", + new_state, qp_attr.qp_state); +} + int ipoib_ib_dev_stop_default(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) */ qp_attr.qp_state = IB_QPS_ERR; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) - ipoib_warn(priv, "Failed 
to modify QP to ERROR state\n"); + check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR); /* Wait for all sends and receives to complete */ begin = jiffies; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4ce315c92b48..6c77df34869d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) int i, wait_flushed = 0; init_completion(&priv->ntbl.flushed); + set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); spin_lock_irqsave(&priv->lock, flags); @@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); init_completion(&priv->ntbl.deleted); - set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); /* Stop GC if called at init fail need to cancel work */ stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); @@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = { .ndo_tx_timeout = ipoib_timeout, .ndo_set_rx_mode = ipoib_set_mcast_list, .ndo_get_iflink = ipoib_get_iflink, + .ndo_get_stats64 = ipoib_get_stats, }; void ipoib_setup_common(struct net_device *dev) @@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev) priv->dev = dev; spin_lock_init(&priv->lock); init_rwsem(&priv->vlan_rwsem); + mutex_init(&priv->mcast_mutex); INIT_LIST_HEAD(&priv->path_list); INIT_LIST_HEAD(&priv->child_intfs); @@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format, priv->dev->dev_id = port - 1; result = ib_query_port(hca, port, &attr); - if (!result) - priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); - else { + if (result) { printk(KERN_WARNING "%s: ib_query_port %d failed\n", hca->name, port); goto device_init_failed; } + priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); + /* MTU will be reset when mcast join happens */ priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; @@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format, printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", hca->name, port, result); goto device_init_failed; - } else - memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); + } + + memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, + sizeof(union ib_gid)); set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); result = ipoib_dev_init(priv->dev, hca, port); - if (result < 0) { + if (result) { printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", hca->name, port, result); goto device_init_failed; @@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void) ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); #ifdef CONFIG_INFINIBAND_IPOIB_CM ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); + ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0); #endif /* diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 057f58e6afca..93e149efc1f5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev) int ipoib_mcast_stop_thread(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); - unsigned long flags; ipoib_dbg_mcast(priv, "stopping multicast thread\n"); - spin_lock_irqsave(&priv->lock, flags); - 
cancel_delayed_work(&priv->mcast_task); - spin_unlock_irqrestore(&priv->lock, flags); - - flush_workqueue(priv->wq); + cancel_delayed_work_sync(&priv->mcast_task); return 0; } @@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list) { struct ipoib_mcast *mcast, *tmcast; + /* + * make sure the in-flight joins have finished before we attempt + * to leave + */ + list_for_each_entry_safe(mcast, tmcast, remove_list, list) + if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + wait_for_completion(&mcast->done); + list_for_each_entry_safe(mcast, tmcast, remove_list, list) { ipoib_mcast_leave(mcast->dev, mcast); ipoib_mcast_free(mcast); @@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev) struct ipoib_mcast *mcast, *tmcast; unsigned long flags; + mutex_lock(&priv->mcast_mutex); ipoib_dbg_mcast(priv, "flushing multicast list\n"); spin_lock_irqsave(&priv->lock, flags); @@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - wait_for_completion(&mcast->done); - ipoib_mcast_remove_list(&remove_list); + mutex_unlock(&priv->mcast_mutex); } static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) @@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work) netif_addr_unlock(dev); local_irq_restore(flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - wait_for_completion(&mcast->done); - ipoib_mcast_remove_list(&remove_list); /* diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 688e77576e5a..354cbd6392cd 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4452,6 +4452,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) /* Setting */ irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); irte->hi.fields.vector = vcpu_pi_info->vector; + irte->lo.fields_vapic.ga_log_intr = 1; irte->lo.fields_vapic.guest_mode = 1; irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5cc597b383c7..372303700566 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2440,11 +2440,11 @@ static int __init state_next(void) break; case IOMMU_ACPI_FINISHED: early_enable_iommus(); - register_syscore_ops(&amd_iommu_syscore_ops); x86_platform.iommu_shutdown = disable_iommus; init_state = IOMMU_ENABLED; break; case IOMMU_ENABLED: + register_syscore_ops(&amd_iommu_syscore_ops); ret = amd_iommu_init_pci(); init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_PCI_INIT; enable_iommus_v2(); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index bc89b4d6c043..2d80fa8a0634 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -400,6 +400,8 @@ struct arm_smmu_device { u32 cavium_id_base; /* Specific to Cavium */ + spinlock_t global_sync_lock; + /* IOMMU core code handle */ struct iommu_device iommu; }; @@ -436,7 +438,7 @@ struct arm_smmu_domain { struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; struct mutex init_mutex; /* Protects smmu pointer */ - spinlock_t cb_lock; /* Serialises ATS1* ops */ + spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ struct iommu_domain domain; }; @@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) { void __iomem *base = ARM_SMMU_GR0(smmu); + unsigned long flags; + spin_lock_irqsave(&smmu->global_sync_lock, flags); __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, base + ARM_SMMU_GR0_sTLBGSTATUS); + spin_unlock_irqrestore(&smmu->global_sync_lock, flags); } static void arm_smmu_tlb_sync_context(void *cookie) @@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie) struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); + unsigned long flags; + spin_lock_irqsave(&smmu_domain->cb_lock, flags); __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, base + ARM_SMMU_CB_TLBSTATUS); + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); } static void arm_smmu_tlb_sync_vmid(void *cookie) @@ -1511,6 +1519,12 @@ static int arm_smmu_add_device(struct device *dev) if (using_legacy_binding) { ret = arm_smmu_register_legacy_master(dev, &smmu); + + /* + * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master() + * will allocate/initialise a new one. Thus we need to update fwspec for + * later use. 
+ */ fwspec = dev->iommu_fwspec; if (ret) goto out_free; @@ -1550,15 +1564,15 @@ static int arm_smmu_add_device(struct device *dev) ret = arm_smmu_master_alloc_smes(dev); if (ret) - goto out_free; + goto out_cfg_free; iommu_device_link(&smmu->iommu, dev); return 0; +out_cfg_free: + kfree(cfg); out_free: - if (fwspec) - kfree(fwspec->iommu_priv); iommu_fwspec_free(dev); return ret; } @@ -1925,6 +1939,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->num_mapping_groups = size; mutex_init(&smmu->stream_map_mutex); + spin_lock_init(&smmu->global_sync_lock); if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index af330f513653..d665d0dc16e8 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(prot & (IOMMU_READ | IOMMU_WRITE))) return 0; + if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) + return -ERANGE; + ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); /* * Synchronise all PTE updates for the new mapping before there's @@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); size_t unmapped; + if (WARN_ON(upper_32_bits(iova))) + return 0; + unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd); if (unmapped) io_pgtable_tlb_sync(&data->iop); diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index b182039862c5..e8018a308868 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) return 0; + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || + paddr >= (1ULL << data->iop.cfg.oas))) + return -ERANGE; + prot = arm_lpae_prot_to_pte(data, iommu_prot); ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); /* @@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, arm_lpae_iopte *ptep = data->pgd; int lvl = ARM_LPAE_START_LVL(data); + if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) + return 0; + unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); if (unmapped) io_pgtable_tlb_sync(&data->iop); diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 524263a7ae6f..a3e667077b14 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops); * @fmt: The page table format. * @cookie: An opaque token provided by the IOMMU driver and passed back to * any callback routines. - * @tlb_sync_pending: Private flag for optimising out redundant syncs. * @cfg: A copy of the page table configuration. * @ops: The page table operations in use for this set of page tables. 
*/ struct io_pgtable { enum io_pgtable_fmt fmt; void *cookie; - bool tlb_sync_pending; struct io_pgtable_cfg cfg; struct io_pgtable_ops ops; }; @@ -175,22 +173,17 @@ struct io_pgtable { static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { iop->cfg.tlb->tlb_flush_all(iop->cookie); - iop->tlb_sync_pending = true; } static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, unsigned long iova, size_t size, size_t granule, bool leaf) { iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); - iop->tlb_sync_pending = true; } static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) { - if (iop->tlb_sync_pending) { - iop->cfg.tlb->tlb_sync(iop->cookie); - iop->tlb_sync_pending = false; - } + iop->cfg.tlb->tlb_sync(iop->cookie); } /** diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 5d14cd15198d..91c6d367ab35 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A); writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); + data->tlb_flush_active = true; } static void mtk_iommu_tlb_sync(void *cookie) @@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie) int ret; u32 tmp; + /* Avoid timing out if there's nothing to wait for */ + if (!data->tlb_flush_active) + return; + ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp, tmp != 0, 10, 100000); if (ret) { @@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie) } /* Clear the CPE status */ writel_relaxed(0, data->base + REG_MMU_CPE_DONE); + data->tlb_flush_active = false; } static const struct iommu_gather_ops mtk_iommu_gather_ops = { diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 2a28eadeea0e..c06cc91b5d9a 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -47,6 +47,7 @@ struct mtk_iommu_data { struct iommu_group *m4u_group; struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ bool enable_4GB; + bool tlb_flush_active; struct iommu_device iommu; }; diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index 7b5fd8fb1761..aaca0b3d662e 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c @@ -44,7 +44,6 @@ struct procdata { char log_name[15]; /* log filename */ struct log_data *log_head, *log_tail; /* head and tail for queue */ int if_used; /* open count for interface */ - int volatile del_lock; /* lock for delete operations */ unsigned char logtmp[LOG_MAX_LINELEN]; wait_queue_head_t rd_queue; }; @@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp) { struct log_data *ib; struct procdata *pd = card->proclog; - int i; unsigned long flags; if (!pd) @@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp) else pd->log_tail->next = ib; /* follows existing messages */ pd->log_tail = ib; /* new tail */ - i = pd->del_lock++; /* get lock state */ - spin_unlock_irqrestore(&card->hysdn_lock, flags); /* delete old entrys */ - if (!i) - while (pd->log_head->next) { - if ((pd->log_head->usage_cnt <= 0) && - (pd->log_head->next->usage_cnt <= 0)) { - ib = pd->log_head; - pd->log_head = pd->log_head->next; - kfree(ib); - } else - break; - } /* pd->log_head->next */ - pd->del_lock--; /* release lock level */ + while (pd->log_head->next) { + if ((pd->log_head->usage_cnt <= 0) && + 
(pd->log_head->next->usage_cnt <= 0)) { + ib = pd->log_head; + pd->log_head = pd->log_head->next; + kfree(ib); + } else { + break; + } + } /* pd->log_head->next */ + + spin_unlock_irqrestore(&card->hysdn_lock, flags); + wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ } /* put_log_buffer */ diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 89b09c51ab7c..38a5bb764c7b 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1376,6 +1376,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; + bname[sizeof(bname)-1] = 0; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index c151c6daa67e..f63a110b7bcb 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm) char newname[10]; if (p) { - /* Slave-Name MUST not be empty */ - if (!strlen(p + 1)) + /* Slave-Name MUST not be empty or overflow 'newname' */ + if (strscpy(newname, p + 1, sizeof(newname)) <= 0) return NULL; - strcpy(newname, p + 1); *p = 0; /* Master must already exist */ if (!(n = isdn_net_findif(parm))) diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 5ecc154f6831..9bc32578a766 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -657,7 +657,7 @@ try: * be directed to disk. */ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, - struct ppa_addr ppa, int bio_iter) + struct ppa_addr ppa, int bio_iter, bool advanced_bio) { struct pblk *pblk = container_of(rb, struct pblk, rwb); struct pblk_rb_entry *entry; @@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, * filled with data from the cache). If part of the data resides on the * media, we will read later on */ - if (unlikely(!bio->bi_iter.bi_idx)) + if (unlikely(!advanced_bio)) bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); data = bio_data(bio); diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 4e5c48f3de62..d682e89e6493 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -26,7 +26,7 @@ */ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, sector_t lba, struct ppa_addr ppa, - int bio_iter) + int bio_iter, bool advanced_bio) { #ifdef CONFIG_NVM_DEBUG /* Callers must ensure that the ppa points to a cache address */ @@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, BUG_ON(!pblk_addr_in_cache(ppa)); #endif - return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter); + return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, + bio_iter, advanced_bio); } static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, @@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS]; sector_t blba = pblk_get_lba(bio); int nr_secs = rqd->nr_ppas; - int advanced_bio = 0; + bool advanced_bio = false; int i, j = 0; /* logic error: lba out-of-bounds. 
Ignore read request */ @@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, retry: if (pblk_ppa_empty(p)) { WARN_ON(test_and_set_bit(i, read_bitmap)); - continue; + + if (unlikely(!advanced_bio)) { + bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE); + advanced_bio = true; + } + + goto next; } /* Try to read from write buffer. The address is later checked * on the write buffer to prevent retrieving overwritten data. */ if (pblk_addr_in_cache(p)) { - if (!pblk_read_from_cache(pblk, bio, lba, p, i)) { + if (!pblk_read_from_cache(pblk, bio, lba, p, i, + advanced_bio)) { pblk_lookup_l2p_seq(pblk, &p, lba, 1); goto retry; } WARN_ON(test_and_set_bit(i, read_bitmap)); - advanced_bio = 1; + advanced_bio = true; #ifdef CONFIG_NVM_DEBUG atomic_long_inc(&pblk->cache_reads); #endif @@ -83,6 +91,7 @@ retry: rqd->ppa_list[j++] = p; } +next: if (advanced_bio) bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); } @@ -282,7 +291,7 @@ retry: * write buffer to prevent retrieving overwritten data. */ if (pblk_addr_in_cache(ppa)) { - if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) { + if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) { pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); goto retry; } diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 0c5692cc2f60..67e623bd5c2d 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio, struct list_head *list, unsigned int max); int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, - struct ppa_addr ppa, int bio_iter); + struct ppa_addr ppa, int bio_iter, bool advanced_bio); unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index ac91fd0d62c6..cbca5e51b975 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -92,7 +92,7 @@ static struct mbox_controller pcc_mbox_ctrl = {}; */ static struct mbox_chan *get_pcc_channel(int id) { - if (id < 0 || id > pcc_mbox_ctrl.num_chans) + if (id < 0 || id >= pcc_mbox_ctrl.num_chans) return ERR_PTR(-ENOENT); return &pcc_mbox_channels[id]; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 850ff6c67994..44f4a8ac95bd 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); */ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) { - blk_status_t a; - int f; + int a, f; unsigned long buffers_processed = 0; struct dm_buffer *b, *tmp; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 1b224aa9cf15..3acce09bba35 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1587,16 +1587,18 @@ retry: if (likely(ic->mode == 'J')) { if (dio->write) { unsigned next_entry, i, pos; - unsigned ws, we; + unsigned ws, we, range_sectors; - dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); + dio->range.n_sectors = min(dio->range.n_sectors, + ic->free_sectors << ic->sb->log2_sectors_per_block); if (unlikely(!dio->range.n_sectors)) goto sleep; - ic->free_sectors -= dio->range.n_sectors; + range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; + ic->free_sectors -= range_sectors; journal_section = ic->free_section; journal_entry = ic->free_section_entry; - next_entry = ic->free_section_entry + dio->range.n_sectors; + next_entry = 
ic->free_section_entry + range_sectors; ic->free_section_entry = next_entry % ic->journal_section_entries; ic->free_section += next_entry / ic->journal_section_entries; ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; @@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic) wraparound_section(ic, &ic->free_section); ic->n_uncommitted_sections++; } + WARN_ON(ic->journal_sections * ic->journal_section_entries != + (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors); } static void integrity_commit(struct work_struct *w) @@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, { unsigned i, j, n; struct journal_completion comp; + struct blk_plug plug; + + blk_start_plug(&plug); comp.ic = ic; comp.in_flight = (atomic_t)ATOMIC_INIT(1); @@ -1945,6 +1952,8 @@ skip_io: dm_bufio_write_dirty_buffers_async(ic->bufio); + blk_finish_plug(&plug); + complete_journal_op(&comp); wait_for_completion_io(&comp.comp); @@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "Block size doesn't match the information in superblock"; goto bad; } + if (!le32_to_cpu(ic->sb->journal_sections)) { + r = -EINVAL; + ti->error = "Corrupted superblock, journal_sections is 0"; + goto bad; + } /* make sure that ti->max_io_len doesn't overflow */ if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2e10c2f13a34..5bfe285ea9d1 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -208,6 +208,7 @@ struct raid_dev { #define RT_FLAG_RS_BITMAP_LOADED 2 #define RT_FLAG_UPDATE_SBS 3 #define RT_FLAG_RESHAPE_RS 4 +#define RT_FLAG_RS_SUSPENDED 5 /* Array elements of 64 bit needed for rebuild/failed disk bits */ #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) @@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout) if (__raid10_near_copies(layout) > 1) return "near"; - WARN_ON(__raid10_far_copies(layout) < 2); + if (__raid10_far_copies(layout) > 1) + return "far"; - return "far"; + return "unknown"; } /* Return md raid10 algorithm for @name */ @@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (!freshest) return 0; - if (validate_raid_redundancy(rs)) { - rs->ti->error = "Insufficient redundancy to activate array"; - return -EINVAL; - } - /* * Validation of the freshest device provides the source of * validation for the remaining devices. 
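In the dm-integrity do_journal_write() hunk a few files back, the journal I/O is now bracketed by a block plug: bios submitted by the current task are held back and handed to the request queue as a batch when blk_finish_plug() runs (or earlier, if the task sleeps). A sketch of the bracketing, with a hypothetical bio array supplying the loop body:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Submit a batch of bios behind one plug so the block layer can
     * merge and dispatch them together rather than one at a time.
     */
    static void submit_batch(struct bio **bios, unsigned int n)
    {
            struct blk_plug plug;
            unsigned int i;

            blk_start_plug(&plug);
            for (i = 0; i < n; i++)
                    submit_bio(bios[i]);
            blk_finish_plug(&plug); /* unplug: flush the batch */
    }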
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (super_validate(rs, freshest)) return -EINVAL; + if (validate_raid_redundancy(rs)) { + rs->ti->error = "Insufficient redundancy to activate array"; + return -EINVAL; + } + rdev_for_each(rdev, mddev) if (!test_bit(Journal, &rdev->flags) && rdev != freshest && @@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) } mddev_suspend(&rs->md); + set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ if (rs_is_raid456(rs)) { @@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti) { struct raid_set *rs = ti->private; - if (!rs->md.suspended) + if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) mddev_suspend(&rs->md); rs->md.ro = 1; @@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs) return r; /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ - if (mddev->suspended) + if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) mddev_resume(mddev); /* @@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs) } /* Suspend because a resume will happen in raid_resume() */ - if (!mddev->suspended) - mddev_suspend(mddev); + set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); + mddev_suspend(mddev); /* * Now reshape got set up, update superblocks to @@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti) if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - if (mddev->suspended) + if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) mddev_resume(mddev); } static struct target_type raid_target = { .name = "raid", - .version = {1, 11, 1}, + .version = {1, 12, 1}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index a39bcd9b982a..28a4071cdf85 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -20,6 +20,7 @@ #include <linux/atomic.h> #include <linux/blk-mq.h> #include <linux/mount.h> +#include <linux/dax.h> #define DM_MSG_PREFIX "table" @@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) return false; } +static int device_dax_write_cache_enabled(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct dax_device *dax_dev = dev->dax_dev; + + if (!dax_dev) + return false; + + if (dax_write_cache_enabled(dax_dev)) + return true; + return false; +} + +static int dm_table_supports_dax_write_cache(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, + device_dax_write_cache_enabled, NULL)) + return true; + } + + return false; +} + static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_write_cache(q, wc, fua); + if (dm_table_supports_dax_write_cache(t)) + dax_write_cache(t->md->dax_dev, true); + /* Ensure that all underlying devices are non-rotational. 
*/ if (dm_table_all_devices_attribute(t, device_is_nonrot)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 504ba3fa328b..e13f90832b6b 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) { unsigned n; - if (!fio->rs) { - fio->rs = mempool_alloc(v->fec->rs_pool, 0); - if (unlikely(!fio->rs)) { - DMERR("failed to allocate RS"); - return -ENOMEM; - } - } + if (!fio->rs) + fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO); fec_for_each_prealloc_buffer(n) { if (fio->bufs[n]) continue; - fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO); + fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT); if (unlikely(!fio->bufs[n])) { DMERR("failed to allocate FEC buffer"); return -ENOMEM; @@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) if (fio->bufs[n]) continue; - fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO); + fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT); /* we can manage with even one buffer if necessary */ if (unlikely(!fio->bufs[n])) break; } fio->nbufs = n; - if (!fio->output) { + if (!fio->output) fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO); - if (!fio->output) { - DMERR("failed to allocate FEC page"); - return -ENOMEM; - } - } - return 0; } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 884ff7c170a0..a4fa2ada6883 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set) ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); if (ret == 0) - ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); return ret; } @@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, /* Flush drive cache (this will also sync data) */ if (ret == 0) - ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); return ret; } @@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) /* If there are no dirty metadata blocks, just flush the device cache */ if (list_empty(&write_list)) { - ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); + ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); goto out; } @@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set) (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); } - page = alloc_page(GFP_KERNEL); + page = alloc_page(GFP_NOIO); if (!page) return -ENOMEM; @@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) /* Get zone information from disk */ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), - &blkz, &nr_blkz, GFP_KERNEL); + &blkz, &nr_blkz, GFP_NOIO); if (ret) { dmz_dev_err(zmd->dev, "Get zone %u report failed", dmz_id(zmd, zone)); @@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) ret = blkdev_reset_zones(dev->bdev, dmz_start_sect(zmd, zone), - dev->zone_nr_sectors, GFP_KERNEL); + dev->zone_nr_sectors, GFP_NOIO); if (ret) { dmz_dev_err(dev, "Reset zone %u failed %d", dmz_id(zmd, zone), ret); diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index 
05c0a126f5c8..44a119e12f1a 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone, nr_blocks = block - wp_block; ret = blkdev_issue_zeroout(zrc->dev->bdev, dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), - dmz_blk2sect(nr_blocks), GFP_NOFS, false); + dmz_blk2sect(nr_blocks), GFP_NOIO, 0); if (ret) { dmz_dev_err(zrc->dev, "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 2b538fa817f4..b08bbbd4d902 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) int ret; /* Create a new chunk work */ - cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS); + cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); if (!cw) goto out; @@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) bio->bi_bdev = dev->bdev; - if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE)) + if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) return DM_MAPIO_REMAPPED; /* The BIO should be block aligned */ @@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) bioctx->status = BLK_STS_OK; /* Set the BIO pending in the flush list */ - if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) { + if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { spin_lock(&dmz->flush_lock); bio_list_add(&dmz->flush_list, bio); spin_unlock(&dmz->flush_lock); @@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Chunk BIO work */ mutex_init(&dmz->chunk_lock); - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS); + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0, dev->name); if (!dmz->chunk_wq) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 8cdca0296749..c99634612fc4 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev) static bool set_in_sync(struct mddev *mddev) { - WARN_ON_ONCE(!spin_is_locked(&mddev->lock)); + WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock)); if (!mddev->in_sync) { mddev->sync_checkers++; spin_unlock(&mddev->lock); diff --git a/drivers/md/md.h b/drivers/md/md.h index b50eb4ac1b82..09db03455801 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -731,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) mddev->queue->limits.max_write_zeroes_sectors = 0; } - -/* Maximum size of each resync request */ -#define RESYNC_BLOCK_SIZE (64*1024) -#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) - -/* for managing resync I/O pages */ -struct resync_pages { - unsigned idx; /* for get/put page from the pool */ - void *raid_bio; - struct page *pages[RESYNC_PAGES]; -}; - -static inline int resync_alloc_pages(struct resync_pages *rp, - gfp_t gfp_flags) -{ - int i; - - for (i = 0; i < RESYNC_PAGES; i++) { - rp->pages[i] = alloc_page(gfp_flags); - if (!rp->pages[i]) - goto out_free; - } - - return 0; - -out_free: - while (--i >= 0) - put_page(rp->pages[i]); - return -ENOMEM; -} - -static inline void resync_free_pages(struct resync_pages *rp) -{ - int i; - - for (i = 0; i < RESYNC_PAGES; i++) - put_page(rp->pages[i]); 
-} - -static inline void resync_get_all_pages(struct resync_pages *rp) -{ - int i; - - for (i = 0; i < RESYNC_PAGES; i++) - get_page(rp->pages[i]); -} - -static inline struct page *resync_fetch_page(struct resync_pages *rp, - unsigned idx) -{ - if (WARN_ON_ONCE(idx >= RESYNC_PAGES)) - return NULL; - return rp->pages[idx]; -} #endif /* _MD_MD_H */ diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c new file mode 100644 index 000000000000..9f2670b45f31 --- /dev/null +++ b/drivers/md/raid1-10.c @@ -0,0 +1,81 @@ +/* Maximum size of each resync request */ +#define RESYNC_BLOCK_SIZE (64*1024) +#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) + +/* for managing resync I/O pages */ +struct resync_pages { + void *raid_bio; + struct page *pages[RESYNC_PAGES]; +}; + +static inline int resync_alloc_pages(struct resync_pages *rp, + gfp_t gfp_flags) +{ + int i; + + for (i = 0; i < RESYNC_PAGES; i++) { + rp->pages[i] = alloc_page(gfp_flags); + if (!rp->pages[i]) + goto out_free; + } + + return 0; + +out_free: + while (--i >= 0) + put_page(rp->pages[i]); + return -ENOMEM; +} + +static inline void resync_free_pages(struct resync_pages *rp) +{ + int i; + + for (i = 0; i < RESYNC_PAGES; i++) + put_page(rp->pages[i]); +} + +static inline void resync_get_all_pages(struct resync_pages *rp) +{ + int i; + + for (i = 0; i < RESYNC_PAGES; i++) + get_page(rp->pages[i]); +} + +static inline struct page *resync_fetch_page(struct resync_pages *rp, + unsigned idx) +{ + if (WARN_ON_ONCE(idx >= RESYNC_PAGES)) + return NULL; + return rp->pages[idx]; +} + +/* + * 'strct resync_pages' stores actual pages used for doing the resync + * IO, and it is per-bio, so make .bi_private points to it. + */ +static inline struct resync_pages *get_resync_pages(struct bio *bio) +{ + return bio->bi_private; +} + +/* generally called after bio_reset() for reseting bvec */ +static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp, + int size) +{ + int idx = 0; + + /* initialize bvec table again */ + do { + struct page *page = resync_fetch_page(rp, idx); + int len = min_t(int, size, PAGE_SIZE); + + /* + * won't fail because the vec table is big + * enough to hold all these pages + */ + bio_add_page(bio, page, len, 0); + size -= len; + } while (idx++ < RESYNC_PAGES && size > 0); +} diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 3febfc8391fb..f50958ded9f0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr); #define raid1_log(md, fmt, args...) \ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) -/* - * 'strct resync_pages' stores actual pages used for doing the resync - * IO, and it is per-bio, so make .bi_private points to it. 
- */ -static inline struct resync_pages *get_resync_pages(struct bio *bio) -{ - return bio->bi_private; -} +#include "raid1-10.c" /* * for resync bio, r1bio pointer can be retrieved from the per-bio @@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) resync_get_all_pages(rp); } - rp->idx = 0; rp->raid_bio = r1_bio; bio->bi_private = rp; } @@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio) } if (behind) { - /* we release behind master bio when all write are done */ - if (r1_bio->behind_master_bio == bio) - to_put = NULL; - if (test_bit(WriteMostly, &rdev->flags)) atomic_dec(&r1_bio->behind_remaining); @@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) bio->bi_next = NULL; bio->bi_bdev = rdev->bdev; if (test_bit(Faulty, &rdev->flags)) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); + bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ @@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf) wake_up(&conf->wait_barrier); } -static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, +static void alloc_behind_master_bio(struct r1bio *r1_bio, struct bio *bio) { int size = bio->bi_iter.bi_size; @@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); if (!behind_bio) - goto fail; + return; /* discard op, we don't support writezero/writesame yet */ - if (!bio_has_data(bio)) + if (!bio_has_data(bio)) { + behind_bio->bi_iter.bi_size = size; goto skip_copy; + } while (i < vcnt && size) { struct page *page; @@ -1123,14 +1112,13 @@ skip_copy: r1_bio->behind_master_bio = behind_bio;; set_bit(R1BIO_BehindIO, &r1_bio->state); - return behind_bio; + return; free_pages: pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_iter.bi_size); bio_free_pages(behind_bio); -fail: - return behind_bio; + bio_put(behind_bio); } struct raid1_plug_cb { @@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, (atomic_read(&bitmap->behind_writes) < mddev->bitmap_info.max_write_behind) && !waitqueue_active(&bitmap->behind_wait)) { - mbio = alloc_behind_master_bio(r1_bio, bio); + alloc_behind_master_bio(r1_bio, bio); } bitmap_startwrite(bitmap, r1_bio->sector, @@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, first_clone = 0; } - if (!mbio) { - if (r1_bio->behind_master_bio) - mbio = bio_clone_fast(r1_bio->behind_master_bio, - GFP_NOIO, - mddev->bio_set); - else - mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); - } + if (r1_bio->behind_master_bio) + mbio = bio_clone_fast(r1_bio->behind_master_bio, + GFP_NOIO, mddev->bio_set); + else + mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); if (r1_bio->behind_master_bio) { if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) @@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio) /* Fix variable parts of all bios */ vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); for (i = 0; i < conf->raid_disks * 2; i++) { - int j; - int size; blk_status_t status; - struct bio_vec *bi; struct bio *b = r1_bio->bios[i]; struct resync_pages *rp = get_resync_pages(b); if (b->bi_end_io != end_sync_read) @@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio) status = b->bi_status; bio_reset(b); b->bi_status = status; - b->bi_vcnt = vcnt; - b->bi_iter.bi_size = 
r1_bio->sectors << 9; b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_bdev = conf->mirrors[i].rdev->bdev; @@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio) rp->raid_bio = r1_bio; b->bi_private = rp; - size = b->bi_iter.bi_size; - bio_for_each_segment_all(bi, b, j) { - bi->bv_offset = 0; - if (size > PAGE_SIZE) - bi->bv_len = PAGE_SIZE; - else - bi->bv_len = size; - size -= PAGE_SIZE; - } + /* initialize bvec table again */ + md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); } for (primary = 0; primary < conf->raid_disks * 2; primary++) if (r1_bio->bios[primary]->bi_end_io == end_sync_read && @@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) wbio = bio_clone_fast(r1_bio->behind_master_bio, GFP_NOIO, mddev->bio_set); - /* We really need a _all clone */ - wbio->bi_iter = (struct bvec_iter){ 0 }; } else { wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, mddev->bio_set); @@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, int good_sectors = RESYNC_SECTORS; int min_bad = 0; /* number of sectors that are bad in all devices */ int idx = sector_to_idx(sector_nr); + int page_idx = 0; if (!conf->r1buf_pool) if (init_resync(conf)) @@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r1_bio->bios[i]; rp = get_resync_pages(bio); if (bio->bi_end_io) { - page = resync_fetch_page(rp, rp->idx++); + page = resync_fetch_page(rp, page_idx); /* * won't fail because the vec table is big @@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, nr_sectors += len>>9; sector_nr += len>>9; sync_blocks -= (len>>9); - } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES); + } while (++page_idx < RESYNC_PAGES); r1_bio->sectors = nr_sectors; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5026e7ad51d3..f55d4cc085f6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf); #define raid10_log(md, fmt, args...) \ do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) -/* - * 'strct resync_pages' stores actual pages used for doing the resync - * IO, and it is per-bio, so make .bi_private points to it. 
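/*
 * Aside: process_checks() no longer rebuilds the bvec table by hand;
 * it calls md_bio_reset_resync_pages() from the shared raid1-10.c
 * included above. A sketch of the likely shape of that helper,
 * assuming resync_fetch_page() returns the idx-th preallocated page
 * of the resync_pages set (both names appear elsewhere in this diff):
 */
static inline void md_bio_reset_resync_pages(struct bio *bio,
					     struct resync_pages *rp, int size)
{
	int idx = 0;

	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		/* won't fail: the vec table holds RESYNC_PAGES entries */
		bio_add_page(bio, page, len, 0);
		size -= len;
	} while (idx++ < RESYNC_PAGES - 1 && size > 0);
}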
- */ -static inline struct resync_pages *get_resync_pages(struct bio *bio) -{ - return bio->bi_private; -} +#include "raid1-10.c" /* * for resync bio, r10bio pointer can be retrieved from the per-bio @@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) resync_get_all_pages(rp); } - rp->idx = 0; rp->raid_bio = r10_bio; bio->bi_private = rp; if (rbio) { @@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf) bio->bi_next = NULL; bio->bi_bdev = rdev->bdev; if (test_bit(Faulty, &rdev->flags)) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); + bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ @@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) bio->bi_next = NULL; bio->bi_bdev = rdev->bdev; if (test_bit(Faulty, &rdev->flags)) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); + bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ @@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) rp = get_resync_pages(tbio); bio_reset(tbio); - tbio->bi_vcnt = vcnt; - tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; + md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); + rp->raid_bio = r10_bio; tbio->bi_private = rp; tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; @@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sectors_skipped = 0; int chunks_skipped = 0; sector_t chunk_mask = conf->geo.chunk_mask; + int page_idx = 0; if (!conf->r10buf_pool) if (init_resync(conf)) @@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, break; for (bio= biolist ; bio ; bio=bio->bi_next) { struct resync_pages *rp = get_resync_pages(bio); - page = resync_fetch_page(rp, rp->idx++); + page = resync_fetch_page(rp, page_idx); /* * won't fail because the vec table is big enough * to hold all these pages @@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, } nr_sectors += len>>9; sector_nr += len>>9; - } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); + } while (++page_idx < RESYNC_PAGES); r10_bio->sectors = nr_sectors; while (biolist) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index aeeb8d6854e2..0fc2748aaf95 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); - bi->bi_status = BLK_STS_IOERR; md_write_end(conf->mddev); - bio_endio(bi); + bio_io_error(bi); bi = nextbi; } if (bitmap_end) @@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); - bi->bi_status = BLK_STS_IOERR; md_write_end(conf->mddev); - bio_endio(bi); + bio_io_error(bi); bi = bi2; } @@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); - bi->bi_status = BLK_STS_IOERR; - bio_endio(bi); + bio_io_error(bi); bi = nextbi; } } @@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); + + 
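/*
 * Aside: with resync_pages no longer carrying a ->idx cursor,
 * raid1_sync_request() and raid10_sync_request() both walk the pages
 * from a local page_idx, advancing every bio of the r1/r10 bio set in
 * lockstep. A hypothetical distillation of that loop structure
 * (fill_resync_bios is not a real kernel symbol):
 */
static void fill_resync_bios(struct bio **bios, int nbios,
			     struct resync_pages **rps)
{
	int page_idx = 0, i;

	do {
		for (i = 0; i < nbios; i++) {
			struct page *page =
				resync_fetch_page(rps[i], page_idx);

			bio_add_page(bios[i], page, PAGE_SIZE, 0);
		}
	} while (++page_idx < RESYNC_PAGES);
}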
async_tx_issue_pending_all(); blk_finish_plug(&plug); pr_debug("--- raid5worker inactive\n"); diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index bf45977b2823..d596b601ff42 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(cec_transmit_done); void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) { - switch (status) { + switch (status & ~CEC_TX_STATUS_MAX_RETRIES) { case CEC_TX_STATUS_OK: cec_transmit_done(adap, status, 0, 0, 0, 0); return; diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c index 74dc1c32080e..08b619d0ea1e 100644 --- a/drivers/media/cec/cec-notifier.c +++ b/drivers/media/cec/cec-notifier.c @@ -87,6 +87,9 @@ EXPORT_SYMBOL_GPL(cec_notifier_put); void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) { + if (n == NULL) + return; + mutex_lock(&n->lock); n->phys_addr = pa; if (n->callback) @@ -100,6 +103,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, { u16 pa = CEC_PHYS_ADDR_INVALID; + if (n == NULL) + return; + if (edid && edid->extensions) pa = cec_get_edid_phys_addr((const u8 *)edid, EDID_LENGTH * (edid->extensions + 1), NULL); diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index af694f2066a2..17970cdd55fa 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -349,7 +349,8 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) /* read the buffer size from the CAM */ if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) return ret; - if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0) + ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ); + if (ret != 0) return ret; if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) return -EIO; @@ -644,72 +645,101 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, } buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); - if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) { + if (buf_free < (ca->slot_info[slot].link_buf_size + + DVB_RINGBUFFER_PKTHDRSIZE)) { status = -EAGAIN; goto exit; } } - /* check if there is data available */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) - goto exit; - if (!(status & STATUSREG_DA)) { - /* no data */ - status = 0; - goto exit; - } - - /* read the amount of data */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0) - goto exit; - bytes_read = status << 8; - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0) - goto exit; - bytes_read |= status; + if (ca->pub->read_data && + (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) { + if (ebuf == NULL) + status = ca->pub->read_data(ca->pub, slot, buf, + sizeof(buf)); + else + status = ca->pub->read_data(ca->pub, slot, buf, ecount); + if (status < 0) + return status; + bytes_read = status; + if (status == 0) + goto exit; + } else { - /* check it will fit */ - if (ebuf == NULL) { - if (bytes_read > ca->slot_info[slot].link_buf_size) { - pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", - ca->dvbdev->adapter->num, bytes_read, - ca->slot_info[slot].link_buf_size); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; - status = -EIO; + /* check if there is data available */ + 
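/*
 * Aside: dvb_ca_en50221_read_data() now has two paths: when the
 * bridge driver provides a block-mode ->read_data() hook and the slot
 * is past link initialisation, a whole frame is fetched in one call;
 * otherwise the byte-by-byte register protocol below is used. The
 * dispatch, reduced to its skeleton (checks elided, see the full
 * hunk):
 *
 *	if (ca->pub->read_data &&
 *	    ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)
 *		status = ca->pub->read_data(ca->pub, slot, buf, sizeof(buf));
 *	else
 *		... CTRLIF_STATUS / CTRLIF_SIZE_* / CTRLIF_DATA reads ...
 */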
status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_STATUS); + if (status < 0) goto exit; - } - if (bytes_read < 2) { - pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", - ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; - status = -EIO; + if (!(status & STATUSREG_DA)) { + /* no data */ + status = 0; goto exit; } - } else { - if (bytes_read > ecount) { - pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", - ca->dvbdev->adapter->num); - status = -EIO; + + /* read the amount of data */ + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_SIZE_HIGH); + if (status < 0) + goto exit; + bytes_read = status << 8; + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_SIZE_LOW); + if (status < 0) goto exit; + bytes_read |= status; + + /* check it will fit */ + if (ebuf == NULL) { + if (bytes_read > ca->slot_info[slot].link_buf_size) { + pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", + ca->dvbdev->adapter->num, bytes_read, + ca->slot_info[slot].link_buf_size); + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + if (bytes_read < 2) { + pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", + ca->dvbdev->adapter->num); + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } + } else { + if (bytes_read > ecount) { + pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", + ca->dvbdev->adapter->num); + status = -EIO; + goto exit; + } } - } - /* fill the buffer */ - for (i = 0; i < bytes_read; i++) { - /* read byte and check */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0) - goto exit; + /* fill the buffer */ + for (i = 0; i < bytes_read; i++) { + /* read byte and check */ + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_DATA); + if (status < 0) + goto exit; - /* OK, store it in the buffer */ - buf[i] = status; - } + /* OK, store it in the buffer */ + buf[i] = status; + } - /* check for read error (RE should now be 0) */ - if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) - goto exit; - if (status & STATUSREG_RE) { - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; - status = -EIO; - goto exit; + /* check for read error (RE should now be 0) */ + status = ca->pub->read_cam_control(ca->pub, slot, + CTRLIF_STATUS); + if (status < 0) + goto exit; + if (status & STATUSREG_RE) { + ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_LINKINIT; + status = -EIO; + goto exit; + } } /* OK, add it to the receive buffer, or copy into external buffer if supplied */ @@ -762,6 +792,10 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, if (bytes_write > ca->slot_info[slot].link_buf_size) return -EINVAL; + if (ca->pub->write_data && + (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) + return ca->pub->write_data(ca->pub, slot, buf, bytes_write); + /* it is possible we are dealing with a single buffer implementation, thus if there is data available for read or if there is even a read already in progress, we do nothing but awake the kernel thread to @@ -1176,7 +1210,8 @@ static int dvb_ca_en50221_thread(void *data) pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", ca->dvbdev->adapter->num); - ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; + 
ca->slot_info[slot].slot_state = + DVB_CA_SLOTSTATE_UNINITIALISED; dvb_ca_en50221_thread_update_delay(ca); break; } diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h index 1e4bbbd34d91..82617bac0875 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.h +++ b/drivers/media/dvb-core/dvb_ca_en50221.h @@ -41,6 +41,8 @@ * @write_attribute_mem: function for writing attribute memory on the CAM * @read_cam_control: function for reading the control interface on the CAM * @write_cam_control: function for reading the control interface on the CAM + * @read_data: function for reading data (block mode) + * @write_data: function for writing data (block mode) * @slot_reset: function to reset the CAM slot * @slot_shutdown: function to shutdown a CAM slot * @slot_ts_enable: function to enable the Transport Stream on a CAM slot @@ -66,6 +68,11 @@ struct dvb_ca_en50221 { int (*write_cam_control)(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value); + int (*read_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + int (*write_data)(struct dvb_ca_en50221 *ca, + int slot, u8 *ebuf, int ecount); + int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 08f67d60a7d9..12bff778c97f 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -3279,7 +3279,10 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe, else if (priv->state == STATE_ACTIVE_TC) cxd2841er_read_status_tc(fe, &status); - cxd2841er_read_signal_strength(fe); + if (priv->state == STATE_ACTIVE_TC || priv->state == STATE_ACTIVE_S) + cxd2841er_read_signal_strength(fe); + else + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; if (status & FE_HAS_LOCK) { cxd2841er_read_snr(fe); diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 4442e478db72..cd69e187ba7a 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h @@ -307,7 +307,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, * \def DRX_UNKNOWN * \brief Generic UNKNOWN value for DRX enumerated types. * -* Used to indicate that the parameter value is unknown or not yet initalized. +* Used to indicate that the parameter value is unknown or not yet initialized. */ #ifndef DRX_UNKNOWN #define DRX_UNKNOWN (254) @@ -450,19 +450,6 @@ MACROS ((u8)((((u16)x)>>8)&0xFF)) /** -* \brief Macro to sign extend signed 9 bit value to signed 16 bit value -*/ -#define DRX_S9TOS16(x) ((((u16)x)&0x100) ? ((s16)((u16)(x)|0xFF00)) : (x)) - -/** -* \brief Macro to sign extend signed 9 bit value to signed 16 bit value -*/ -#define DRX_S24TODRXFREQ(x) ((((u32) x) & 0x00800000UL) ? \ - ((s32) \ - (((u32) x) | 0xFF000000)) : \ - ((s32) x)) - -/** * \brief Macro to convert 16 bit register value to a s32 */ #define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? 
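/*
 * Aside: the two macros deleted above sign-extended 9- and 24-bit
 * register fields by or-ing in the upper bits when the sign bit was
 * set. The kernel already provides sign_extend32() in
 * <linux/bitops.h> for this; an equivalent for the 24-bit case
 * (drx_s24_to_s32 is a hypothetical wrapper name):
 */
#include <linux/bitops.h>

static inline s32 drx_s24_to_s32(u32 x)
{
	/* bit 23 is the sign bit of a 24-bit two's-complement field */
	return sign_extend32(x, 23);
}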
\ diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c index ef3021e964be..cb486e879fdd 100644 --- a/drivers/media/dvb-frontends/lnbh25.c +++ b/drivers/media/dvb-frontends/lnbh25.c @@ -76,8 +76,8 @@ static int lnbh25_read_vmon(struct lnbh25_priv *priv) return ret; } } - print_hex_dump_bytes("lnbh25_read_vmon: ", - DUMP_PREFIX_OFFSET, status, sizeof(status)); + dev_dbg(&priv->i2c->dev, "%s(): %*ph\n", + __func__, (int) sizeof(status), status); if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) { dev_err(&priv->i2c->dev, "%s(): voltage in failure state, status reg 0x%x\n", @@ -178,7 +178,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe, fe->ops.release_sec = lnbh25_release; fe->ops.set_voltage = lnbh25_set_voltage; - dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n", + dev_info(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n", __func__, priv->i2c_address); return fe; } diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index e726c2e00460..8ac0f598978d 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -25,6 +25,8 @@ #include <linux/slab.h> #include <linux/i2c.h> +#include "dvb_math.h" + #include "stv0367.h" #include "stv0367_defs.h" #include "stv0367_regs.h" @@ -1437,7 +1439,7 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe, return 0; } -static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) +static u32 stv0367ter_snr_readreg(struct dvb_frontend *fe) { struct stv0367_state *state = fe->demodulator_priv; u32 snru32 = 0; @@ -1453,10 +1455,16 @@ static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) cpt++; } - snru32 /= 10;/*average on 10 values*/ - *snr = snru32 / 1000; + return snru32; +} + +static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) +{ + u32 snrval = stv0367ter_snr_readreg(fe); + + *snr = snrval / 1000; return 0; } @@ -1501,7 +1509,8 @@ static int stv0367ter_read_status(struct dvb_frontend *fe, *status = 0; if (stv0367_readbits(state, F367TER_LK)) { - *status |= FE_HAS_LOCK; + *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI + | FE_HAS_SYNC | FE_HAS_LOCK; dprintk("%s: stv0367 has locked\n", __func__); } @@ -2149,6 +2158,18 @@ static int stv0367cab_read_status(struct dvb_frontend *fe, *status = 0; + if (state->cab_state->state > FE_CAB_NOSIGNAL) + *status |= FE_HAS_SIGNAL; + + if (state->cab_state->state > FE_CAB_NOCARRIER) + *status |= FE_HAS_CARRIER; + + if (state->cab_state->state >= FE_CAB_DEMODOK) + *status |= FE_HAS_VITERBI; + + if (state->cab_state->state >= FE_CAB_DATAOK) + *status |= FE_HAS_SYNC; + if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ? 
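/*
 * Aside: the lnbh25 hunk above trades print_hex_dump_bytes() for the
 * %*ph printk extension, which prints a small buffer (up to 64 bytes)
 * as hex inline, and demotes the message to dev_dbg() so it stays
 * silent unless dynamic debug enables it. The usage pattern, as a
 * hypothetical helper:
 */
static void dump_regs(struct device *dev, const u8 *buf, size_t len)
{
	/* %*ph consumes an int length plus a pointer, like %.*s does */
	dev_dbg(dev, "regs: %*ph\n", (int)len, buf);
}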
state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) { *status |= FE_HAS_LOCK; @@ -2702,51 +2723,61 @@ static int stv0367cab_read_strength(struct dvb_frontend *fe, u16 *strength) return 0; } -static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr) +static int stv0367cab_snr_power(struct dvb_frontend *fe) { struct stv0367_state *state = fe->demodulator_priv; - u32 noisepercentage; enum stv0367cab_mod QAMSize; - u32 regval = 0, temp = 0; - int power, i; QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE); switch (QAMSize) { case FE_CAB_MOD_QAM4: - power = 21904; - break; + return 21904; case FE_CAB_MOD_QAM16: - power = 20480; - break; + return 20480; case FE_CAB_MOD_QAM32: - power = 23040; - break; + return 23040; case FE_CAB_MOD_QAM64: - power = 21504; - break; + return 21504; case FE_CAB_MOD_QAM128: - power = 23616; - break; + return 23616; case FE_CAB_MOD_QAM256: - power = 21760; - break; - case FE_CAB_MOD_QAM512: - power = 1; - break; + return 21760; case FE_CAB_MOD_QAM1024: - power = 21280; - break; + return 21280; default: - power = 1; break; } + return 1; +} + +static int stv0367cab_snr_readreg(struct dvb_frontend *fe, int avgdiv) +{ + struct stv0367_state *state = fe->demodulator_priv; + u32 regval = 0; + int i; + for (i = 0; i < 10; i++) { regval += (stv0367_readbits(state, F367CAB_SNR_LO) + 256 * stv0367_readbits(state, F367CAB_SNR_HI)); } - regval /= 10; /*for average over 10 times in for loop above*/ + if (avgdiv) + regval /= 10; + + return regval; +} + +static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr) +{ + struct stv0367_state *state = fe->demodulator_priv; + u32 noisepercentage; + u32 regval = 0, temp = 0; + int power; + + power = stv0367cab_snr_power(fe); + regval = stv0367cab_snr_readreg(fe, 1); + if (regval != 0) { temp = power * (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER))); @@ -2980,21 +3011,117 @@ static int stv0367ddb_set_frontend(struct dvb_frontend *fe) return -EINVAL; } +static void stv0367ddb_read_signal_strength(struct dvb_frontend *fe) +{ + struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + s32 signalstrength; + + switch (state->activedemod) { + case demod_cab: + signalstrength = stv0367cab_get_rf_lvl(state) * 1000; + break; + default: + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return; + } + + p->strength.stat[0].scale = FE_SCALE_DECIBEL; + p->strength.stat[0].uvalue = signalstrength; +} + +static void stv0367ddb_read_snr(struct dvb_frontend *fe) +{ + struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + int cab_pwr; + u32 regval, tmpval, snrval = 0; + + switch (state->activedemod) { + case demod_ter: + snrval = stv0367ter_snr_readreg(fe); + break; + case demod_cab: + cab_pwr = stv0367cab_snr_power(fe); + regval = stv0367cab_snr_readreg(fe, 0); + + /* prevent division by zero */ + if (!regval) { + snrval = 0; + break; + } + + tmpval = (cab_pwr * 320) / regval; + snrval = ((tmpval != 0) ? 
(intlog2(tmpval) / 5581) : 0); + break; + default: + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return; + } + + p->cnr.stat[0].scale = FE_SCALE_DECIBEL; + p->cnr.stat[0].uvalue = snrval; +} + +static void stv0367ddb_read_ucblocks(struct dvb_frontend *fe) +{ + struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + u32 ucblocks = 0; + + switch (state->activedemod) { + case demod_ter: + stv0367ter_read_ucblocks(fe, &ucblocks); + break; + case demod_cab: + stv0367cab_read_ucblcks(fe, &ucblocks); + break; + default: + p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return; + } + + p->block_error.stat[0].scale = FE_SCALE_COUNTER; + p->block_error.stat[0].uvalue = ucblocks; +} + static int stv0367ddb_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct stv0367_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *p = &fe->dtv_property_cache; + int ret; switch (state->activedemod) { case demod_ter: - return stv0367ter_read_status(fe, status); + ret = stv0367ter_read_status(fe, status); + break; case demod_cab: - return stv0367cab_read_status(fe, status); - default: + ret = stv0367cab_read_status(fe, status); break; + default: + return 0; } - return -EINVAL; + /* stop and report on *_read_status failure */ + if (ret) + return ret; + + stv0367ddb_read_signal_strength(fe); + + /* read carrier/noise when a carrier is detected */ + if (*status & FE_HAS_CARRIER) + stv0367ddb_read_snr(fe); + else + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + + /* read uncorrected blocks on FE_HAS_LOCK */ + if (*status & FE_HAS_LOCK) + stv0367ddb_read_ucblocks(fe); + else + p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + + return 0; } static int stv0367ddb_get_frontend(struct dvb_frontend *fe, @@ -3035,6 +3162,7 @@ static int stv0367ddb_sleep(struct dvb_frontend *fe) static int stv0367ddb_init(struct stv0367_state *state) { struct stv0367ter_state *ter_state = state->ter_state; + struct dtv_frontend_properties *p = &state->fe.dtv_property_cache; stv0367_writereg(state, R367TER_TOPCTRL, 0x10); @@ -3109,6 +3237,13 @@ static int stv0367ddb_init(struct stv0367_state *state) ter_state->first_lock = 0; ter_state->unlock_counter = 2; + p->strength.len = 1; + p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->cnr.len = 1; + p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + p->block_error.len = 1; + p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return 0; } @@ -3126,15 +3261,12 @@ static const struct dvb_frontend_ops stv0367ddb_ops = { 0x400 |/* FE_CAN_QAM_4 */ FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | - FE_CAN_QAM_256 | FE_CAN_FEC_AUTO | + FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | /* DVB-T */ - FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | - FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | - FE_CAN_FEC_AUTO | - FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | - FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | - FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER | - FE_CAN_INVERSION_AUTO | + FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | + FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | + FE_CAN_QPSK | FE_CAN_TRANSMISSION_MODE_AUTO | + FE_CAN_RECOVER | FE_CAN_INVERSION_AUTO | FE_CAN_MUTE_TS }, .release = stv0367_release, diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c index 6e313d5243a0..f39f5179dd95 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_driver.c +++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c @@ -1496,7 +1496,6 @@ 
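/*
 * Aside on the C/N arithmetic above: intlog2() from dvb_math.h
 * returns log2(x) as fixed point scaled by 2^24. Since
 * 10*log10(x) = log2(x) * 3.0103 and 2^24 / 3010.3 ~= 5573, dividing
 * the scaled log by 5581 yields approximately 0.001 dB units, which
 * is the convention the FE_SCALE_DECIBEL stats use. Sketched as a
 * stand-alone helper (cnr_to_mdb is a hypothetical name):
 */
#include "dvb_math.h"	/* intlog2() */

static u32 cnr_to_mdb(u32 ratio)
{
	if (!ratio)
		return 0;	/* intlog2(0) is undefined; report 0 */
	return intlog2(ratio) / 5581;	/* ~millidecibels */
}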
MODULE_DEVICE_TABLE(i2c, et8ek8_id_table); static const struct dev_pm_ops et8ek8_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume) }; -MODULE_DEVICE_TABLE(of, et8ek8_of_table); static struct i2c_driver et8ek8_i2c_driver = { .driver = { diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 9da4bf4f2c7a..7b79a7498751 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -659,7 +659,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, struct tvp5150 *decoder = to_tvp5150(sd); v4l2_std_id std = decoder->norm; u8 reg; - int pos=0; + int pos = 0; if (std == V4L2_STD_ALL) { dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); @@ -669,33 +669,30 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, line += 3; } - if (line<6||line>27) + if (line < 6 || line > 27) return 0; - while (regs->reg != (u16)-1 ) { + while (regs->reg != (u16)-1) { if ((type & regs->type.vbi_type) && - (line>=regs->type.ini_line) && - (line<=regs->type.end_line)) { - type=regs->type.vbi_type; + (line >= regs->type.ini_line) && + (line <= regs->type.end_line)) break; - } regs++; pos++; } + if (regs->reg == (u16)-1) return 0; - type=pos | (flags & 0xf0); - reg=((line-6)<<1)+TVP5150_LINE_MODE_INI; + type = pos | (flags & 0xf0); + reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; - if (fields&1) { + if (fields & 1) tvp5150_write(sd, reg, type); - } - if (fields&2) { - tvp5150_write(sd, reg+1, type); - } + if (fields & 2) + tvp5150_write(sd, reg + 1, type); return type; } diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 9420479bee9a..cd1723e79a07 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -17,6 +17,8 @@ * http://www.gnu.org/copyleft/gpl.html */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -114,6 +116,19 @@ static int i2c_write_reg(struct i2c_adapter *adap, u8 adr, return i2c_write(adap, adr, msg, 2); } +static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr) +{ + u32 val = ddbreadl(adr); + + /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */ + if (val == ~0) { + dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr); + return 0; + } + + return val; +} + static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) { struct ddb *dev = i2c->dev; @@ -124,10 +139,10 @@ static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND); stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); if (stat == 0) { - printk(KERN_ERR "I2C timeout\n"); + dev_err(&dev->pdev->dev, "I2C timeout\n"); { /* MSI debugging*/ u32 istat = ddbreadl(INTERRUPT_STATUS); - printk(KERN_ERR "IRS %08x\n", istat); + dev_err(&dev->pdev->dev, "IRS %08x\n", istat); ddbwritel(istat, INTERRUPT_ACK); } return -EIO; @@ -533,7 +548,7 @@ static u32 ddb_input_avail(struct ddb_input *input) off = (stat & 0x7ff) << 7; if (ctrl & 4) { - printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl); + dev_err(&dev->pdev->dev, "IA %d %d %08x\n", idx, off, ctrl); ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr)); return 0; } @@ -611,6 +626,7 @@ static int demod_attach_drxk(struct ddb_input *input) struct i2c_adapter *i2c = &input->port->i2c->adap; struct dvb_frontend *fe; struct drxk_config config; + struct device *dev = &input->port->dev->pdev->dev; memset(&config, 0, sizeof(config)); config.microcode_name = 
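/*
 * Aside: safe_ddbreadl() above guards against an MMIO read returning
 * all-ones, which is what a PCI read yields once the device has
 * fallen off the bus; mapping ~0 to 0 also keeps the busy-wait loops
 * later in this patch ("while (... & 0x0004);") from spinning forever
 * on a dead card. The same defensive idiom in generic form:
 */
static u32 safe_readl(struct device *dev, void __iomem *reg)
{
	u32 val = readl(reg);

	if (val == ~0u) {	/* all bits set: device likely gone */
		dev_err(dev, "MMIO read failure at %p\n", reg);
		return 0;
	}
	return val;
}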
"drxk_a3.mc"; @@ -619,7 +635,7 @@ static int demod_attach_drxk(struct ddb_input *input) fe = input->fe = dvb_attach(drxk_attach, &config, i2c); if (!input->fe) { - printk(KERN_ERR "No DRXK found!\n"); + dev_err(dev, "No DRXK found!\n"); return -ENODEV; } fe->sec_priv = input; @@ -632,12 +648,13 @@ static int tuner_attach_tda18271(struct ddb_input *input) { struct i2c_adapter *i2c = &input->port->i2c->adap; struct dvb_frontend *fe; + struct device *dev = &input->port->dev->pdev->dev; if (input->fe->ops.i2c_gate_ctrl) input->fe->ops.i2c_gate_ctrl(input->fe, 1); fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60); if (!fe) { - printk(KERN_ERR "No TDA18271 found!\n"); + dev_err(dev, "No TDA18271 found!\n"); return -ENODEV; } if (input->fe->ops.i2c_gate_ctrl) @@ -670,13 +687,14 @@ static struct stv0367_config ddb_stv0367_config[] = { static int demod_attach_stv0367(struct ddb_input *input) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; /* attach frontend */ input->fe = dvb_attach(stv0367ddb_attach, &ddb_stv0367_config[(input->nr & 1)], i2c); if (!input->fe) { - printk(KERN_ERR "stv0367ddb_attach failed (not found?)\n"); + dev_err(dev, "stv0367ddb_attach failed (not found?)\n"); return -ENODEV; } @@ -690,17 +708,19 @@ static int demod_attach_stv0367(struct ddb_input *input) static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) { struct i2c_adapter *adapter = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; + u8 tda_id[2]; u8 subaddr = 0x00; - printk(KERN_DEBUG "stv0367-tda18212 tuner ping\n"); + dev_dbg(dev, "stv0367-tda18212 tuner ping\n"); if (input->fe->ops.i2c_gate_ctrl) input->fe->ops.i2c_gate_ctrl(input->fe, 1); if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) - printk(KERN_DEBUG "tda18212 ping 1 fail\n"); + dev_dbg(dev, "tda18212 ping 1 fail\n"); if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) - printk(KERN_DEBUG "tda18212 ping 2 fail\n"); + dev_warn(dev, "tda18212 ping failed, expect problems\n"); if (input->fe->ops.i2c_gate_ctrl) input->fe->ops.i2c_gate_ctrl(input->fe, 0); @@ -711,6 +731,7 @@ static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct cxd2841er_config cfg; /* the cxd2841er driver expects 8bit/shifted I2C addresses */ @@ -728,7 +749,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c); if (!input->fe) { - printk(KERN_ERR "No Sony CXD28xx found!\n"); + dev_err(dev, "No Sony CXD28xx found!\n"); return -ENODEV; } @@ -742,6 +763,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) { struct i2c_adapter *adapter = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct i2c_client *client; struct tda18212_config config = { .fe = input->fe, @@ -786,7 +808,7 @@ static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) return 0; err: - printk(KERN_INFO "TDA18212 tuner not found. Device is not fully operational.\n"); + dev_warn(dev, "TDA18212 tuner not found. 
Device is not fully operational.\n"); return -ENODEV; } @@ -847,19 +869,20 @@ static struct stv6110x_config stv6110b = { static int demod_attach_stv0900(struct ddb_input *input, int type) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; input->fe = dvb_attach(stv090x_attach, feconf, i2c, (input->nr & 1) ? STV090x_DEMODULATOR_1 : STV090x_DEMODULATOR_0); if (!input->fe) { - printk(KERN_ERR "No STV0900 found!\n"); + dev_err(dev, "No STV0900 found!\n"); return -ENODEV; } if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0, 0, (input->nr & 1) ? (0x09 - type) : (0x0b - type))) { - printk(KERN_ERR "No LNBH24 found!\n"); + dev_err(dev, "No LNBH24 found!\n"); return -ENODEV; } return 0; @@ -868,6 +891,7 @@ static int demod_attach_stv0900(struct ddb_input *input, int type) static int tuner_attach_stv6110(struct ddb_input *input, int type) { struct i2c_adapter *i2c = &input->port->i2c->adap; + struct device *dev = &input->port->dev->pdev->dev; struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; struct stv6110x_config *tunerconf = (input->nr & 1) ? &stv6110b : &stv6110a; @@ -875,10 +899,10 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type) ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c); if (!ctl) { - printk(KERN_ERR "No STV6110X found!\n"); + dev_err(dev, "No STV6110X found!\n"); return -ENODEV; } - printk(KERN_INFO "attach tuner input %d adr %02x\n", + dev_info(dev, "attach tuner input %d adr %02x\n", input->nr, tunerconf->addr); feconf->tuner_init = ctl->tuner_init; @@ -1009,13 +1033,14 @@ static int dvb_input_attach(struct ddb_input *input) struct ddb_port *port = input->port; struct dvb_adapter *adap = &input->adap; struct dvb_demux *dvbdemux = &input->demux; + struct device *dev = &input->port->dev->pdev->dev; int sony_osc24 = 0, sony_tspar = 0; ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE, &input->port->dev->pdev->dev, adapter_nr); if (ret < 0) { - printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n"); + dev_err(dev, "Could not register adapter. 
Check if you enabled enough adapters in dvb-core!\n"); return ret; } input->attached = 1; @@ -1241,9 +1266,9 @@ static void input_tasklet(unsigned long data) if (input->port->class == DDB_PORT_TUNER) { if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr))) - printk(KERN_ERR "Overflow input %d\n", input->nr); + dev_err(&dev->pdev->dev, "Overflow input %d\n", input->nr); while (input->cbuf != ((input->stat >> 11) & 0x1f) - || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) { + || (4 & safe_ddbreadl(dev, DMA_BUFFER_CONTROL(input->nr)))) { dvb_dmx_swfilter_packets(&input->demux, input->vbuf[input->cbuf], input->dma_buf_size / 188); @@ -1280,6 +1305,7 @@ static struct cxd2099_cfg cxd_cfg = { .adr = 0x40, .polarity = 1, .clock_mode = 1, + .max_i2c = 512, }; static int ddb_ci_attach(struct ddb_port *port) @@ -1310,6 +1336,7 @@ static int ddb_ci_attach(struct ddb_port *port) static int ddb_port_attach(struct ddb_port *port) { + struct device *dev = &port->dev->pdev->dev; int ret = 0; switch (port->class) { @@ -1326,7 +1353,7 @@ static int ddb_port_attach(struct ddb_port *port) break; } if (ret < 0) - printk(KERN_ERR "port_attach on port %d failed\n", port->nr); + dev_err(dev, "port_attach on port %d failed\n", port->nr); return ret; } @@ -1377,6 +1404,7 @@ static void ddb_ports_detach(struct ddb *dev) static int init_xo2(struct ddb_port *port) { struct i2c_adapter *i2c = &port->i2c->adap; + struct device *dev = &port->dev->pdev->dev; u8 val, data[2]; int res; @@ -1385,7 +1413,7 @@ static int init_xo2(struct ddb_port *port) return res; if (data[0] != 0x01) { - pr_info("Port %d: invalid XO2\n", port->nr); + dev_info(dev, "Port %d: invalid XO2\n", port->nr); return -1; } @@ -1511,7 +1539,7 @@ static void ddb_port_probe(struct ddb_port *port) port->class = DDB_PORT_CI; ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); } else if (port_has_xo2(port, &xo2_type, &xo2_id)) { - printk(KERN_INFO "Port %d (TAB %d): XO2 type: %d, id: %d\n", + dev_dbg(&dev->pdev->dev, "Port %d (TAB %d): XO2 type: %d, id: %d\n", port->nr, port->nr+1, xo2_type, xo2_id); ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); @@ -1556,10 +1584,10 @@ static void ddb_port_probe(struct ddb_port *port) } break; case DDB_XO2_TYPE_CI: - printk(KERN_INFO "DuoFlex CI modules not supported\n"); + dev_info(&dev->pdev->dev, "DuoFlex CI modules not supported\n"); break; default: - printk(KERN_INFO "Unknown XO2 DuoFlex module\n"); + dev_info(&dev->pdev->dev, "Unknown XO2 DuoFlex module\n"); break; } } else if (port_has_cxd28xx(port, &cxd_id)) { @@ -1611,7 +1639,7 @@ static void ddb_port_probe(struct ddb_port *port) ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); } - printk(KERN_INFO "Port %d (TAB %d): %s\n", + dev_info(&dev->pdev->dev, "Port %d (TAB %d): %s\n", port->nr, port->nr+1, modname); } @@ -1765,7 +1793,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) wbuf += 4; wlen -= 4; ddbwritel(data, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; } @@ -1785,7 +1813,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) if (shift) data <<= shift; ddbwritel(data, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; if (!rlen) { @@ -1797,7 +1825,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) while (rlen > 4) { ddbwritel(0xffffffff, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; data = 
ddbreadl(SPI_DATA); *(u32 *) rbuf = swab32(data); @@ -1806,7 +1834,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) } ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL); ddbwritel(0xffffffff, SPI_DATA); - while (ddbreadl(SPI_CONTROL) & 0x0004) + while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) ; data = ddbreadl(SPI_DATA); @@ -1993,7 +2021,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->pdev = pdev; pci_set_drvdata(pdev, dev); dev->info = (struct ddb_info *) id->driver_data; - printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name); + dev_info(&pdev->dev, "Detected %s\n", dev->info->name); dev->regs = ioremap(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -2001,13 +2029,13 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) stat = -ENOMEM; goto fail; } - printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); + dev_info(&pdev->dev, "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); #ifdef CONFIG_PCI_MSI if (pci_msi_enabled()) stat = pci_enable_msi(dev->pdev); if (stat) { - printk(KERN_INFO ": MSI not available.\n"); + dev_info(&pdev->dev, "MSI not available.\n"); } else { irq_flag = 0; dev->msi = 1; @@ -2040,7 +2068,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto fail1; ddb_ports_init(dev); if (ddb_buffers_alloc(dev) < 0) { - printk(KERN_INFO ": Could not allocate buffer memory\n"); + dev_err(&pdev->dev, "Could not allocate buffer memory\n"); goto fail2; } if (ddb_ports_attach(dev) < 0) @@ -2050,19 +2078,19 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) fail3: ddb_ports_detach(dev); - printk(KERN_ERR "fail3\n"); + dev_err(&pdev->dev, "fail3\n"); ddb_ports_release(dev); fail2: - printk(KERN_ERR "fail2\n"); + dev_err(&pdev->dev, "fail2\n"); ddb_buffers_free(dev); fail1: - printk(KERN_ERR "fail1\n"); + dev_err(&pdev->dev, "fail1\n"); if (dev->msi) pci_disable_msi(dev->pdev); if (stat == 0) free_irq(dev->pdev->irq, dev); fail: - printk(KERN_ERR "fail\n"); + dev_err(&pdev->dev, "fail\n"); ddb_unmap(dev); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); @@ -2242,7 +2270,7 @@ static __init int module_init_ddbridge(void) { int ret; - printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); + pr_info("Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); ret = ddb_class_create(); if (ret < 0) diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index ce69e648b663..8c92cb7f7e72 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -336,9 +336,9 @@ int ngene_command(struct ngene *dev, struct ngene_command *com) { int result; - down(&dev->cmd_mutex); + mutex_lock(&dev->cmd_mutex); result = ngene_command_mutex(dev, com); - up(&dev->cmd_mutex); + mutex_unlock(&dev->cmd_mutex); return result; } @@ -560,7 +560,6 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, u16 BsSPI = ((stream & 1) ? 
0x9800 : 0x9700); u16 BsSDO = 0x9B00; - down(&dev->stream_mutex); memset(&com, 0, sizeof(com)); com.cmd.hdr.Opcode = CMD_CONTROL; com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2; @@ -586,17 +585,13 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, chan->State = KSSTATE_ACQUIRE; chan->HWState = HWSTATE_STOP; spin_unlock_irq(&chan->state_lock); - if (ngene_command(dev, &com) < 0) { - up(&dev->stream_mutex); + if (ngene_command(dev, &com) < 0) return -1; - } /* clear_buffers(chan); */ flush_buffers(chan); - up(&dev->stream_mutex); return 0; } spin_unlock_irq(&chan->state_lock); - up(&dev->stream_mutex); return 0; } @@ -692,11 +687,9 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, chan->HWState = HWSTATE_STARTUP; spin_unlock_irq(&chan->state_lock); - if (ngene_command(dev, &com) < 0) { - up(&dev->stream_mutex); + if (ngene_command(dev, &com) < 0) return -1; - } - up(&dev->stream_mutex); + return 0; } @@ -750,8 +743,11 @@ void set_transfer(struct ngene_channel *chan, int state) /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n", ngreadl(0x9310)); */ + mutex_lock(&dev->stream_mutex); ret = ngene_command_stream_control(dev, chan->number, control, mode, flags); + mutex_unlock(&dev->stream_mutex); + if (!ret) chan->running = state; else @@ -1283,7 +1279,7 @@ static int ngene_load_firm(struct ngene *dev) static void ngene_stop(struct ngene *dev) { - down(&dev->cmd_mutex); + mutex_destroy(&dev->cmd_mutex); i2c_del_adapter(&(dev->channel[0].i2c_adapter)); i2c_del_adapter(&(dev->channel[1].i2c_adapter)); ngwritel(0, NGENE_INT_ENABLE); @@ -1346,10 +1342,10 @@ static int ngene_start(struct ngene *dev) init_waitqueue_head(&dev->cmd_wq); init_waitqueue_head(&dev->tx_wq); init_waitqueue_head(&dev->rx_wq); - sema_init(&dev->cmd_mutex, 1); - sema_init(&dev->stream_mutex, 1); + mutex_init(&dev->cmd_mutex); + mutex_init(&dev->stream_mutex); sema_init(&dev->pll_mutex, 1); - sema_init(&dev->i2c_switch_mutex, 1); + mutex_init(&dev->i2c_switch_mutex); spin_lock_init(&dev->cmd_lock); for (i = 0; i < MAX_STREAM; i++) spin_lock_init(&dev->channel[i].state_lock); @@ -1606,10 +1602,10 @@ static void ngene_unlink(struct ngene *dev) com.in_len = 3; com.out_len = 1; - down(&dev->cmd_mutex); + mutex_lock(&dev->cmd_mutex); ngwritel(0, NGENE_INT_ENABLE); ngene_command_mutex(dev, &com); - up(&dev->cmd_mutex); + mutex_unlock(&dev->cmd_mutex); } void ngene_shutdown(struct pci_dev *pdev) diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c index cf39fcf54adf..fbf36353c701 100644 --- a/drivers/media/pci/ngene/ngene-i2c.c +++ b/drivers/media/pci/ngene/ngene-i2c.c @@ -118,7 +118,7 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter, (struct ngene_channel *)i2c_get_adapdata(adapter); struct ngene *dev = chan->dev; - down(&dev->i2c_switch_mutex); + mutex_lock(&dev->i2c_switch_mutex); ngene_i2c_set_bus(dev, chan->number); if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD)) @@ -136,11 +136,11 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter, msg[0].buf, msg[0].len, 0)) goto done; - up(&dev->i2c_switch_mutex); + mutex_unlock(&dev->i2c_switch_mutex); return -EIO; done: - up(&dev->i2c_switch_mutex); + mutex_unlock(&dev->i2c_switch_mutex); return num; } diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h index 10d8f74c4f0a..7c7cd217333d 100644 --- a/drivers/media/pci/ngene/ngene.h +++ b/drivers/media/pci/ngene/ngene.h @@ -762,10 +762,10 @@ struct ngene { wait_queue_head_t 
cmd_wq; int cmd_done; - struct semaphore cmd_mutex; - struct semaphore stream_mutex; + struct mutex cmd_mutex; + struct mutex stream_mutex; struct semaphore pll_mutex; - struct semaphore i2c_switch_mutex; + struct mutex i2c_switch_mutex; int i2c_current_channel; int i2c_current_bus; spinlock_t cmd_lock; diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c index 2a044be729da..e7bd2b8484e3 100644 --- a/drivers/media/pci/tw5864/tw5864-video.c +++ b/drivers/media/pci/tw5864/tw5864-video.c @@ -545,6 +545,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv, switch (input->std) { default: WARN_ON_ONCE(1); + return -EINVAL; case STD_NTSC: f->fmt.pix.height = 480; break; diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 1313cd533436..fb1fa0b82077 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -475,8 +475,8 @@ config VIDEO_QCOM_VENUS tristate "Qualcomm Venus V4L2 encoder/decoder driver" depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST - select QCOM_MDT_LOADER if (ARM || ARM64) - select QCOM_SCM if (ARM || ARM64) + select QCOM_MDT_LOADER if ARCH_QCOM + select QCOM_SCM if ARCH_QCOM select VIDEOBUF2_DMA_SG select V4L2_MEM2MEM_DEV ---help--- diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 25cbf9e5ac5a..bba1eb43b5d8 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -393,8 +393,8 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx, int ret; int i; - if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || - ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) { + if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || + ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264) { width = round_up(q_data->width, 16); height = round_up(q_data->height, 16); } else { @@ -2198,7 +2198,7 @@ static void coda_finish_decode(struct coda_ctx *ctx) ctx->display_idx = display_idx; } -static void coda_error_decode(struct coda_ctx *ctx) +static void coda_decode_timeout(struct coda_ctx *ctx) { struct vb2_v4l2_buffer *dst_buf; @@ -2223,7 +2223,7 @@ const struct coda_context_ops coda_bit_decode_ops = { .start_streaming = coda_start_decoding, .prepare_run = coda_prepare_decode, .finish_run = coda_finish_decode, - .error_run = coda_error_decode, + .run_timeout = coda_decode_timeout, .seq_end_work = coda_seq_end_work, .release = coda_bit_release, }; diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index f92cc7df58fb..829c7895a98a 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -1164,8 +1164,8 @@ static void coda_pic_run_work(struct work_struct *work) coda_hw_reset(ctx); - if (ctx->ops->error_run) - ctx->ops->error_run(ctx); + if (ctx->ops->run_timeout) + ctx->ops->run_timeout(ctx); } else if (!ctx->aborting) { ctx->ops->finish_run(ctx); } diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h index 40fe22f0d757..c5f504d8cf67 100644 --- a/drivers/media/platform/coda/coda.h +++ b/drivers/media/platform/coda/coda.h @@ -183,7 +183,7 @@ struct coda_context_ops { int (*start_streaming)(struct coda_ctx *ctx); int (*prepare_run)(struct coda_ctx *ctx); void (*finish_run)(struct coda_ctx *ctx); - void (*error_run)(struct coda_ctx *ctx); + void (*run_timeout)(struct coda_ctx *ctx); void (*seq_end_work)(struct work_struct *work); void 
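/*
 * Aside: the ngene hunks above turn cmd_mutex, stream_mutex and
 * i2c_switch_mutex from counting semaphores (used as binary locks)
 * into proper struct mutex, gaining lockdep and owner checking; only
 * pll_mutex stays a semaphore. The conversion is mechanical:
 *
 *	sema_init(&l, 1)  ->  mutex_init(&l)
 *	down(&l)          ->  mutex_lock(&l)
 *	up(&l)            ->  mutex_unlock(&l)
 *	(teardown)        ->  mutex_destroy(&l)   (debug-only check)
 *
 * Minimal shape after conversion, with a hypothetical lock:
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_command(void)
{
	int ret;

	mutex_lock(&demo_lock);		/* was: down(&demo_sem) */
	ret = 0;			/* ... issue command ... */
	mutex_unlock(&demo_lock);	/* was: up(&demo_sem) */
	return ret;
}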
(*release)(struct coda_ctx *ctx); }; diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h index 8f6688a7a111..f1b521045d64 100644 --- a/drivers/media/platform/davinci/ccdc_hw_device.h +++ b/drivers/media/platform/davinci/ccdc_hw_device.h @@ -42,16 +42,6 @@ struct ccdc_hw_ops { int (*set_hw_if_params) (struct vpfe_hw_if_param *param); /* get interface parameters */ int (*get_hw_if_params) (struct vpfe_hw_if_param *param); - /* - * Pointer to function to set parameters. Used - * for implementing VPFE_S_CCDC_PARAMS - */ - int (*set_params) (void *params); - /* - * Pointer to function to get parameter. Used - * for implementing VPFE_G_CCDC_PARAMS - */ - int (*get_params) (void *params); /* Pointer to function to configure ccdc */ int (*configure) (void); diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c index 73db166dc338..6d492dc4c3a9 100644 --- a/drivers/media/platform/davinci/dm355_ccdc.c +++ b/drivers/media/platform/davinci/dm355_ccdc.c @@ -17,12 +17,7 @@ * This module is for configuring DM355 CCD controller of VPFE to capture * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules * such as Defect Pixel Correction, Color Space Conversion etc to - * pre-process the Bayer RGB data, before writing it to SDRAM. This - * module also allows application to configure individual - * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. - * To do so, application include dm355_ccdc.h and vpfe_capture.h header - * files. The setparams() API is called by vpfe_capture driver - * to configure module parameters + * pre-process the Bayer RGB data, before writing it to SDRAM. * * TODO: 1) Raw bayer parameter settings and bayer capture * 2) Split module parameter structure to module specific ioctl structs @@ -260,90 +255,6 @@ static void ccdc_setwin(struct v4l2_rect *image_win, dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin..."); } -static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) -{ - if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT || - ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) { - dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n"); - return -EINVAL; - } - - if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 || - ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) { - dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n"); - return -EINVAL; - } - - if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 || - ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) { - dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n"); - return -EINVAL; - } - - if ((ccdcparam->med_filt_thres < 0) || - (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) { - dev_dbg(ccdc_cfg.dev, - "Invalid value of median filter threshold\n"); - return -EINVAL; - } - - if (ccdcparam->data_sz < CCDC_DATA_16BITS || - ccdcparam->data_sz > CCDC_DATA_8BITS) { - dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n"); - return -EINVAL; - } - - if (ccdcparam->alaw.enable) { - if (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_13_4 || - ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) { - dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n"); - return -EINVAL; - } - } - - if (ccdcparam->blk_clamp.b_clamp_enable) { - if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS || - ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) { - dev_dbg(ccdc_cfg.dev, - "Invalid value of sample pixel\n"); - return -EINVAL; - } - if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES || - ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) { 
- dev_dbg(ccdc_cfg.dev, - "Invalid value of sample lines\n"); - return -EINVAL; - } - } - return 0; -} - -/* Parameter operations */ -static int ccdc_set_params(void __user *params) -{ - struct ccdc_config_params_raw ccdc_raw_params; - int x; - - /* only raw module parameters can be set through the IOCTL */ - if (ccdc_cfg.if_type != VPFE_RAW_BAYER) - return -EINVAL; - - x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); - if (x) { - dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdcparams, %d\n", - x); - return -EFAULT; - } - - if (!validate_ccdc_param(&ccdc_raw_params)) { - memcpy(&ccdc_cfg.bayer.config_params, - &ccdc_raw_params, - sizeof(ccdc_raw_params)); - return 0; - } - return -EINVAL; -} - /* This function will configure CCDC for YCbCr video capture */ static void ccdc_config_ycbcr(void) { @@ -939,7 +850,6 @@ static struct ccdc_hw_device ccdc_hw_dev = { .enable = ccdc_enable, .enable_out_to_sdram = ccdc_enable_output_to_sdram, .set_hw_if_params = ccdc_set_hw_if_params, - .set_params = ccdc_set_params, .configure = ccdc_configure, .set_buftype = ccdc_set_buftype, .get_buftype = ccdc_get_buftype, diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c index 740fbc7a8c14..3b2d8a9317b8 100644 --- a/drivers/media/platform/davinci/dm644x_ccdc.c +++ b/drivers/media/platform/davinci/dm644x_ccdc.c @@ -17,13 +17,9 @@ * This module is for configuring CCD controller of DM6446 VPFE to capture * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules * such as Defect Pixel Correction, Color Space Conversion etc to - * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This - * module also allows application to configure individual - * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. - * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header - * files. The setparams() API is called by vpfe_capture driver - * to configure module parameters. This file is named DM644x so that other - * variants such DM6443 may be supported using the same module. + * pre-process the Raw Bayer RGB data, before writing it to SDRAM. + * This file is named DM644x so that other variants such DM6443 + * may be supported using the same module. 
* * TODO: Test Raw bayer parameter settings and bayer capture * Split module parameter structure to module specific ioctl structs @@ -216,96 +212,8 @@ static void ccdc_readregs(void) dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val); } -static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) -{ - if (ccdcparam->alaw.enable) { - u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd); - u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz); - - if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) || - (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) || - (max_gamma > max_data)) { - dev_dbg(ccdc_cfg.dev, "\nInvalid data line select"); - return -1; - } - } - return 0; -} - -static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params) -{ - struct ccdc_config_params_raw *config_params = - &ccdc_cfg.bayer.config_params; - unsigned int *fpc_virtaddr = NULL; - unsigned int *fpc_physaddr = NULL; - - memcpy(config_params, raw_params, sizeof(*raw_params)); - /* - * allocate memory for fault pixel table and copy the user - * values to the table - */ - if (!config_params->fault_pxl.enable) - return 0; - - fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; - fpc_virtaddr = (unsigned int *)phys_to_virt( - (unsigned long)fpc_physaddr); - /* - * Allocate memory for FPC table if current - * FPC table buffer is not big enough to - * accommodate FPC Number requested - */ - if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) { - if (fpc_physaddr != NULL) { - free_pages((unsigned long)fpc_virtaddr, - get_order - (config_params->fault_pxl.fp_num * - FP_NUM_BYTES)); - } - - /* Allocate memory for FPC table */ - fpc_virtaddr = - (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA, - get_order(raw_params-> - fault_pxl.fp_num * - FP_NUM_BYTES)); - - if (fpc_virtaddr == NULL) { - dev_dbg(ccdc_cfg.dev, - "\nUnable to allocate memory for FPC"); - return -EFAULT; - } - fpc_physaddr = - (unsigned int *)virt_to_phys((void *)fpc_virtaddr); - } - - /* Copy number of fault pixels and FPC table */ - config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num; - if (copy_from_user(fpc_virtaddr, - (void __user *)raw_params->fault_pxl.fpc_table_addr, - config_params->fault_pxl.fp_num * FP_NUM_BYTES)) { - dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed"); - return -EFAULT; - } - config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr; - return 0; -} - static int ccdc_close(struct device *dev) { - struct ccdc_config_params_raw *config_params = - &ccdc_cfg.bayer.config_params; - unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL; - - fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; - - if (fpc_physaddr != NULL) { - fpc_virtaddr = (unsigned int *) - phys_to_virt((unsigned long)fpc_physaddr); - free_pages((unsigned long)fpc_virtaddr, - get_order(config_params->fault_pxl.fp_num * - FP_NUM_BYTES)); - } return 0; } @@ -339,29 +247,6 @@ static void ccdc_sbl_reset(void) vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); } -/* Parameter operations */ -static int ccdc_set_params(void __user *params) -{ - struct ccdc_config_params_raw ccdc_raw_params; - int x; - - if (ccdc_cfg.if_type != VPFE_RAW_BAYER) - return -EINVAL; - - x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); - if (x) { - dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copyingccdc params, %d\n", - x); - return -EFAULT; - } - - if (!validate_ccdc_param(&ccdc_raw_params)) { - if 
(!ccdc_update_raw_params(&ccdc_raw_params)) - return 0; - } - return -EINVAL; -} - /* * ccdc_config_ycbcr() * This function will configure CCDC for YCbCr video capture @@ -489,32 +374,6 @@ static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp) regw(val, CCDC_BLKCMP); } -static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc) -{ - u32 val; - - /* Initially disable FPC */ - val = CCDC_FPC_DISABLE; - regw(val, CCDC_FPC); - - if (!fpc->enable) - return; - - /* Configure Fault pixel if needed */ - regw(fpc->fpc_table_addr, CCDC_FPC_ADDR); - dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n", - (fpc->fpc_table_addr)); - /* Write the FPC params with FPC disable */ - val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK; - regw(val, CCDC_FPC); - - dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); - /* read the FPC register */ - val = regr(CCDC_FPC) | CCDC_FPC_ENABLE; - regw(val, CCDC_FPC); - dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); -} - /* * ccdc_config_raw() * This function will configure CCDC for Raw capture mode @@ -569,9 +428,6 @@ static void ccdc_config_raw(void) /* Configure Black level compensation */ ccdc_config_black_compense(&config_params->blk_comp); - /* Configure Fault Pixel Correction */ - ccdc_config_fpc(&config_params->fault_pxl); - /* If data size is 8 bit then pack the data */ if ((config_params->data_sz == CCDC_DATA_8BITS) || config_params->alaw.enable) @@ -929,7 +785,6 @@ static struct ccdc_hw_device ccdc_hw_dev = { .reset = ccdc_sbl_reset, .enable = ccdc_enable, .set_hw_if_params = ccdc_set_hw_if_params, - .set_params = ccdc_set_params, .configure = ccdc_configure, .set_buftype = ccdc_set_buftype, .get_buftype = ccdc_get_buftype, diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index e3fe3e0635aa..b1bf4a7e8eb7 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -281,45 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev) EXPORT_SYMBOL(vpfe_unregister_ccdc_device); /* - * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings - */ -static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev, - struct v4l2_format *f) -{ - struct v4l2_rect image_win; - enum ccdc_buftype buf_type; - enum ccdc_frmfmt frm_fmt; - - memset(f, 0, sizeof(*f)); - f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; - ccdc_dev->hw_ops.get_image_window(&image_win); - f->fmt.pix.width = image_win.width; - f->fmt.pix.height = image_win.height; - f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length(); - f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * - f->fmt.pix.height; - buf_type = ccdc_dev->hw_ops.get_buftype(); - f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format(); - frm_fmt = ccdc_dev->hw_ops.get_frame_format(); - if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) - f->fmt.pix.field = V4L2_FIELD_NONE; - else if (frm_fmt == CCDC_FRMFMT_INTERLACED) { - if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) - f->fmt.pix.field = V4L2_FIELD_INTERLACED; - else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) - f->fmt.pix.field = V4L2_FIELD_SEQ_TB; - else { - v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n"); - return -EINVAL; - } - } else { - v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n"); - return -EINVAL; - } - return 0; -} - -/* * vpfe_config_ccdc_image_format() * For a pix format, configure ccdc to setup the capture */ @@ -1697,59 +1658,6 @@ unlock_out: return ret; } - -static long vpfe_param_handler(struct file *file, 
void *priv, - bool valid_prio, unsigned int cmd, void *param) -{ - struct vpfe_device *vpfe_dev = video_drvdata(file); - int ret; - - v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n"); - - if (vpfe_dev->started) { - /* only allowed if streaming is not started */ - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "device already started\n"); - return -EBUSY; - } - - ret = mutex_lock_interruptible(&vpfe_dev->lock); - if (ret) - return ret; - - switch (cmd) { - case VPFE_CMD_S_CCDC_RAW_PARAMS: - v4l2_warn(&vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); - if (ccdc_dev->hw_ops.set_params) { - ret = ccdc_dev->hw_ops.set_params(param); - if (ret) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Error setting parameters in CCDC\n"); - goto unlock_out; - } - ret = vpfe_get_ccdc_image_format(vpfe_dev, - &vpfe_dev->fmt); - if (ret < 0) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Invalid image format at CCDC\n"); - goto unlock_out; - } - } else { - ret = -EINVAL; - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); - } - break; - default: - ret = -ENOTTY; - } -unlock_out: - mutex_unlock(&vpfe_dev->lock); - return ret; -} - - /* vpfe capture ioctl operations */ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { .vidioc_querycap = vpfe_querycap, @@ -1772,7 +1680,6 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { .vidioc_cropcap = vpfe_cropcap, .vidioc_g_selection = vpfe_g_selection, .vidioc_s_selection = vpfe_s_selection, - .vidioc_default = vpfe_param_handler, }; static struct vpfe_device *vpfe_initialize(void) diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index d78580f9e431..4be6554c56c5 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -1719,7 +1719,6 @@ vpif_unregister: */ static int vpif_remove(struct platform_device *device) { - struct common_obj *common; struct channel_obj *ch; int i; @@ -1730,7 +1729,6 @@ static int vpif_remove(struct platform_device *device) for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; - common = &ch->common[VPIF_VIDEO_INDEX]; /* Unregister video device */ video_unregister_device(&ch->video_dev); kfree(vpif_obj.dev[i]); diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index b5ac6ce626b3..bf982bf86542 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -1339,7 +1339,6 @@ vpif_unregister: */ static int vpif_remove(struct platform_device *device) { - struct common_obj *common; struct channel_obj *ch; int i; @@ -1350,7 +1349,6 @@ static int vpif_remove(struct platform_device *device) for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; - common = &ch->common[VPIF_VIDEO_INDEX]; /* Unregister video device */ video_unregister_device(&ch->video_dev); kfree(vpif_obj.dev[i]); diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c index 92c4e1826356..45a553d4f5b2 100644 --- a/drivers/media/platform/omap/omap_vout_vrfb.c +++ b/drivers/media/platform/omap/omap_vout_vrfb.c @@ -16,7 +16,6 @@ #include <media/videobuf-dma-contig.h> #include <media/v4l2-device.h> -#include <linux/omap-dma.h> #include <video/omapvrfb.h> #include "omap_voutdef.h" @@ -63,7 +62,7 @@ static int 
omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout, /* * Wakes up the application once the DMA transfer to VRFB space is completed. */ -static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data) +static void omap_vout_vrfb_dma_tx_callback(void *data) { struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data; @@ -94,6 +93,7 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num, int ret = 0, i, j; struct omap_vout_device *vout; struct video_device *vfd; + dma_cap_mask_t mask; int image_width, image_height; int vrfb_num_bufs = VRFB_NUM_BUFS; struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); @@ -131,18 +131,27 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num, /* * Request and Initialize DMA, for DMA based VRFB transfer */ - vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE; - vout->vrfb_dma_tx.dma_ch = -1; - vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED; - ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX", - omap_vout_vrfb_dma_tx_callback, - (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch); - if (ret < 0) { + dma_cap_zero(mask); + dma_cap_set(DMA_INTERLEAVE, mask); + vout->vrfb_dma_tx.chan = dma_request_chan_by_mask(&mask); + if (IS_ERR(vout->vrfb_dma_tx.chan)) { vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; + } else { + size_t xt_size = sizeof(struct dma_interleaved_template) + + sizeof(struct data_chunk); + + vout->vrfb_dma_tx.xt = kzalloc(xt_size, GFP_KERNEL); + if (!vout->vrfb_dma_tx.xt) { + dma_release_channel(vout->vrfb_dma_tx.chan); + vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; + } + } + + if (vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) dev_info(&pdev->dev, ": failed to allocate DMA Channel for video%d\n", vfd->minor); - } + init_waitqueue_head(&vout->vrfb_dma_tx.wait); /* statically allocated the VRFB buffer is done through @@ -177,7 +186,9 @@ void omap_vout_release_vrfb(struct omap_vout_device *vout) if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) { vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; - omap_free_dma(vout->vrfb_dma_tx.dma_ch); + kfree(vout->vrfb_dma_tx.xt); + dmaengine_terminate_sync(vout->vrfb_dma_tx.chan); + dma_release_channel(vout->vrfb_dma_tx.chan); } } @@ -219,70 +230,84 @@ int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout, } int omap_vout_prepare_vrfb(struct omap_vout_device *vout, - struct videobuf_buffer *vb) + struct videobuf_buffer *vb) { - dma_addr_t dmabuf; - struct vid_vrfb_dma *tx; + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags flags; + struct dma_chan *chan = vout->vrfb_dma_tx.chan; + struct dma_device *dmadev = chan->device; + struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt; + dma_cookie_t cookie; + enum dma_status status; enum dss_rotation rotation; - u32 dest_frame_index = 0, src_element_index = 0; - u32 dest_element_index = 0, src_frame_index = 0; - u32 elem_count = 0, frame_count = 0, pixsize = 2; + size_t dst_icg; + u32 pixsize; if (!is_rotation_enabled(vout)) return 0; - dmabuf = vout->buf_phy_addr[vb->i]; /* If rotation is enabled, copy input buffer into VRFB * memory space using DMA. 
We are copying input buffer
	 * into VRFB memory space of desired angle and DSS will
	 * read image VRFB memory for 0 degree angle
	 */
+	pixsize = vout->bpp * vout->vrfb_bpp;
-	/*
-	 * DMA transfer in double index mode
-	 */
+	/* icg is the gap, in bytes, between consecutive lines in VRFB space */
+	dst_icg = (MAX_PIXELS_PER_LINE * pixsize) -
+		  (vout->pix.width * vout->bpp);
+
+	xt->src_start = vout->buf_phy_addr[vb->i];
+	xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
+
+	xt->numf = vout->pix.height;
+	xt->frame_size = 1;
+	xt->sgl[0].size = vout->pix.width * vout->bpp;
+	xt->sgl[0].icg = dst_icg;
+
+	xt->dir = DMA_MEM_TO_MEM;
+	xt->src_sgl = false;
+	xt->src_inc = true;
+	xt->dst_sgl = true;
+	xt->dst_inc = true;
+
+	/* Interrupt on completion, descriptor may be reused once acked. */
+	flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	tx = dmadev->device_prep_interleaved_dma(chan, xt, flags);
+	if (tx == NULL) {
+		pr_err("%s: DMA interleaved prep error\n", __func__);
+		return -EINVAL;
+	}
-	/* Frame index */
-	dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
-			(vout->pix.width * vout->bpp)) + 1;
-
-	/* Source and destination parameters */
-	src_element_index = 0;
-	src_frame_index = 0;
-	dest_element_index = 1;
-	/* Number of elements per frame */
-	elem_count = vout->pix.width * vout->bpp;
-	frame_count = vout->pix.height;
-	tx = &vout->vrfb_dma_tx;
-	tx->tx_status = 0;
-	omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
-			(elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
-			tx->dev_id, 0x0);
-	/* src_port required only for OMAP1 */
-	omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			dmabuf, src_element_index, src_frame_index);
-	/*set dma source burst mode for VRFB */
-	omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
-	rotation = calc_rotation(vout);
+	tx->callback = omap_vout_vrfb_dma_tx_callback;
+	tx->callback_param = &vout->vrfb_dma_tx;
+
+	cookie = dmaengine_submit(tx);
+	if (dma_submit_error(cookie)) {
+		pr_err("%s: dmaengine_submit failed (%d)\n", __func__, cookie);
+		return -EINVAL;
+	}
-	/* dest_port required only for OMAP1 */
-	omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
-			vout->vrfb_context[vb->i].paddr[0], dest_element_index,
-			dest_frame_index);
-	/*set dma dest burst mode for VRFB */
-	omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
-	omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+	vout->vrfb_dma_tx.tx_status = 0;
+	dma_async_issue_pending(chan);
-	omap_start_dma(tx->dma_ch);
-	wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1,
+	wait_event_interruptible_timeout(vout->vrfb_dma_tx.wait,
+					 vout->vrfb_dma_tx.tx_status == 1,
					 VRFB_TX_TIMEOUT);
-	if (tx->tx_status == 0) {
-		omap_stop_dma(tx->dma_ch);
+	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+
+	if (vout->vrfb_dma_tx.tx_status == 0) {
+		pr_err("%s: Timeout while waiting for DMA\n", __func__);
+		dmaengine_terminate_sync(chan);
+		return -EINVAL;
+	} else if (status != DMA_COMPLETE) {
+		pr_err("%s: DMA completion %s status\n", __func__,
+		       status == DMA_ERROR ? "error" : "busy");
+		dmaengine_terminate_sync(chan);
		return -EINVAL;
	}
+	/* Store buffers physical address into an array.
Addresses * from this array will be used to configure DSS */ + rotation = calc_rotation(vout); vout->queued_buf_addr[vb->i] = (u8 *) vout->vrfb_context[vb->i].paddr[rotation]; return 0; diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h index 80c79fabdf95..56b630b1c8b4 100644 --- a/drivers/media/platform/omap/omap_voutdef.h +++ b/drivers/media/platform/omap/omap_voutdef.h @@ -14,6 +14,7 @@ #include <media/v4l2-ctrls.h> #include <video/omapfb_dss.h> #include <video/omapvrfb.h> +#include <linux/dmaengine.h> #define YUYV_BPP 2 #define RGB565_BPP 2 @@ -81,8 +82,9 @@ enum vout_rotaion_type { * for VRFB hidden buffer */ struct vid_vrfb_dma { - int dev_id; - int dma_ch; + struct dma_chan *chan; + struct dma_interleaved_template *xt; + int req_status; int tx_status; wait_queue_head_t wait; diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 776d2bae6979..41eef376eb2d 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -76,7 +76,7 @@ static void venus_sys_error_handler(struct work_struct *work) hfi_core_deinit(core, true); hfi_destroy(core); mutex_lock(&core->lock); - venus_shutdown(&core->dev_fw); + venus_shutdown(core->dev); pm_runtime_put_sync(core->dev); @@ -84,7 +84,7 @@ static void venus_sys_error_handler(struct work_struct *work) pm_runtime_get_sync(core->dev); - ret |= venus_boot(core->dev, &core->dev_fw, core->res->fwname); + ret |= venus_boot(core->dev, core->res->fwname); ret |= hfi_core_resume(core, true); @@ -137,7 +137,7 @@ static int venus_clks_enable(struct venus_core *core) return 0; err: - while (--i) + while (i--) clk_disable_unprepare(core->clks[i]); return ret; @@ -207,7 +207,7 @@ static int venus_probe(struct platform_device *pdev) if (ret < 0) goto err_runtime_disable; - ret = venus_boot(dev, &core->dev_fw, core->res->fwname); + ret = venus_boot(dev, core->res->fwname); if (ret) goto err_runtime_disable; @@ -238,7 +238,7 @@ err_dev_unregister: err_core_deinit: hfi_core_deinit(core, false); err_venus_shutdown: - venus_shutdown(&core->dev_fw); + venus_shutdown(dev); err_runtime_disable: pm_runtime_set_suspended(dev); pm_runtime_disable(dev); @@ -259,7 +259,7 @@ static int venus_remove(struct platform_device *pdev) WARN_ON(ret); hfi_destroy(core); - venus_shutdown(&core->dev_fw); + venus_shutdown(dev); of_platform_depopulate(dev); pm_runtime_put_sync(dev); @@ -270,8 +270,7 @@ static int venus_remove(struct platform_device *pdev) return ret; } -#ifdef CONFIG_PM -static int venus_runtime_suspend(struct device *dev) +static __maybe_unused int venus_runtime_suspend(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); int ret; @@ -283,7 +282,7 @@ static int venus_runtime_suspend(struct device *dev) return ret; } -static int venus_runtime_resume(struct device *dev) +static __maybe_unused int venus_runtime_resume(struct device *dev) { struct venus_core *core = dev_get_drvdata(dev); int ret; @@ -302,7 +301,6 @@ err_clks_disable: venus_clks_disable(core); return ret; } -#endif static const struct dev_pm_ops venus_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index e542700eee32..cba092bcb76d 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -101,7 +101,6 @@ struct venus_core { struct device *dev; struct device *dev_dec; struct device *dev_enc; - struct device 
dev_fw; struct mutex lock; struct list_head instances; atomic_t insts_count; diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index 1b1a4f355918..521d4b36c090 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -12,97 +12,87 @@ * */ -#include <linux/dma-mapping.h> +#include <linux/device.h> #include <linux/firmware.h> #include <linux/kernel.h> +#include <linux/io.h> #include <linux/of.h> -#include <linux/of_reserved_mem.h> -#include <linux/slab.h> +#include <linux/of_address.h> #include <linux/qcom_scm.h> +#include <linux/sizes.h> #include <linux/soc/qcom/mdt_loader.h> #include "firmware.h" #define VENUS_PAS_ID 9 -#define VENUS_FW_MEM_SIZE SZ_8M +#define VENUS_FW_MEM_SIZE (6 * SZ_1M) -static void device_release_dummy(struct device *dev) -{ - of_reserved_mem_device_release(dev); -} - -int venus_boot(struct device *parent, struct device *fw_dev, const char *fwname) +int venus_boot(struct device *dev, const char *fwname) { const struct firmware *mdt; + struct device_node *node; phys_addr_t mem_phys; + struct resource r; ssize_t fw_size; size_t mem_size; void *mem_va; int ret; - if (!qcom_scm_is_available()) + if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) || !qcom_scm_is_available()) return -EPROBE_DEFER; - fw_dev->parent = parent; - fw_dev->release = device_release_dummy; + node = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!node) { + dev_err(dev, "no memory-region specified\n"); + return -EINVAL; + } - ret = dev_set_name(fw_dev, "%s:%s", dev_name(parent), "firmware"); + ret = of_address_to_resource(node, 0, &r); if (ret) return ret; - ret = device_register(fw_dev); - if (ret < 0) - return ret; + mem_phys = r.start; + mem_size = resource_size(&r); - ret = of_reserved_mem_device_init_by_idx(fw_dev, parent->of_node, 0); - if (ret) - goto err_unreg_device; + if (mem_size < VENUS_FW_MEM_SIZE) + return -EINVAL; - mem_size = VENUS_FW_MEM_SIZE; - - mem_va = dmam_alloc_coherent(fw_dev, mem_size, &mem_phys, GFP_KERNEL); + mem_va = memremap(r.start, mem_size, MEMREMAP_WC); if (!mem_va) { - ret = -ENOMEM; - goto err_unreg_device; + dev_err(dev, "unable to map memory region: %pa+%zx\n", + &r.start, mem_size); + return -ENOMEM; } - ret = request_firmware(&mdt, fwname, fw_dev); + ret = request_firmware(&mdt, fwname, dev); if (ret < 0) - goto err_unreg_device; + goto err_unmap; fw_size = qcom_mdt_get_size(mdt); if (fw_size < 0) { ret = fw_size; release_firmware(mdt); - goto err_unreg_device; + goto err_unmap; } - ret = qcom_mdt_load(fw_dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys, + ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys, mem_size); release_firmware(mdt); if (ret) - goto err_unreg_device; + goto err_unmap; ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID); if (ret) - goto err_unreg_device; - - return 0; + goto err_unmap; -err_unreg_device: - device_unregister(fw_dev); +err_unmap: + memunmap(mem_va); return ret; } -int venus_shutdown(struct device *fw_dev) +int venus_shutdown(struct device *dev) { - int ret; - - ret = qcom_scm_pas_shutdown(VENUS_PAS_ID); - device_unregister(fw_dev); - memset(fw_dev, 0, sizeof(*fw_dev)); - - return ret; + return qcom_scm_pas_shutdown(VENUS_PAS_ID); } diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h index f81a98979798..428efb56d339 100644 --- a/drivers/media/platform/qcom/venus/firmware.h +++ b/drivers/media/platform/qcom/venus/firmware.h @@ -16,8 +16,7 @@ struct 
device; -int venus_boot(struct device *parent, struct device *fw_dev, - const char *fwname); -int venus_shutdown(struct device *fw_dev); +int venus_boot(struct device *dev, const char *fwname); +int venus_shutdown(struct device *dev); #endif diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c index f8841713e417..a681ae5381d6 100644 --- a/drivers/media/platform/qcom/venus/hfi_msgs.c +++ b/drivers/media/platform/qcom/venus/hfi_msgs.c @@ -239,11 +239,12 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst, break; } - if (!error) { - rem_bytes -= read_bytes; - data += read_bytes; - num_properties--; - } + if (error) + break; + + rem_bytes -= read_bytes; + data += read_bytes; + num_properties--; } err_no_prop: diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c index 7af66860d624..2cc289e4dea1 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c @@ -104,7 +104,7 @@ static void bdisp_dbg_dump_ins(struct seq_file *s, u32 val) if (val & BLT_INS_IRQ) seq_puts(s, "IRQ - "); - seq_puts(s, "\n"); + seq_putc(s, '\n'); } static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val) @@ -153,7 +153,7 @@ static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val) if (val & BLT_TTY_BIG_END) seq_puts(s, "BigEndian - "); - seq_puts(s, "\n"); + seq_putc(s, '\n'); } static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name) @@ -230,7 +230,7 @@ static void bdisp_dbg_dump_sty(struct seq_file *s, seq_puts(s, "BigEndian - "); done: - seq_puts(s, "\n"); + seq_putc(s, '\n'); } static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val) @@ -247,7 +247,7 @@ static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val) else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE) seq_puts(s, "Sample Chroma"); - seq_puts(s, "\n"); + seq_putc(s, '\n'); } static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name) @@ -266,7 +266,7 @@ static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name) seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc); done: - seq_puts(s, "\n"); + seq_putc(s, '\n'); } static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name) @@ -281,7 +281,7 @@ static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name) seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7); done: - seq_puts(s, "\n"); + seq_putc(s, '\n'); } static void bdisp_dbg_dump_ivmx(struct seq_file *s, @@ -293,7 +293,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s, seq_printf(s, "IVMX3\t0x%08X\t", c3); if (!c0 && !c1 && !c2 && !c3) { - seq_puts(s, "\n"); + seq_putc(s, '\n'); return; } diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c index 14cb32e21130..88a1e5670c72 100644 --- a/drivers/media/platform/vimc/vimc-capture.c +++ b/drivers/media/platform/vimc/vimc-capture.c @@ -517,21 +517,22 @@ static int vimc_cap_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id vimc_cap_driver_ids[] = { + { + .name = VIMC_CAP_DRV_NAME, + }, + { } +}; + static struct platform_driver vimc_cap_pdrv = { .probe = vimc_cap_probe, .remove = vimc_cap_remove, + .id_table = vimc_cap_driver_ids, .driver = { .name = VIMC_CAP_DRV_NAME, }, }; -static const struct platform_device_id vimc_cap_driver_ids[] = { - { - .name = VIMC_CAP_DRV_NAME, - }, - { } -}; - module_platform_driver(vimc_cap_pdrv); 
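The same reordering is applied to all four vimc sub-drivers in the hunks around this point: the platform_device_id table must be defined before the platform_driver structure that now references it through .id_table, since C requires file-scope identifiers to be declared before use. A minimal, self-contained sketch of the resulting pattern, using hypothetical example_* names:

/* Sketch of the id-table pattern the vimc patches apply. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

/* Must precede example_pdrv, which references it in .id_table. */
static const struct platform_device_id example_driver_ids[] = {
	{ .name = "example-dev" },	/* matched against pdev->name */
	{ }
};

static struct platform_driver example_pdrv = {
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_driver_ids,	/* enables id-based matching */
	.driver = {
		.name = "example-dev",
	},
};

module_platform_driver(example_pdrv);
MODULE_DEVICE_TABLE(platform, example_driver_ids);
MODULE_LICENSE("GPL");

With the table wired into .id_table the platform core can match the devices by id, and MODULE_DEVICE_TABLE() exports the entries as module aliases for autoloading.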
MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids); diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index 35b15bd4d61d..033a131f67af 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c @@ -577,21 +577,22 @@ static int vimc_deb_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id vimc_deb_driver_ids[] = { + { + .name = VIMC_DEB_DRV_NAME, + }, + { } +}; + static struct platform_driver vimc_deb_pdrv = { .probe = vimc_deb_probe, .remove = vimc_deb_remove, + .id_table = vimc_deb_driver_ids, .driver = { .name = VIMC_DEB_DRV_NAME, }, }; -static const struct platform_device_id vimc_deb_driver_ids[] = { - { - .name = VIMC_DEB_DRV_NAME, - }, - { } -}; - module_platform_driver(vimc_deb_pdrv); MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids); diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index fe77505d2679..0a3e086e12f3 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c @@ -431,21 +431,22 @@ static int vimc_sca_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id vimc_sca_driver_ids[] = { + { + .name = VIMC_SCA_DRV_NAME, + }, + { } +}; + static struct platform_driver vimc_sca_pdrv = { .probe = vimc_sca_probe, .remove = vimc_sca_remove, + .id_table = vimc_sca_driver_ids, .driver = { .name = VIMC_SCA_DRV_NAME, }, }; -static const struct platform_device_id vimc_sca_driver_ids[] = { - { - .name = VIMC_SCA_DRV_NAME, - }, - { } -}; - module_platform_driver(vimc_sca_pdrv); MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids); diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index ebdbbe8c05ed..615c2b18dcfc 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c @@ -365,21 +365,22 @@ static int vimc_sen_remove(struct platform_device *pdev) return 0; } +static const struct platform_device_id vimc_sen_driver_ids[] = { + { + .name = VIMC_SEN_DRV_NAME, + }, + { } +}; + static struct platform_driver vimc_sen_pdrv = { .probe = vimc_sen_probe, .remove = vimc_sen_remove, + .id_table = vimc_sen_driver_ids, .driver = { .name = VIMC_SEN_DRV_NAME, }, }; -static const struct platform_device_id vimc_sen_driver_ids[] = { - { - .name = VIMC_SEN_DRV_NAME, - }, - { } -}; - module_platform_driver(vimc_sen_pdrv); MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids); diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h index 847963b6e9eb..78ef838416b3 100644 --- a/drivers/media/platform/vsp1/vsp1.h +++ b/drivers/media/platform/vsp1/vsp1.h @@ -41,11 +41,11 @@ struct vsp1_rwpf; struct vsp1_sru; struct vsp1_uds; +#define VSP1_MAX_LIF 2 #define VSP1_MAX_RPF 5 #define VSP1_MAX_UDS 3 #define VSP1_MAX_WPF 4 -#define VSP1_HAS_LIF (1 << 0) #define VSP1_HAS_LUT (1 << 1) #define VSP1_HAS_SRU (1 << 2) #define VSP1_HAS_BRU (1 << 3) @@ -54,12 +54,14 @@ struct vsp1_uds; #define VSP1_HAS_WPF_HFLIP (1 << 6) #define VSP1_HAS_HGO (1 << 7) #define VSP1_HAS_HGT (1 << 8) +#define VSP1_HAS_BRS (1 << 9) struct vsp1_device_info { u32 version; const char *model; unsigned int gen; unsigned int features; + unsigned int lif_count; unsigned int rpf_count; unsigned int uds_count; unsigned int wpf_count; @@ -76,13 +78,14 @@ struct vsp1_device { struct rcar_fcp_device *fcp; struct device *bus_master; + struct vsp1_bru *brs; struct vsp1_bru *bru; struct 
vsp1_clu *clu; struct vsp1_hgo *hgo; struct vsp1_hgt *hgt; struct vsp1_hsit *hsi; struct vsp1_hsit *hst; - struct vsp1_lif *lif; + struct vsp1_lif *lif[VSP1_MAX_LIF]; struct vsp1_lut *lut; struct vsp1_rwpf *rpf[VSP1_MAX_RPF]; struct vsp1_sru *sru; diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c index 85362c5ef57a..e8fd2ae3b3eb 100644 --- a/drivers/media/platform/vsp1/vsp1_bru.c +++ b/drivers/media/platform/vsp1/vsp1_bru.c @@ -33,7 +33,7 @@ static inline void vsp1_bru_write(struct vsp1_bru *bru, struct vsp1_dl_list *dl, u32 reg, u32 data) { - vsp1_dl_list_write(dl, reg, data); + vsp1_dl_list_write(dl, bru->base + reg, data); } /* ----------------------------------------------------------------------------- @@ -332,11 +332,14 @@ static void bru_configure(struct vsp1_entity *entity, /* * Route BRU input 1 as SRC input to the ROP unit and configure the ROP * unit with a NOP operation to make BRU input 1 available as the - * Blend/ROP unit B SRC input. + * Blend/ROP unit B SRC input. Only needed for BRU, the BRS has no ROP + * unit. */ - vsp1_bru_write(bru, dl, VI6_BRU_ROP, VI6_BRU_ROP_DSTSEL_BRUIN(1) | - VI6_BRU_ROP_CROP(VI6_ROP_NOP) | - VI6_BRU_ROP_AROP(VI6_ROP_NOP)); + if (entity->type == VSP1_ENTITY_BRU) + vsp1_bru_write(bru, dl, VI6_BRU_ROP, + VI6_BRU_ROP_DSTSEL_BRUIN(1) | + VI6_BRU_ROP_CROP(VI6_ROP_NOP) | + VI6_BRU_ROP_AROP(VI6_ROP_NOP)); for (i = 0; i < bru->entity.source_pad; ++i) { bool premultiplied = false; @@ -366,12 +369,13 @@ static void bru_configure(struct vsp1_entity *entity, ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF; /* - * Route BRU inputs 0 to 3 as SRC inputs to Blend/ROP units A to - * D in that order. The Blend/ROP unit B SRC is hardwired to the - * ROP unit output, the corresponding register bits must be set - * to 0. + * Route inputs 0 to 3 as SRC inputs to Blend/ROP units A to D + * in that order. In the BRU the Blend/ROP unit B SRC is + * hardwired to the ROP unit output, the corresponding register + * bits must be set to 0. The BRS has no ROP unit and doesn't + * need any special processing. */ - if (i != 1) + if (!(entity->type == VSP1_ENTITY_BRU && i == 1)) ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i); vsp1_bru_write(bru, dl, VI6_BRU_CTRL(i), ctrl); @@ -407,20 +411,31 @@ static const struct vsp1_entity_operations bru_entity_ops = { * Initialization and Cleanup */ -struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1) +struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1, + enum vsp1_entity_type type) { struct vsp1_bru *bru; + unsigned int num_pads; + const char *name; int ret; bru = devm_kzalloc(vsp1->dev, sizeof(*bru), GFP_KERNEL); if (bru == NULL) return ERR_PTR(-ENOMEM); + bru->base = type == VSP1_ENTITY_BRU ? 
VI6_BRU_BASE : VI6_BRS_BASE; bru->entity.ops = &bru_entity_ops; - bru->entity.type = VSP1_ENTITY_BRU; + bru->entity.type = type; + + if (type == VSP1_ENTITY_BRU) { + num_pads = vsp1->info->num_bru_inputs + 1; + name = "bru"; + } else { + num_pads = 3; + name = "brs"; + } - ret = vsp1_entity_init(vsp1, &bru->entity, "bru", - vsp1->info->num_bru_inputs + 1, &bru_ops, + ret = vsp1_entity_init(vsp1, &bru->entity, name, num_pads, &bru_ops, MEDIA_ENT_F_PROC_VIDEO_COMPOSER); if (ret < 0) return ERR_PTR(ret); @@ -435,7 +450,7 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1) bru->entity.subdev.ctrl_handler = &bru->ctrls; if (bru->ctrls.error) { - dev_err(vsp1->dev, "bru: failed to initialize controls\n"); + dev_err(vsp1->dev, "%s: failed to initialize controls\n", name); ret = bru->ctrls.error; vsp1_entity_destroy(&bru->entity); return ERR_PTR(ret); diff --git a/drivers/media/platform/vsp1/vsp1_bru.h b/drivers/media/platform/vsp1/vsp1_bru.h index 828a3fcadea8..c98ed96d8de6 100644 --- a/drivers/media/platform/vsp1/vsp1_bru.h +++ b/drivers/media/platform/vsp1/vsp1_bru.h @@ -26,6 +26,7 @@ struct vsp1_rwpf; struct vsp1_bru { struct vsp1_entity entity; + unsigned int base; struct v4l2_ctrl_handler ctrls; @@ -41,6 +42,7 @@ static inline struct vsp1_bru *to_bru(struct v4l2_subdev *subdev) return container_of(subdev, struct vsp1_bru, entity.subdev); } -struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1); +struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1, + enum vsp1_entity_type type); #endif /* __VSP1_BRU_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index aaf17b13fd78..8b5cbb6b7a70 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -95,6 +95,7 @@ enum vsp1_dl_mode { * struct vsp1_dl_manager - Display List manager * @index: index of the related WPF * @mode: display list operation mode (header or headerless) + * @singleshot: execute the display list in single-shot mode * @vsp1: the VSP1 device * @lock: protects the free, active, queued, pending and gc_fragments lists * @free: array of all free display lists @@ -107,6 +108,7 @@ enum vsp1_dl_mode { struct vsp1_dl_manager { unsigned int index; enum vsp1_dl_mode mode; + bool singleshot; struct vsp1_device *vsp1; spinlock_t lock; @@ -437,6 +439,7 @@ int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last) { + struct vsp1_dl_manager *dlm = dl->dlm; struct vsp1_dl_header_list *hdr = dl->header->lists; struct vsp1_dl_body *dlb; unsigned int num_lists = 0; @@ -461,106 +464,152 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last) dl->header->num_lists = num_lists; - /* - * If this display list's chain is not empty, we are on a list, where - * the next item in the list is the display list entity which should be - * automatically queued by the hardware. - */ if (!list_empty(&dl->chain) && !is_last) { + /* + * If this display list's chain is not empty, we are on a list, + * and the next item is the display list that we must queue for + * automatic processing by the hardware. + */ struct vsp1_dl_list *next = list_next_entry(dl, chain); dl->header->next_header = next->dma; dl->header->flags = VSP1_DLH_AUTO_START; + } else if (!dlm->singleshot) { + /* + * if the display list manager works in continuous mode, the VSP + * should loop over the display list continuously until + * instructed to do otherwise. 
+	 */
+	dl->header->next_header = dl->dma;
+	dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
+		/*
+		 * Otherwise, in mem-to-mem mode, we work in single-shot mode
+		 * and the next display list must not be started automatically.
+		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}

-void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
+static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
-	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
-	unsigned long flags;
-	bool update;
-	spin_lock_irqsave(&dlm->lock, flags);
+	if (!dlm->queued)
+		return false;
-	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
-		struct vsp1_dl_list *dl_child;
+	/*
+	 * Check whether the VSP1 has taken the update. In headerless mode the
+	 * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
+	 * register, and in header mode by clearing the UPDHDR bit in the CMD
+	 * register.
+	 */
+	if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
+		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
+			  & VI6_DL_BODY_SIZE_UPD);
+	else
+		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
+}
+
+static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
+{
+	struct vsp1_dl_manager *dlm = dl->dlm;
+	struct vsp1_device *vsp1 = dlm->vsp1;
+
+	if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
		/*
-		 * In header mode the caller guarantees that the hardware is
-		 * idle at this point.
+		 * In headerless mode, program the hardware directly with the
+		 * display list body address and size and set the UPD bit. The
+		 * bit will be cleared by the hardware when the display list
+		 * processing starts.
		 */
-
-		/* Fill the header for the head and chained display lists. */
-		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
-
-		list_for_each_entry(dl_child, &dl->chain, chain) {
-			bool last = list_is_last(&dl_child->chain, &dl->chain);
-
-			vsp1_dl_list_fill_header(dl_child, last);
-		}
-
+		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
+		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
+			   (dl->body0.num_entries * sizeof(*dl->header->lists)));
+	} else {
		/*
-		 * Commit the head display list to hardware. Chained headers
-		 * will auto-start.
+		 * In header mode, program the display list header address. If
+		 * the hardware is idle (single-shot mode or first frame in
+		 * continuous mode) it will then be started independently. If
+		 * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
+		 * will be updated with the display list address.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
-
-		dlm->active = dl;
-		goto done;
	}
+}
+
+static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
+{
+	struct vsp1_dl_manager *dlm = dl->dlm;
	/*
-	 * Once the UPD bit has been set the hardware can start processing the
-	 * display list at any time and we can't touch the address and size
-	 * registers. In that case mark the update as pending, it will be
-	 * queued up to the hardware by the frame end interrupt handler.
+	 * If a previous display list has been queued to the hardware but not
+	 * processed yet, the VSP can start processing it at any time. In that
+	 * case we can't replace the queued list by the new one, as we could
+	 * race with the hardware. We thus mark the update as pending, it will
+	 * be queued up to the hardware by the frame end interrupt handler.
*/ - update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD); - if (update) { + if (vsp1_dl_list_hw_update_pending(dlm)) { __vsp1_dl_list_put(dlm->pending); dlm->pending = dl; - goto done; + return; } /* - * Program the hardware with the display list body address and size. - * The UPD bit will be cleared by the device when the display list is - * processed. + * Pass the new display list to the hardware and mark it as queued. It + * will become active when the hardware starts processing it. */ - vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma); - vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | - (dl->body0.num_entries * sizeof(*dl->header->lists))); + vsp1_dl_list_hw_enqueue(dl); __vsp1_dl_list_put(dlm->queued); dlm->queued = dl; - -done: - spin_unlock_irqrestore(&dlm->lock, flags); } -/* ----------------------------------------------------------------------------- - * Display List Manager - */ - -/* Interrupt Handling */ -void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm) +static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl) { - spin_lock(&dlm->lock); + struct vsp1_dl_manager *dlm = dl->dlm; /* - * The display start interrupt signals the end of the display list - * processing by the device. The active display list, if any, won't be - * accessed anymore and can be reused. + * When working in single-shot mode, the caller guarantees that the + * hardware is idle at this point. Just commit the head display list + * to hardware. Chained lists will be started automatically. */ - __vsp1_dl_list_put(dlm->active); - dlm->active = NULL; + vsp1_dl_list_hw_enqueue(dl); - spin_unlock(&dlm->lock); + dlm->active = dl; +} + +void vsp1_dl_list_commit(struct vsp1_dl_list *dl) +{ + struct vsp1_dl_manager *dlm = dl->dlm; + struct vsp1_dl_list *dl_child; + unsigned long flags; + + if (dlm->mode == VSP1_DL_MODE_HEADER) { + /* Fill the header for the head and chained display lists. */ + vsp1_dl_list_fill_header(dl, list_empty(&dl->chain)); + + list_for_each_entry(dl_child, &dl->chain, chain) { + bool last = list_is_last(&dl_child->chain, &dl->chain); + + vsp1_dl_list_fill_header(dl_child, last); + } + } + + spin_lock_irqsave(&dlm->lock, flags); + + if (dlm->singleshot) + vsp1_dl_list_commit_singleshot(dl); + else + vsp1_dl_list_commit_continuous(dl); + + spin_unlock_irqrestore(&dlm->lock, flags); } +/* ----------------------------------------------------------------------------- + * Display List Manager + */ + /** * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt * @dlm: the display list manager @@ -572,31 +621,28 @@ void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm) */ bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm) { - struct vsp1_device *vsp1 = dlm->vsp1; bool completed = false; spin_lock(&dlm->lock); - __vsp1_dl_list_put(dlm->active); - dlm->active = NULL; - /* - * Header mode is used for mem-to-mem pipelines only. We don't need to - * perform any operation as there can't be any new display list queued - * in that case. + * The mem-to-mem pipelines work in single-shot mode. No new display + * list can be queued, we don't have to do anything. */ - if (dlm->mode == VSP1_DL_MODE_HEADER) { + if (dlm->singleshot) { + __vsp1_dl_list_put(dlm->active); + dlm->active = NULL; completed = true; goto done; } /* - * The UPD bit set indicates that the commit operation raced with the - * interrupt and occurred after the frame end event and UPD clear but - * before interrupt processing. 
The hardware hasn't taken the update - * into account yet, we'll thus skip one frame and retry. + * If the commit operation raced with the interrupt and occurred after + * the frame end event but before interrupt processing, the hardware + * hasn't taken the update into account yet. We have to skip one frame + * and retry. */ - if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD) + if (vsp1_dl_list_hw_update_pending(dlm)) goto done; /* @@ -604,24 +650,20 @@ bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm) * frame end interrupt. The display list thus becomes active. */ if (dlm->queued) { + __vsp1_dl_list_put(dlm->active); dlm->active = dlm->queued; dlm->queued = NULL; completed = true; } /* - * Now that the UPD bit has been cleared we can queue the next display - * list to the hardware if one has been prepared. + * Now that the VSP has started processing the queued display list, we + * can queue the pending display list to the hardware if one has been + * prepared. */ if (dlm->pending) { - struct vsp1_dl_list *dl = dlm->pending; - - vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma); - vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD | - (dl->body0.num_entries * - sizeof(*dl->header->lists))); - - dlm->queued = dl; + vsp1_dl_list_hw_enqueue(dlm->pending); + dlm->queued = dlm->pending; dlm->pending = NULL; } @@ -714,6 +756,7 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1, dlm->index = index; dlm->mode = index == 0 && !vsp1->info->uapi ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER; + dlm->singleshot = vsp1->info->uapi; dlm->vsp1 = vsp1; spin_lock_init(&dlm->lock); diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h index 6ec1380a10af..ee3508172f0a 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.h +++ b/drivers/media/platform/vsp1/vsp1_dl.h @@ -27,7 +27,6 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1, unsigned int prealloc); void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm); void vsp1_dlm_reset(struct vsp1_dl_manager *dlm); -void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm); bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm); struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm); diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index 9377aafa8996..4dfbeac8f42c 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -32,17 +32,13 @@ * Interrupt Handling */ -void vsp1_drm_display_start(struct vsp1_device *vsp1) +static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe, + bool completed) { - vsp1_dlm_irq_display_start(vsp1->drm->pipe.output->dlm); -} - -static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe) -{ - struct vsp1_drm *drm = to_vsp1_drm(pipe); + struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe); - if (drm->du_complete) - drm->du_complete(drm->du_private); + if (drm_pipe->du_complete) + drm_pipe->du_complete(drm_pipe->du_private, completed); } /* ----------------------------------------------------------------------------- @@ -63,29 +59,44 @@ EXPORT_SYMBOL_GPL(vsp1_du_init); /** * vsp1_du_setup_lif - Setup the output part of the VSP pipeline * @dev: the VSP device + * @pipe_index: the DRM pipeline index * @cfg: the LIF configuration * * Configure the output part of VSP DRM pipeline for the given frame @cfg.width - * and @cfg.height. 
This sets up formats on the BRU source pad, the WPF0 sink - * and source pads, and the LIF sink pad. + * and @cfg.height. This sets up formats on the blend unit (BRU or BRS) source + * pad, the WPF sink and source pads, and the LIF sink pad. + * + * The @pipe_index argument selects which DRM pipeline to setup. The number of + * available pipelines depend on the VSP instance. * - * As the media bus code on the BRU source pad is conditioned by the - * configuration of the BRU sink 0 pad, we also set up the formats on all BRU + * As the media bus code on the blend unit source pad is conditioned by the + * configuration of its sink 0 pad, we also set up the formats on all blend unit * sinks, even if the configuration will be overwritten later by - * vsp1_du_setup_rpf(). This ensures that the BRU configuration is set to a well - * defined state. + * vsp1_du_setup_rpf(). This ensures that the blend unit configuration is set to + * a well defined state. * * Return 0 on success or a negative error code on failure. */ -int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) +int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, + const struct vsp1_du_lif_config *cfg) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); - struct vsp1_pipeline *pipe = &vsp1->drm->pipe; - struct vsp1_bru *bru = vsp1->bru; + struct vsp1_drm_pipeline *drm_pipe; + struct vsp1_pipeline *pipe; + struct vsp1_bru *bru; struct v4l2_subdev_format format; + const char *bru_name; unsigned int i; int ret; + if (pipe_index >= vsp1->info->lif_count) + return -EINVAL; + + drm_pipe = &vsp1->drm->pipe[pipe_index]; + pipe = &drm_pipe->pipe; + bru = to_bru(&pipe->bru->subdev); + bru_name = pipe->bru->type == VSP1_ENTITY_BRU ? "BRU" : "BRS"; + if (!cfg) { /* * NULL configuration means the CRTC is being disabled, stop @@ -97,14 +108,25 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) media_pipeline_stop(&pipe->output->entity.subdev.entity); - for (i = 0; i < bru->entity.source_pad; ++i) { - vsp1->drm->inputs[i].enabled = false; - bru->inputs[i].rpf = NULL; + for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) { + struct vsp1_rwpf *rpf = pipe->inputs[i]; + + if (!rpf) + continue; + + /* + * Remove the RPF from the pipe and the list of BRU + * inputs. 
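+			 * list_del_init() leaves the node self-pointing, so
+			 * the atomic flush handler can later use list_empty()
+			 * to tell whether the RPF must be re-added to the
+			 * pipeline's entity list.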
+ */ + WARN_ON(list_empty(&rpf->entity.list_pipe)); + list_del_init(&rpf->entity.list_pipe); pipe->inputs[i] = NULL; + + bru->inputs[rpf->bru_input].rpf = NULL; } + drm_pipe->du_complete = NULL; pipe->num_inputs = 0; - vsp1->drm->du_complete = NULL; vsp1_dlm_reset(pipe->output->dlm); vsp1_device_put(vsp1); @@ -114,8 +136,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) return 0; } - dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", - __func__, cfg->width, cfg->height); + dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u\n", + __func__, pipe_index, cfg->width, cfg->height); /* * Configure the format at the BRU sinks and propagate it through the @@ -124,7 +146,7 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) memset(&format, 0, sizeof(format)); format.which = V4L2_SUBDEV_FORMAT_ACTIVE; - for (i = 0; i < bru->entity.source_pad; ++i) { + for (i = 0; i < pipe->bru->source_pad; ++i) { format.pad = i; format.format.width = cfg->width; @@ -132,60 +154,60 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; format.format.field = V4L2_FIELD_NONE; - ret = v4l2_subdev_call(&bru->entity.subdev, pad, + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n", + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", __func__, format.format.width, format.format.height, - format.format.code, i); + format.format.code, bru_name, i); } - format.pad = bru->entity.source_pad; + format.pad = pipe->bru->source_pad; format.format.width = cfg->width; format.format.height = cfg->height; format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; format.format.field = V4L2_FIELD_NONE; - ret = v4l2_subdev_call(&bru->entity.subdev, pad, set_fmt, NULL, + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n", + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", __func__, format.format.width, format.format.height, - format.format.code, i); + format.format.code, bru_name, i); format.pad = RWPF_PAD_SINK; - ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, set_fmt, NULL, + ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF0 sink\n", + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF%u sink\n", __func__, format.format.width, format.format.height, - format.format.code); + format.format.code, pipe->output->entity.index); format.pad = RWPF_PAD_SOURCE; - ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, get_fmt, NULL, + ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, get_fmt, NULL, &format); if (ret < 0) return ret; - dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF0 source\n", + dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF%u source\n", __func__, format.format.width, format.format.height, - format.format.code); + format.format.code, pipe->output->entity.index); format.pad = LIF_PAD_SINK; - ret = v4l2_subdev_call(&vsp1->lif->entity.subdev, pad, set_fmt, NULL, + ret = v4l2_subdev_call(&pipe->lif->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; - dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF sink\n", + dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF%u sink\n", 
__func__, format.format.width, format.format.height, - format.format.code); + format.format.code, pipe_index); /* * Verify that the format at the output of the pipeline matches the @@ -213,8 +235,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) * Register a callback to allow us to notify the DRM driver of frame * completion events. */ - vsp1->drm->du_complete = cfg->callback; - vsp1->drm->du_private = cfg->callback_data; + drm_pipe->du_complete = cfg->callback; + drm_pipe->du_private = cfg->callback_data; ret = media_pipeline_start(&pipe->output->entity.subdev.entity, &pipe->pipe); @@ -224,6 +246,10 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) return ret; } + /* Disable the display interrupts. */ + vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0); + vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0); + dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__); return 0; @@ -233,19 +259,21 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif); /** * vsp1_du_atomic_begin - Prepare for an atomic update * @dev: the VSP device + * @pipe_index: the DRM pipeline index */ -void vsp1_du_atomic_begin(struct device *dev) +void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); - struct vsp1_pipeline *pipe = &vsp1->drm->pipe; + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; - vsp1->drm->num_inputs = pipe->num_inputs; + drm_pipe->enabled = drm_pipe->pipe.num_inputs != 0; } EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); /** * vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline * @dev: the VSP device + * @pipe_index: the DRM pipeline index * @rpf_index: index of the RPF to setup (0-based) * @cfg: the RPF configuration * @@ -272,10 +300,12 @@ EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); * * Return 0 on success or a negative error code on failure. */ -int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index, +int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, + unsigned int rpf_index, const struct vsp1_du_atomic_config *cfg) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; const struct vsp1_format_info *fmtinfo; struct vsp1_rwpf *rpf; @@ -288,7 +318,12 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index, dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__, rpf_index); - vsp1->drm->inputs[rpf_index].enabled = false; + /* + * Remove the RPF from the pipe's inputs. The atomic flush + * handler will disable the input and remove the entity from the + * pipe's entities list. + */ + drm_pipe->pipe.inputs[rpf_index] = NULL; return 0; } @@ -324,13 +359,15 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index, vsp1->drm->inputs[rpf_index].crop = cfg->src; vsp1->drm->inputs[rpf_index].compose = cfg->dst; vsp1->drm->inputs[rpf_index].zpos = cfg->zpos; - vsp1->drm->inputs[rpf_index].enabled = true; + + drm_pipe->pipe.inputs[rpf_index] = rpf; return 0; } EXPORT_SYMBOL_GPL(vsp1_du_atomic_update); static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1, + struct vsp1_pipeline *pipe, struct vsp1_rwpf *rpf, unsigned int bru_input) { struct v4l2_subdev_selection sel; @@ -404,7 +441,7 @@ static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1, /* BRU sink, propagate the format from the RPF source. 
*/ format.pad = bru_input; - ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_fmt, NULL, + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; @@ -417,8 +454,8 @@ static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1, sel.target = V4L2_SEL_TGT_COMPOSE; sel.r = vsp1->drm->inputs[rpf->entity.index].compose; - ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_selection, - NULL, &sel); + ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_selection, NULL, + &sel); if (ret < 0) return ret; @@ -438,18 +475,25 @@ static unsigned int rpf_zpos(struct vsp1_device *vsp1, struct vsp1_rwpf *rpf) /** * vsp1_du_atomic_flush - Commit an atomic update * @dev: the VSP device + * @pipe_index: the DRM pipeline index */ -void vsp1_du_atomic_flush(struct device *dev) +void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); - struct vsp1_pipeline *pipe = &vsp1->drm->pipe; + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; + struct vsp1_pipeline *pipe = &drm_pipe->pipe; struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, }; + struct vsp1_bru *bru = to_bru(&pipe->bru->subdev); struct vsp1_entity *entity; + struct vsp1_entity *next; struct vsp1_dl_list *dl; + const char *bru_name; unsigned long flags; unsigned int i; int ret; + bru_name = pipe->bru->type == VSP1_ENTITY_BRU ? "BRU" : "BRS"; + /* Prepare the display list. */ dl = vsp1_dl_list_get(pipe->output->dlm); @@ -460,12 +504,8 @@ void vsp1_du_atomic_flush(struct device *dev) struct vsp1_rwpf *rpf = vsp1->rpf[i]; unsigned int j; - if (!vsp1->drm->inputs[i].enabled) { - pipe->inputs[i] = NULL; + if (!pipe->inputs[i]) continue; - } - - pipe->inputs[i] = rpf; /* Insert the RPF in the sorted RPFs array. */ for (j = pipe->num_inputs++; j > 0; --j) { @@ -478,22 +518,26 @@ void vsp1_du_atomic_flush(struct device *dev) } /* Setup the RPF input pipeline for every enabled input. */ - for (i = 0; i < vsp1->info->num_bru_inputs; ++i) { + for (i = 0; i < pipe->bru->source_pad; ++i) { struct vsp1_rwpf *rpf = inputs[i]; if (!rpf) { - vsp1->bru->inputs[i].rpf = NULL; + bru->inputs[i].rpf = NULL; continue; } - vsp1->bru->inputs[i].rpf = rpf; + if (list_empty(&rpf->entity.list_pipe)) + list_add_tail(&rpf->entity.list_pipe, &pipe->entities); + + bru->inputs[i].rpf = rpf; rpf->bru_input = i; + rpf->entity.sink = pipe->bru; rpf->entity.sink_pad = i; - dev_dbg(vsp1->dev, "%s: connecting RPF.%u to BRU:%u\n", - __func__, rpf->entity.index, i); + dev_dbg(vsp1->dev, "%s: connecting RPF.%u to %s:%u\n", + __func__, rpf->entity.index, bru_name, i); - ret = vsp1_du_setup_rpf_pipe(vsp1, rpf, i); + ret = vsp1_du_setup_rpf_pipe(vsp1, pipe, rpf, i); if (ret < 0) dev_err(vsp1->dev, "%s: failed to setup RPF.%u\n", @@ -501,16 +545,16 @@ void vsp1_du_atomic_flush(struct device *dev) } /* Configure all entities in the pipeline. */ - list_for_each_entry(entity, &pipe->entities, list_pipe) { + list_for_each_entry_safe(entity, next, &pipe->entities, list_pipe) { /* Disconnect unused RPFs from the pipeline. 
*/ - if (entity->type == VSP1_ENTITY_RPF) { - struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev); + if (entity->type == VSP1_ENTITY_RPF && + !pipe->inputs[entity->index]) { + vsp1_dl_list_write(dl, entity->route->reg, + VI6_DPR_NODE_UNUSED); - if (!pipe->inputs[rpf->entity.index]) { - vsp1_dl_list_write(dl, entity->route->reg, - VI6_DPR_NODE_UNUSED); - continue; - } + list_del_init(&entity->list_pipe); + + continue; } vsp1_entity_route_setup(entity, pipe, dl); @@ -528,14 +572,11 @@ void vsp1_du_atomic_flush(struct device *dev) vsp1_dl_list_commit(dl); /* Start or stop the pipeline if needed. */ - if (!vsp1->drm->num_inputs && pipe->num_inputs) { - vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0); - vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE); + if (!drm_pipe->enabled && pipe->num_inputs) { spin_lock_irqsave(&pipe->irqlock, flags); vsp1_pipeline_run(pipe); spin_unlock_irqrestore(&pipe->irqlock, flags); - } else if (vsp1->drm->num_inputs && !pipe->num_inputs) { - vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0); + } else if (drm_pipe->enabled && !pipe->num_inputs) { vsp1_pipeline_stop(pipe); } } @@ -568,83 +609,48 @@ EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg); * Initialization */ -int vsp1_drm_create_links(struct vsp1_device *vsp1) -{ - const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; - unsigned int i; - int ret; - - /* - * VSPD instances require a BRU to perform composition and a LIF to - * output to the DU. - */ - if (!vsp1->bru || !vsp1->lif) - return -ENXIO; - - for (i = 0; i < vsp1->info->rpf_count; ++i) { - struct vsp1_rwpf *rpf = vsp1->rpf[i]; - - ret = media_create_pad_link(&rpf->entity.subdev.entity, - RWPF_PAD_SOURCE, - &vsp1->bru->entity.subdev.entity, - i, flags); - if (ret < 0) - return ret; - - rpf->entity.sink = &vsp1->bru->entity.subdev.entity; - rpf->entity.sink_pad = i; - } - - ret = media_create_pad_link(&vsp1->bru->entity.subdev.entity, - vsp1->bru->entity.source_pad, - &vsp1->wpf[0]->entity.subdev.entity, - RWPF_PAD_SINK, flags); - if (ret < 0) - return ret; - - vsp1->bru->entity.sink = &vsp1->wpf[0]->entity.subdev.entity; - vsp1->bru->entity.sink_pad = RWPF_PAD_SINK; - - ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity, - RWPF_PAD_SOURCE, - &vsp1->lif->entity.subdev.entity, - LIF_PAD_SINK, flags); - if (ret < 0) - return ret; - - return 0; -} - int vsp1_drm_init(struct vsp1_device *vsp1) { - struct vsp1_pipeline *pipe; unsigned int i; vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL); if (!vsp1->drm) return -ENOMEM; - pipe = &vsp1->drm->pipe; + /* Create one DRM pipeline per LIF. */ + for (i = 0; i < vsp1->info->lif_count; ++i) { + struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[i]; + struct vsp1_pipeline *pipe = &drm_pipe->pipe; - vsp1_pipeline_init(pipe); + vsp1_pipeline_init(pipe); - /* The DRM pipeline is static, add entities manually. */ + /* + * The DRM pipeline is static, add entities manually. The first + * pipeline uses the BRU and the second pipeline the BRS. + */ + pipe->bru = i == 0 ? 
&vsp1->bru->entity : &vsp1->brs->entity; + pipe->lif = &vsp1->lif[i]->entity; + pipe->output = vsp1->wpf[i]; + pipe->output->pipe = pipe; + pipe->frame_end = vsp1_du_pipeline_frame_end; + + pipe->bru->sink = &pipe->output->entity; + pipe->bru->sink_pad = 0; + pipe->output->entity.sink = pipe->lif; + pipe->output->entity.sink_pad = 0; + + list_add_tail(&pipe->bru->list_pipe, &pipe->entities); + list_add_tail(&pipe->lif->list_pipe, &pipe->entities); + list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities); + } + + /* Disable all RPFs initially. */ for (i = 0; i < vsp1->info->rpf_count; ++i) { struct vsp1_rwpf *input = vsp1->rpf[i]; - list_add_tail(&input->entity.list_pipe, &pipe->entities); + INIT_LIST_HEAD(&input->entity.list_pipe); } - list_add_tail(&vsp1->bru->entity.list_pipe, &pipe->entities); - list_add_tail(&vsp1->wpf[0]->entity.list_pipe, &pipe->entities); - list_add_tail(&vsp1->lif->entity.list_pipe, &pipe->entities); - - pipe->bru = &vsp1->bru->entity; - pipe->lif = &vsp1->lif->entity; - pipe->output = vsp1->wpf[0]; - pipe->output->pipe = pipe; - pipe->frame_end = vsp1_du_pipeline_frame_end; - return 0; } diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h index e9f80727ff92..1cd9db785bf7 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.h +++ b/drivers/media/platform/vsp1/vsp1_drm.h @@ -18,38 +18,44 @@ #include "vsp1_pipe.h" /** - * vsp1_drm - State for the API exposed to the DRM driver + * vsp1_drm_pipeline - State for the API exposed to the DRM driver * @pipe: the VSP1 pipeline used for display - * @num_inputs: number of active pipeline inputs at the beginning of an update - * @inputs: source crop rectangle, destination compose rectangle and z-order - * position for every input + * @enabled: pipeline state at the beginning of an update * @du_complete: frame completion callback for the DU driver (optional) * @du_private: data to be passed to the du_complete callback */ -struct vsp1_drm { +struct vsp1_drm_pipeline { struct vsp1_pipeline pipe; - unsigned int num_inputs; + bool enabled; + + /* Frame synchronisation */ + void (*du_complete)(void *, bool); + void *du_private; +}; + +/** + * vsp1_drm - State for the API exposed to the DRM driver + * @pipe: the VSP1 DRM pipeline used for display + * @inputs: source crop rectangle, destination compose rectangle and z-order + * position for every input (indexed by RPF index) + */ +struct vsp1_drm { + struct vsp1_drm_pipeline pipe[VSP1_MAX_LIF]; + struct { - bool enabled; struct v4l2_rect crop; struct v4l2_rect compose; unsigned int zpos; } inputs[VSP1_MAX_RPF]; - - /* Frame synchronisation */ - void (*du_complete)(void *); - void *du_private; }; -static inline struct vsp1_drm *to_vsp1_drm(struct vsp1_pipeline *pipe) +static inline struct vsp1_drm_pipeline * +to_vsp1_drm_pipeline(struct vsp1_pipeline *pipe) { - return container_of(pipe, struct vsp1_drm, pipe); + return container_of(pipe, struct vsp1_drm_pipeline, pipe); } int vsp1_drm_init(struct vsp1_device *vsp1); void vsp1_drm_cleanup(struct vsp1_device *vsp1); -int vsp1_drm_create_links(struct vsp1_device *vsp1); - -void vsp1_drm_display_start(struct vsp1_device *vsp1); #endif /* __VSP1_DRM_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 95c26edead85..962e4c304076 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -68,14 +68,6 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data) } } - status = vsp1_read(vsp1, 
VI6_DISP_IRQ_STA); - vsp1_write(vsp1, VI6_DISP_IRQ_STA, ~status & VI6_DISP_IRQ_STA_DST); - - if (status & VI6_DISP_IRQ_STA_DST) { - vsp1_drm_display_start(vsp1); - ret = IRQ_HANDLED; - } - return ret; } @@ -92,6 +84,10 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data) * * - from a UDS to a UDS (UDS entities can't be chained) * - from an entity to itself (no loops are allowed) + * + * Furthermore, the BRS can't be connected to histogram generators, but no + * special check is currently needed as all VSP instances that include a BRS + * have no histogram generator. */ static int vsp1_create_sink_links(struct vsp1_device *vsp1, struct vsp1_entity *sink) @@ -129,7 +125,7 @@ static int vsp1_create_sink_links(struct vsp1_device *vsp1, return ret; if (flags & MEDIA_LNK_FL_ENABLED) - source->sink = entity; + source->sink = sink; } } @@ -172,10 +168,13 @@ static int vsp1_uapi_create_links(struct vsp1_device *vsp1) return ret; } - if (vsp1->lif) { - ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity, + for (i = 0; i < vsp1->info->lif_count; ++i) { + if (!vsp1->lif[i]) + continue; + + ret = media_create_pad_link(&vsp1->wpf[i]->entity.subdev.entity, RWPF_PAD_SOURCE, - &vsp1->lif->entity.subdev.entity, + &vsp1->lif[i]->entity.subdev.entity, LIF_PAD_SINK, 0); if (ret < 0) return ret; @@ -269,8 +268,18 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) } /* Instantiate all the entities. */ + if (vsp1->info->features & VSP1_HAS_BRS) { + vsp1->brs = vsp1_bru_create(vsp1, VSP1_ENTITY_BRS); + if (IS_ERR(vsp1->brs)) { + ret = PTR_ERR(vsp1->brs); + goto done; + } + + list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities); + } + if (vsp1->info->features & VSP1_HAS_BRU) { - vsp1->bru = vsp1_bru_create(vsp1); + vsp1->bru = vsp1_bru_create(vsp1, VSP1_ENTITY_BRU); if (IS_ERR(vsp1->bru)) { ret = PTR_ERR(vsp1->bru); goto done; @@ -328,18 +337,23 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) } /* - * The LIF is only supported when used in conjunction with the DU, in + * The LIFs are only supported when used in conjunction with the DU, in * which case the userspace API is disabled. If the userspace API is - * enabled skip the LIF, even when present. + * enabled skip the LIFs, even when present. */ - if (vsp1->info->features & VSP1_HAS_LIF && !vsp1->info->uapi) { - vsp1->lif = vsp1_lif_create(vsp1); - if (IS_ERR(vsp1->lif)) { - ret = PTR_ERR(vsp1->lif); - goto done; - } + if (!vsp1->info->uapi) { + for (i = 0; i < vsp1->info->lif_count; ++i) { + struct vsp1_lif *lif; + + lif = vsp1_lif_create(vsp1, i); + if (IS_ERR(lif)) { + ret = PTR_ERR(lif); + goto done; + } - list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities); + vsp1->lif[i] = lif; + list_add_tail(&lif->entity.list_dev, &vsp1->entities); + } } if (vsp1->info->features & VSP1_HAS_LUT) { @@ -420,7 +434,6 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) } list_add_tail(&video->list, &vsp1->videos); - wpf->entity.sink = &video->video.entity; } } @@ -432,19 +445,15 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) goto done; } - /* Create links. */ - if (vsp1->info->uapi) - ret = vsp1_uapi_create_links(vsp1); - else - ret = vsp1_drm_create_links(vsp1); - if (ret < 0) - goto done; - /* - * Register subdev nodes if the userspace API is enabled or initialize - * the DRM pipeline otherwise. + * Create links and register subdev nodes if the userspace API is + * enabled or initialize the DRM pipeline otherwise. 
*/ if (vsp1->info->uapi) { + ret = vsp1_uapi_create_links(vsp1); + if (ret < 0) + goto done; + ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev); if (ret < 0) goto done; @@ -515,6 +524,9 @@ static int vsp1_device_init(struct vsp1_device *vsp1) vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED); vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED); + if (vsp1->info->features & VSP1_HAS_BRS) + vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED); + vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) | (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT)); vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) | @@ -634,8 +646,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = { .version = VI6_IP_VERSION_MODEL_VSPD_GEN2, .model = "VSP1-D", .gen = 2, - .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LIF - | VSP1_HAS_LUT, + .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT, + .lif_count = 1, .rpf_count = 4, .uds_count = 1, .wpf_count = 1, @@ -668,8 +680,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = { .version = VI6_IP_VERSION_MODEL_VSPD_V2H, .model = "VSP1V-D", .gen = 2, - .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT - | VSP1_HAS_LIF, + .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT, + .lif_count = 1, .rpf_count = 4, .uds_count = 1, .wpf_count = 1, @@ -706,10 +718,37 @@ static const struct vsp1_device_info vsp1_device_infos[] = { .num_bru_inputs = 5, .uapi = true, }, { + .version = VI6_IP_VERSION_MODEL_VSPBS_GEN3, + .model = "VSP2-BS", + .gen = 3, + .features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP, + .rpf_count = 2, + .wpf_count = 1, + .uapi = true, + }, { .version = VI6_IP_VERSION_MODEL_VSPD_GEN3, .model = "VSP2-D", .gen = 3, - .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_WPF_VFLIP, + .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP, + .lif_count = 1, + .rpf_count = 5, + .wpf_count = 2, + .num_bru_inputs = 5, + }, { + .version = VI6_IP_VERSION_MODEL_VSPD_V3, + .model = "VSP2-D", + .gen = 3, + .features = VSP1_HAS_BRS | VSP1_HAS_BRU, + .lif_count = 1, + .rpf_count = 5, + .wpf_count = 1, + .num_bru_inputs = 5, + }, { + .version = VI6_IP_VERSION_MODEL_VSPDL_GEN3, + .model = "VSP2-DL", + .gen = 3, + .features = VSP1_HAS_BRS | VSP1_HAS_BRU, + .lif_count = 2, .rpf_count = 5, .wpf_count = 2, .num_bru_inputs = 5, diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c index 4bdb3b141611..54de15095709 100644 --- a/drivers/media/platform/vsp1/vsp1_entity.c +++ b/drivers/media/platform/vsp1/vsp1_entity.c @@ -24,18 +24,12 @@ #include "vsp1_pipe.h" #include "vsp1_rwpf.h" -static inline struct vsp1_entity * -media_entity_to_vsp1_entity(struct media_entity *entity) -{ - return container_of(entity, struct vsp1_entity, subdev.entity); -} - void vsp1_entity_route_setup(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl) { struct vsp1_entity *source; - struct vsp1_entity *sink; + u32 route; if (entity->type == VSP1_ENTITY_HGO) { u32 smppt; @@ -44,7 +38,7 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity, * The HGO is a special case, its routing is configured on the * sink pad. */ - source = media_entity_to_vsp1_entity(entity->sources[0]); + source = entity->sources[0]; smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT) | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); @@ -57,7 +51,7 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity, * The HGT is a special case, its routing is configured on the * sink pad. 
*/ - source = media_entity_to_vsp1_entity(entity->sources[0]); + source = entity->sources[0]; smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT) | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); @@ -69,9 +63,14 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity, if (source->route->reg == 0) return; - sink = media_entity_to_vsp1_entity(source->sink); - vsp1_dl_list_write(dl, source->route->reg, - sink->route->inputs[source->sink_pad]); + route = source->sink->route->inputs[source->sink_pad]; + /* + * The ILV and BRS share the same data path route. The extra BRSSEL bit + * selects between the ILV and BRS. + */ + if (source->type == VSP1_ENTITY_BRS) + route |= VI6_DPR_ROUTE_BRSSEL; + vsp1_dl_list_write(dl, source->route->reg, route); } /* ----------------------------------------------------------------------------- @@ -316,6 +315,12 @@ done: * Media Operations */ +static inline struct vsp1_entity * +media_entity_to_vsp1_entity(struct media_entity *entity) +{ + return container_of(entity, struct vsp1_entity, subdev.entity); +} + static int vsp1_entity_link_setup_source(const struct media_pad *source_pad, const struct media_pad *sink_pad, u32 flags) @@ -339,7 +344,7 @@ static int vsp1_entity_link_setup_source(const struct media_pad *source_pad, sink->type != VSP1_ENTITY_HGT) { if (source->sink) return -EBUSY; - source->sink = sink_pad->entity; + source->sink = sink; source->sink_pad = sink_pad->index; } } else { @@ -355,15 +360,17 @@ static int vsp1_entity_link_setup_sink(const struct media_pad *source_pad, u32 flags) { struct vsp1_entity *sink; + struct vsp1_entity *source; sink = media_entity_to_vsp1_entity(sink_pad->entity); + source = media_entity_to_vsp1_entity(source_pad->entity); if (flags & MEDIA_LNK_FL_ENABLED) { /* Fan-in is limited to one. 
*/ if (sink->sources[sink_pad->index]) return -EBUSY; - sink->sources[sink_pad->index] = source_pad->entity; + sink->sources[sink_pad->index] = source; } else { sink->sources[sink_pad->index] = NULL; } @@ -450,6 +457,8 @@ struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad) { VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) } static const struct vsp1_route vsp1_routes[] = { + { VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE, + { VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 }, { VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE, { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1), VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3), @@ -459,7 +468,8 @@ static const struct vsp1_route vsp1_routes[] = { { VSP1_ENTITY_HGT, 0, 0, { 0, }, 0 }, VSP1_ENTITY_ROUTE(HSI), VSP1_ENTITY_ROUTE(HST), - { VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, }, VI6_DPR_NODE_LIF }, + { VSP1_ENTITY_LIF, 0, 0, { 0, }, 0 }, + { VSP1_ENTITY_LIF, 1, 0, { 0, }, 0 }, VSP1_ENTITY_ROUTE(LUT), VSP1_ENTITY_ROUTE_RPF(0), VSP1_ENTITY_ROUTE_RPF(1), diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h index c169a060b6d2..11f8363fa6b0 100644 --- a/drivers/media/platform/vsp1/vsp1_entity.h +++ b/drivers/media/platform/vsp1/vsp1_entity.h @@ -23,6 +23,7 @@ struct vsp1_dl_list; struct vsp1_pipeline; enum vsp1_entity_type { + VSP1_ENTITY_BRS, VSP1_ENTITY_BRU, VSP1_ENTITY_CLU, VSP1_ENTITY_HGO, @@ -104,8 +105,8 @@ struct vsp1_entity { struct media_pad *pads; unsigned int source_pad; - struct media_entity **sources; - struct media_entity *sink; + struct vsp1_entity **sources; + struct vsp1_entity *sink; unsigned int sink_pad; struct v4l2_subdev subdev; diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c index 702487f895b3..e6fa16d7fda8 100644 --- a/drivers/media/platform/vsp1/vsp1_lif.c +++ b/drivers/media/platform/vsp1/vsp1_lif.c @@ -30,7 +30,7 @@ static inline void vsp1_lif_write(struct vsp1_lif *lif, struct vsp1_dl_list *dl, u32 reg, u32 data) { - vsp1_dl_list_write(dl, reg, data); + vsp1_dl_list_write(dl, reg + lif->entity.index * VI6_LIF_OFFSET, data); } /* ----------------------------------------------------------------------------- @@ -165,7 +165,7 @@ static const struct vsp1_entity_operations lif_entity_ops = { * Initialization and Cleanup */ -struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1) +struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index) { struct vsp1_lif *lif; int ret; @@ -176,6 +176,7 @@ struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1) lif->entity.ops = &lif_entity_ops; lif->entity.type = VSP1_ENTITY_LIF; + lif->entity.index = index; /* * The LIF is never exposed to userspace, but media entity registration diff --git a/drivers/media/platform/vsp1/vsp1_lif.h b/drivers/media/platform/vsp1/vsp1_lif.h index 7b35879028de..3417339379b1 100644 --- a/drivers/media/platform/vsp1/vsp1_lif.h +++ b/drivers/media/platform/vsp1/vsp1_lif.h @@ -32,6 +32,6 @@ static inline struct vsp1_lif *to_lif(struct v4l2_subdev *subdev) return container_of(subdev, struct vsp1_lif, entity.subdev); } -struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1); +struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index); #endif /* __VSP1_LIF_H__ */ diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c index e817623b84e0..4f4b732df84b 100644 --- a/drivers/media/platform/vsp1/vsp1_pipe.c +++ b/drivers/media/platform/vsp1/vsp1_pipe.c @@ -335,16 +335,12 @@ void 
vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe) if (pipe == NULL) return; + /* + * If the DL commit raced with the frame end interrupt, the commit ends + * up being postponed by one frame. @completed represents whether the + * active frame was finished or postponed. + */ completed = vsp1_dlm_irq_frame_end(pipe->output->dlm); - if (!completed) { - /* - * If the DL commit raced with the frame end interrupt, the - * commit ends up being postponed by one frame. Return - * immediately without calling the pipeline's frame end handler - * or incrementing the sequence number. - */ - return; - } if (pipe->hgo) vsp1_hgo_frame_end(pipe->hgo); @@ -352,8 +348,12 @@ void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe) if (pipe->hgt) vsp1_hgt_frame_end(pipe->hgt); + /* + * Regardless of frame completion we still need to notify the pipe + * frame_end to account for vblank events. + */ if (pipe->frame_end) - pipe->frame_end(pipe); + pipe->frame_end(pipe, completed); pipe->sequence++; } @@ -373,10 +373,11 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe, return; /* - * The BRU background color has a fixed alpha value set to 255, the - * output alpha value is thus always equal to 255. + * The BRU and BRS background color has a fixed alpha value set to 255, + * the output alpha value is thus always equal to 255. */ - if (pipe->uds_input->type == VSP1_ENTITY_BRU) + if (pipe->uds_input->type == VSP1_ENTITY_BRU || + pipe->uds_input->type == VSP1_ENTITY_BRS) alpha = 255; vsp1_uds_set_alpha(pipe->uds, dl, alpha); diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h index 91a784a13422..c5d01a365370 100644 --- a/drivers/media/platform/vsp1/vsp1_pipe.h +++ b/drivers/media/platform/vsp1/vsp1_pipe.h @@ -91,7 +91,7 @@ struct vsp1_pipeline { enum vsp1_pipeline_state state; wait_queue_head_t wq; - void (*frame_end)(struct vsp1_pipeline *pipe); + void (*frame_end)(struct vsp1_pipeline *pipe, bool completed); struct mutex lock; struct kref kref; diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h index cd3e32af6e3b..58d0bea963a6 100644 --- a/drivers/media/platform/vsp1/vsp1_regs.h +++ b/drivers/media/platform/vsp1/vsp1_regs.h @@ -18,6 +18,7 @@ */ #define VI6_CMD(n) (0x0000 + (n) * 4) +#define VI6_CMD_UPDHDR (1 << 4) #define VI6_CMD_STRCMD (1 << 0) #define VI6_CLK_DCSWT 0x0018 @@ -238,6 +239,10 @@ #define VI6_WPF_SRCRPF_VIRACT_SUB (1 << 28) #define VI6_WPF_SRCRPF_VIRACT_MST (2 << 28) #define VI6_WPF_SRCRPF_VIRACT_MASK (3 << 28) +#define VI6_WPF_SRCRPF_VIRACT2_DIS (0 << 24) +#define VI6_WPF_SRCRPF_VIRACT2_SUB (1 << 24) +#define VI6_WPF_SRCRPF_VIRACT2_MST (2 << 24) +#define VI6_WPF_SRCRPF_VIRACT2_MASK (3 << 24) #define VI6_WPF_SRCRPF_RPF_ACT_DIS(n) (0 << ((n) * 2)) #define VI6_WPF_SRCRPF_RPF_ACT_SUB(n) (1 << ((n) * 2)) #define VI6_WPF_SRCRPF_RPF_ACT_MST(n) (2 << ((n) * 2)) @@ -321,6 +326,8 @@ #define VI6_DPR_HST_ROUTE 0x2044 #define VI6_DPR_HSI_ROUTE 0x2048 #define VI6_DPR_BRU_ROUTE 0x204c +#define VI6_DPR_ILV_BRS_ROUTE 0x2050 +#define VI6_DPR_ROUTE_BRSSEL (1 << 28) #define VI6_DPR_ROUTE_FXA_MASK (0xff << 16) #define VI6_DPR_ROUTE_FXA_SHIFT 16 #define VI6_DPR_ROUTE_FP_MASK (0x3f << 8) @@ -344,7 +351,8 @@ #define VI6_DPR_NODE_CLU 29 #define VI6_DPR_NODE_HST 30 #define VI6_DPR_NODE_HSI 31 -#define VI6_DPR_NODE_LIF 55 +#define VI6_DPR_NODE_BRS_IN(n) (38 + (n)) +#define VI6_DPR_NODE_LIF 55 /* Gen2 only */ #define VI6_DPR_NODE_WPF(n) (56 + (n)) #define VI6_DPR_NODE_UNUSED 63 @@ -476,7 +484,7 @@ #define VI6_HSI_CTRL_EN (1 << 
0) /* ----------------------------------------------------------------------------- - * BRU Control Registers + * BRS and BRU Control Registers */ #define VI6_ROP_NOP 0 @@ -496,7 +504,10 @@ #define VI6_ROP_NAND 14 #define VI6_ROP_SET 15 -#define VI6_BRU_INCTRL 0x2c00 +#define VI6_BRU_BASE 0x2c00 +#define VI6_BRS_BASE 0x3900 + +#define VI6_BRU_INCTRL 0x0000 #define VI6_BRU_INCTRL_NRM (1 << 28) #define VI6_BRU_INCTRL_DnON (1 << (16 + (n))) #define VI6_BRU_INCTRL_DITHn_OFF (0 << ((n) * 4)) @@ -508,19 +519,19 @@ #define VI6_BRU_INCTRL_DITHn_MASK (7 << ((n) * 4)) #define VI6_BRU_INCTRL_DITHn_SHIFT ((n) * 4) -#define VI6_BRU_VIRRPF_SIZE 0x2c04 +#define VI6_BRU_VIRRPF_SIZE 0x0004 #define VI6_BRU_VIRRPF_SIZE_HSIZE_MASK (0x1fff << 16) #define VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT 16 #define VI6_BRU_VIRRPF_SIZE_VSIZE_MASK (0x1fff << 0) #define VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT 0 -#define VI6_BRU_VIRRPF_LOC 0x2c08 +#define VI6_BRU_VIRRPF_LOC 0x0008 #define VI6_BRU_VIRRPF_LOC_HCOORD_MASK (0x1fff << 16) #define VI6_BRU_VIRRPF_LOC_HCOORD_SHIFT 16 #define VI6_BRU_VIRRPF_LOC_VCOORD_MASK (0x1fff << 0) #define VI6_BRU_VIRRPF_LOC_VCOORD_SHIFT 0 -#define VI6_BRU_VIRRPF_COL 0x2c0c +#define VI6_BRU_VIRRPF_COL 0x000c #define VI6_BRU_VIRRPF_COL_A_MASK (0xff << 24) #define VI6_BRU_VIRRPF_COL_A_SHIFT 24 #define VI6_BRU_VIRRPF_COL_RCR_MASK (0xff << 16) @@ -530,7 +541,7 @@ #define VI6_BRU_VIRRPF_COL_BCB_MASK (0xff << 0) #define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0 -#define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8 + ((n) <= 3 ? 0 : 4)) +#define VI6_BRU_CTRL(n) (0x0010 + (n) * 8 + ((n) <= 3 ? 0 : 4)) #define VI6_BRU_CTRL_RBC (1 << 31) #define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20) #define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20) @@ -543,7 +554,7 @@ #define VI6_BRU_CTRL_AROP(rop) ((rop) << 0) #define VI6_BRU_CTRL_AROP_MASK (0xf << 0) -#define VI6_BRU_BLD(n) (0x2c14 + (n) * 8 + ((n) <= 3 ? 0 : 4)) +#define VI6_BRU_BLD(n) (0x0014 + (n) * 8 + ((n) <= 3 ? 0 : 4)) #define VI6_BRU_BLD_CBES (1 << 31) #define VI6_BRU_BLD_CCMDX_DST_A (0 << 28) #define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28) @@ -576,7 +587,7 @@ #define VI6_BRU_BLD_COEFY_MASK (0xff << 0) #define VI6_BRU_BLD_COEFY_SHIFT 0 -#define VI6_BRU_ROP 0x2c30 +#define VI6_BRU_ROP 0x0030 /* Only available on BRU */ #define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? 
(n) : (n)+1) << 20) #define VI6_BRU_ROP_DSTSEL_VRPF (4 << 20) #define VI6_BRU_ROP_DSTSEL_MASK (7 << 20) @@ -653,6 +664,8 @@ * LIF Control Registers */ +#define VI6_LIF_OFFSET (-0x100) + #define VI6_LIF_CTRL 0x3b00 #define VI6_LIF_CTRL_OBTH_MASK (0x7ff << 16) #define VI6_LIF_CTRL_OBTH_SHIFT 16 @@ -689,9 +702,20 @@ #define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8) #define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8) #define VI6_IP_VERSION_MODEL_VSPD_GEN3 (0x17 << 8) +#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8) +#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8) +#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8) #define VI6_IP_VERSION_SOC_MASK (0xff << 0) -#define VI6_IP_VERSION_SOC_H (0x01 << 0) -#define VI6_IP_VERSION_SOC_M (0x02 << 0) +#define VI6_IP_VERSION_SOC_H2 (0x01 << 0) +#define VI6_IP_VERSION_SOC_V2H (0x01 << 0) +#define VI6_IP_VERSION_SOC_V3M (0x01 << 0) +#define VI6_IP_VERSION_SOC_M2 (0x02 << 0) +#define VI6_IP_VERSION_SOC_M3W (0x02 << 0) +#define VI6_IP_VERSION_SOC_V3H (0x02 << 0) +#define VI6_IP_VERSION_SOC_H3 (0x03 << 0) +#define VI6_IP_VERSION_SOC_D3 (0x04 << 0) +#define VI6_IP_VERSION_SOC_M3N (0x04 << 0) +#define VI6_IP_VERSION_SOC_E3 (0x04 << 0) /* ----------------------------------------------------------------------------- * RPF CLUT Registers diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 5af3486afe07..e9f5dcb8fae5 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -440,13 +440,17 @@ static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe) vsp1_pipeline_run(pipe); } -static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe) +static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe, + bool completed) { struct vsp1_device *vsp1 = pipe->output->entity.vsp1; enum vsp1_pipeline_state state; unsigned long flags; unsigned int i; + /* M2M Pipelines should never call here with an incomplete frame. */ + WARN_ON_ONCE(!completed); + spin_lock_irqsave(&pipe->irqlock, flags); /* Complete buffers on all video nodes. */ @@ -481,7 +485,7 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe, struct media_entity_enum ent_enum; struct vsp1_entity *entity; struct media_pad *pad; - bool bru_found = false; + struct vsp1_bru *bru = NULL; int ret; ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev); @@ -511,16 +515,20 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe, media_entity_to_v4l2_subdev(pad->entity)); /* - * A BRU is present in the pipeline, store the BRU input pad + * A BRU or BRS is present in the pipeline, store its input pad * number in the input RPF for use when configuring the RPF. */ - if (entity->type == VSP1_ENTITY_BRU) { - struct vsp1_bru *bru = to_bru(&entity->subdev); + if (entity->type == VSP1_ENTITY_BRU || + entity->type == VSP1_ENTITY_BRS) { + /* BRU and BRS can't be chained. */ + if (bru) { + ret = -EPIPE; + goto out; + } + bru = to_bru(&entity->subdev); bru->inputs[pad->index].rpf = input; input->bru_input = pad->index; - - bru_found = true; } /* We've reached the WPF, we're done. */ @@ -542,8 +550,7 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe, } pipe->uds = entity; - pipe->uds_input = bru_found ? pipe->bru - : &input->entity; + pipe->uds_input = bru ? &bru->entity : &input->entity; } /* Follow the source link, ignoring any HGO or HGT. 
*/ @@ -589,30 +596,42 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe, e = to_vsp1_entity(subdev); list_add_tail(&e->list_pipe, &pipe->entities); - if (e->type == VSP1_ENTITY_RPF) { + switch (e->type) { + case VSP1_ENTITY_RPF: rwpf = to_rwpf(subdev); pipe->inputs[rwpf->entity.index] = rwpf; rwpf->video->pipe_index = ++pipe->num_inputs; rwpf->pipe = pipe; - } else if (e->type == VSP1_ENTITY_WPF) { + break; + + case VSP1_ENTITY_WPF: rwpf = to_rwpf(subdev); pipe->output = rwpf; rwpf->video->pipe_index = 0; rwpf->pipe = pipe; - } else if (e->type == VSP1_ENTITY_LIF) { + break; + + case VSP1_ENTITY_LIF: pipe->lif = e; - } else if (e->type == VSP1_ENTITY_BRU) { + break; + + case VSP1_ENTITY_BRU: + case VSP1_ENTITY_BRS: pipe->bru = e; - } else if (e->type == VSP1_ENTITY_HGO) { - struct vsp1_hgo *hgo = to_hgo(subdev); + break; + case VSP1_ENTITY_HGO: pipe->hgo = e; - hgo->histo.pipe = pipe; - } else if (e->type == VSP1_ENTITY_HGT) { - struct vsp1_hgt *hgt = to_hgt(subdev); + to_hgo(subdev)->histo.pipe = pipe; + break; + case VSP1_ENTITY_HGT: pipe->hgt = e; - hgt->histo.pipe = pipe; + to_hgt(subdev)->histo.pipe = pipe; + break; + + default: + break; } } @@ -796,12 +815,14 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe) struct vsp1_uds *uds = to_uds(&pipe->uds->subdev); /* - * If a BRU is present in the pipeline before the UDS, the alpha - * component doesn't need to be scaled as the BRU output alpha - * value is fixed to 255. Otherwise we need to scale the alpha - * component only when available at the input RPF. + * If a BRU or BRS is present in the pipeline before the UDS, + * the alpha component doesn't need to be scaled as the BRU and + * BRS output alpha value is fixed to 255. Otherwise we need to + * scale the alpha component only when available at the input + * RPF. */ - if (pipe->uds_input->type == VSP1_ENTITY_BRU) { + if (pipe->uds_input->type == VSP1_ENTITY_BRU || + pipe->uds_input->type == VSP1_ENTITY_BRS) { uds->scale_alpha = false; } else { struct vsp1_rwpf *rpf = diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c index 32df109b119f..b6c902be225b 100644 --- a/drivers/media/platform/vsp1/vsp1_wpf.c +++ b/drivers/media/platform/vsp1/vsp1_wpf.c @@ -453,7 +453,9 @@ static void wpf_configure(struct vsp1_entity *entity, } if (pipe->bru || pipe->num_inputs > 1) - srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST; + srcrpf |= pipe->bru->type == VSP1_ENTITY_BRU + ? 
VI6_WPF_SRCRPF_VIRACT_MST + : VI6_WPF_SRCRPF_VIRACT2_MST; vsp1_wpf_write(wpf, dl, VI6_WPF_SRCRPF, srcrpf); diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index 7240223dc15a..17e82a9a0109 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -610,10 +610,21 @@ static int wl1273_fm_start(struct wl1273_device *radio, int new_mode) } } - if (radio->rds_on) + if (radio->rds_on) { r = core->write(core, WL1273_RDS_DATA_ENB, 1); - else + if (r) { + dev_err(dev, "%s: RDS_DATA_ENB ON fails\n", + __func__); + goto fail; + } + } else { r = core->write(core, WL1273_RDS_DATA_ENB, 0); + if (r) { + dev_err(dev, "%s: RDS_DATA_ENB OFF fails\n", + __func__); + goto fail; + } + } } else { dev_warn(dev, "%s: Illegal mode.\n", __func__); } diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index a30af91710fe..d2223c04e9ad 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, if (!dev->rx_resolution) return -ENOTTY; - val = dev->rx_resolution; + val = dev->rx_resolution / 1000; break; case LIRC_SET_WIDEBAND_RECEIVER: diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c index 192b1c7740df..145407dee3db 100644 --- a/drivers/media/tuners/fc0011.c +++ b/drivers/media/tuners/fc0011.c @@ -342,6 +342,7 @@ static int fc0011_set_params(struct dvb_frontend *fe) switch (vco_sel) { default: WARN_ON(1); + return -EINVAL; case 0: if (vco_cal < 8) { regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2); diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c index 353744fee053..dd59c2c0e4a5 100644 --- a/drivers/media/tuners/mxl5005s.c +++ b/drivers/media/tuners/mxl5005s.c @@ -2737,8 +2737,6 @@ static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq) status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0); status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x7); divider_val = 2 ; - Fmax = FmaxBin ; - Fmin = FminBin ; } /* TG_DIV_VAL */ diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c index 9ec919c68482..9d82ec0a4b64 100644 --- a/drivers/media/usb/au0828/au0828-input.c +++ b/drivers/media/usb/au0828/au0828-input.c @@ -351,7 +351,7 @@ int au0828_rc_register(struct au0828_dev *dev) if (err) goto error; - pr_info("Remote controller %s initalized\n", ir->name); + pr_info("Remote controller %s initialized\n", ir->name); return 0; diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 594360a63c18..a91fdad8f8d4 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -207,15 +207,13 @@ static int lme2510_stream_restart(struct dvb_usb_device *d) struct lme2510_state *st = d->priv; u8 all_pids[] = LME_ALL_PIDS; u8 stream_on[] = LME_ST_ON_W; - int ret; u8 rbuff[1]; if (st->pid_off) - ret = lme2510_usb_talk(d, all_pids, sizeof(all_pids), - rbuff, sizeof(rbuff)); + lme2510_usb_talk(d, all_pids, sizeof(all_pids), + rbuff, sizeof(rbuff)); /*Restart Stream Command*/ - ret = lme2510_usb_talk(d, stream_on, sizeof(stream_on), - rbuff, sizeof(rbuff)); - return ret; + return lme2510_usb_talk(d, stream_on, sizeof(stream_on), + rbuff, sizeof(rbuff)); } static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 08acdd32e412..bea1b4764a66 
100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -215,13 +215,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, USB_CTRL_GET_TIMEOUT); if (result < 0) { deb_info("i2c read error (status = %d)\n", result); - break; + goto unlock; } if (msg[i].len > sizeof(st->buf)) { deb_info("buffer too small to fit %d bytes\n", msg[i].len); - return -EIO; + result = -EIO; + goto unlock; } memcpy(msg[i].buf, st->buf, msg[i].len); @@ -233,8 +234,8 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, /* Write request */ if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); - mutex_unlock(&d->i2c_mutex); - return -EINTR; + result = -EINTR; + goto unlock; } st->buf[0] = REQUEST_NEW_I2C_WRITE; st->buf[1] = msg[i].addr << 1; @@ -247,7 +248,9 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, if (msg[i].len > sizeof(st->buf) - 4) { deb_info("i2c message to big: %d\n", msg[i].len); - return -EIO; + mutex_unlock(&d->usb_mutex); + result = -EIO; + goto unlock; } /* The Actual i2c payload */ @@ -269,8 +272,11 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, } } } + result = i; + +unlock: mutex_unlock(&d->i2c_mutex); - return i; + return result; } /* @@ -281,7 +287,7 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dib0700_state *st = d->priv; - int i,len; + int i, len, result; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EINTR; @@ -298,7 +304,8 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, if (msg[i].len > sizeof(st->buf) - 2) { deb_info("i2c xfer to big: %d\n", msg[i].len); - return -EIO; + result = -EIO; + goto unlock; } memcpy(&st->buf[2], msg[i].buf, msg[i].len); @@ -313,13 +320,15 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, if (len <= 0) { deb_info("I2C read failed on address 0x%02x\n", msg[i].addr); - break; + result = -EIO; + goto unlock; } if (msg[i + 1].len > sizeof(st->buf)) { deb_info("i2c xfer buffer to small for %d\n", msg[i].len); - return -EIO; + result = -EIO; + goto unlock; } memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len); @@ -328,14 +337,17 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, i++; } else { st->buf[0] = REQUEST_I2C_WRITE; - if (dib0700_ctrl_wr(d, st->buf, msg[i].len + 2) < 0) - break; + result = dib0700_ctrl_wr(d, st->buf, msg[i].len + 2); + if (result < 0) + goto unlock; } } + result = i; +unlock: mutex_unlock(&d->usb_mutex); mutex_unlock(&d->i2c_mutex); - return i; + return result; } static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 146341aeb782..4c57fd7929cb 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -1193,6 +1193,22 @@ struct em28xx_board em28xx_boards[] = { .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ, }, + [EM2884_BOARD_TERRATEC_H6] = { + .name = "Terratec Cinergy H6 rev. 
2", + .has_dvb = 1, + .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, +#if 0 + .tuner_type = TUNER_PHILIPS_TDA8290, + .tuner_addr = 0x41, + .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */ + .tuner_gpio = terratec_h5_gpio, +#else + .tuner_type = TUNER_ABSENT, +#endif + .def_i2c_bus = 1, + .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | + EM28XX_I2C_FREQ_400_KHZ, + }, [EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = { .name = "Hauppauge WinTV HVR 930C", .has_dvb = 1, @@ -2496,6 +2512,8 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM2884_BOARD_TERRATEC_H5 }, { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 3 */ .driver_info = EM2884_BOARD_TERRATEC_H5 }, + { USB_DEVICE(0x0ccd, 0x10b2), /* H6 */ + .driver_info = EM2884_BOARD_TERRATEC_H6 }, { USB_DEVICE(0x0ccd, 0x0084), .driver_info = EM2860_BOARD_TERRATEC_AV350 }, { USB_DEVICE(0x0ccd, 0x0096), diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index 82edd37f0d73..4a7db623fe29 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -1522,6 +1522,7 @@ static int em28xx_dvb_init(struct em28xx *dev) break; case EM2884_BOARD_ELGATO_EYETV_HYBRID_2008: case EM2884_BOARD_CINERGY_HTC_STICK: + case EM2884_BOARD_TERRATEC_H6: terratec_htc_stick_init(dev); /* attach demodulator */ diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 8c472d5adb50..60b195c157b8 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -982,8 +982,6 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus, dev_err(&dev->intf->dev, "%s: em28xx_i2_eeprom failed! retval [%d]\n", __func__, retval); - - return retval; } } diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c index eba75736e654..ca9673917ad5 100644 --- a/drivers/media/usb/em28xx/em28xx-input.c +++ b/drivers/media/usb/em28xx/em28xx-input.c @@ -821,7 +821,7 @@ static int em28xx_ir_init(struct em28xx *dev) if (err) goto error; - dev_info(&dev->intf->dev, "Input extension successfully initalized\n"); + dev_info(&dev->intf->dev, "Input extension successfully initialized\n"); return 0; diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index e8d97d5ec161..88084f24f033 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -148,6 +148,7 @@ #define EM28178_BOARD_PLEX_PX_BCUD 98 #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB 99 #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100 +#define EM2884_BOARD_TERRATEC_H6 101 /* Limits minimum and default number of buffers */ #define EM28XX_MIN_BUF 4 diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index c843070f24c1..f9ed9c950247 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver"); MODULE_LICENSE("GPL"); static int debug; -static int persistent_config = 1; +static int persistent_config; module_param(debug, int, 0644); module_param(persistent_config, int, 0644); MODULE_PARM_DESC(debug, "debug level (0-1)"); diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c index f203699e9c1b..65692576690f 100644 --- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c +++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c @@ -116,21 +116,19 @@ static void rain_irq_work_handler(struct 
work_struct *work) while (true) { unsigned long flags; - bool exit_loop = false; char data; spin_lock_irqsave(&rain->buf_lock, flags); - if (rain->buf_len) { - data = rain->buf[rain->buf_rd_idx]; - rain->buf_len--; - rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff; - } else { - exit_loop = true; + if (!rain->buf_len) { + spin_unlock_irqrestore(&rain->buf_lock, flags); + break; } - spin_unlock_irqrestore(&rain->buf_lock, flags); - if (exit_loop) - break; + data = rain->buf[rain->buf_rd_idx]; + rain->buf_len--; + rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff; + + spin_unlock_irqrestore(&rain->buf_lock, flags); if (!rain->cmd_started && data != '?') continue; diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c index 985af9933c7e..c1d4505f84ea 100644 --- a/drivers/media/usb/stkwebcam/stk-sensor.c +++ b/drivers/media/usb/stkwebcam/stk-sensor.c @@ -41,6 +41,8 @@ /* It seems the i2c bus is controlled with these registers */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "stk-webcam.h" #define STK_IIC_BASE (0x0200) @@ -239,8 +241,8 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val) } while (tmpval == 0 && i < MAX_RETRIES); if (tmpval != STK_IIC_STAT_TX_OK) { if (tmpval) - STK_ERROR("stk_sensor_outb failed, status=0x%02x\n", - tmpval); + pr_err("stk_sensor_outb failed, status=0x%02x\n", + tmpval); return 1; } else return 0; @@ -262,8 +264,8 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val) } while (tmpval == 0 && i < MAX_RETRIES); if (tmpval != STK_IIC_STAT_RX_OK) { if (tmpval) - STK_ERROR("stk_sensor_inb failed, status=0x%02x\n", - tmpval); + pr_err("stk_sensor_inb failed, status=0x%02x\n", + tmpval); return 1; } @@ -366,29 +368,29 @@ int stk_sensor_init(struct stk_camera *dev) if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES) || stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS) || stk_sensor_outb(dev, REG_COM7, COM7_RESET)) { - STK_ERROR("Sensor resetting failed\n"); + pr_err("Sensor resetting failed\n"); return -ENODEV; } msleep(10); /* Read the manufacturer ID: ov = 0x7FA2 */ if (stk_sensor_inb(dev, REG_MIDH, &idh) || stk_sensor_inb(dev, REG_MIDL, &idl)) { - STK_ERROR("Strange error reading sensor ID\n"); + pr_err("Strange error reading sensor ID\n"); return -ENODEV; } if (idh != 0x7f || idl != 0xa2) { - STK_ERROR("Huh? you don't have a sensor from ovt\n"); + pr_err("Huh? 
you don't have a sensor from ovt\n"); return -ENODEV; } if (stk_sensor_inb(dev, REG_PID, &idh) || stk_sensor_inb(dev, REG_VER, &idl)) { - STK_ERROR("Could not read sensor model\n"); + pr_err("Could not read sensor model\n"); return -ENODEV; } stk_sensor_write_regvals(dev, ov_initvals); msleep(10); - STK_INFO("OmniVision sensor detected, id %02X%02X at address %x\n", - idh, idl, SENSOR_ADDRESS); + pr_info("OmniVision sensor detected, id %02X%02X at address %x\n", + idh, idl, SENSOR_ADDRESS); return 0; } @@ -520,7 +522,8 @@ int stk_sensor_configure(struct stk_camera *dev) case MODE_SXGA: com7 = COM7_FMT_SXGA; dummylines = 0; break; - default: STK_ERROR("Unsupported mode %d\n", dev->vsettings.mode); + default: + pr_err("Unsupported mode %d\n", dev->vsettings.mode); return -EFAULT; } switch (dev->vsettings.palette) { @@ -544,7 +547,8 @@ int stk_sensor_configure(struct stk_camera *dev) com7 |= COM7_PBAYER; rv = ov_fmt_bayer; break; - default: STK_ERROR("Unsupported colorspace\n"); + default: + pr_err("Unsupported colorspace\n"); return -EFAULT; } /*FIXME sometimes the sensor go to a bad state @@ -564,7 +568,7 @@ int stk_sensor_configure(struct stk_camera *dev) switch (dev->vsettings.mode) { case MODE_VGA: if (stk_sensor_set_hw(dev, 302, 1582, 6, 486)) - STK_ERROR("stk_sensor_set_hw failed (VGA)\n"); + pr_err("stk_sensor_set_hw failed (VGA)\n"); break; case MODE_SXGA: case MODE_CIF: @@ -572,7 +576,7 @@ int stk_sensor_configure(struct stk_camera *dev) case MODE_QCIF: /*FIXME These settings seem ignored by the sensor if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034)) - STK_ERROR("stk_sensor_set_hw failed (SXGA)\n"); + pr_err("stk_sensor_set_hw failed (SXGA)\n"); */ break; } diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index 6e7fc36b658f..90d4a08cda31 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -18,6 +18,8 @@ * GNU General Public License for more details. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -175,15 +177,15 @@ static int stk_start_stream(struct stk_camera *dev) if (!is_present(dev)) return -ENODEV; if (!is_memallocd(dev) || !is_initialised(dev)) { - STK_ERROR("FIXME: Buffers are not allocated\n"); + pr_err("FIXME: Buffers are not allocated\n"); return -EFAULT; } ret = usb_set_interface(dev->udev, 0, 5); if (ret < 0) - STK_ERROR("usb_set_interface failed !\n"); + pr_err("usb_set_interface failed !\n"); if (stk_sensor_wakeup(dev)) - STK_ERROR("error awaking the sensor\n"); + pr_err("error awaking the sensor\n"); stk_camera_read_reg(dev, 0x0116, &value_116); stk_camera_read_reg(dev, 0x0117, &value_117); @@ -224,9 +226,9 @@ static int stk_stop_stream(struct stk_camera *dev) unset_streaming(dev); if (usb_set_interface(dev->udev, 0, 0)) - STK_ERROR("usb_set_interface failed !\n"); + pr_err("usb_set_interface failed !\n"); if (stk_sensor_sleep(dev)) - STK_ERROR("error suspending the sensor\n"); + pr_err("error suspending the sensor\n"); } return 0; } @@ -313,7 +315,7 @@ static void stk_isoc_handler(struct urb *urb) dev = (struct stk_camera *) urb->context; if (dev == NULL) { - STK_ERROR("isoc_handler called with NULL device !\n"); + pr_err("isoc_handler called with NULL device !\n"); return; } @@ -326,14 +328,13 @@ static void stk_isoc_handler(struct urb *urb) spin_lock_irqsave(&dev->spinlock, flags); if (urb->status != -EINPROGRESS && urb->status != 0) { - STK_ERROR("isoc_handler: urb->status == %d\n", urb->status); + pr_err("isoc_handler: urb->status == %d\n", urb->status); goto resubmit; } if (list_empty(&dev->sio_avail)) { /*FIXME Stop streaming after a while */ - (void) (printk_ratelimit() && - STK_ERROR("isoc_handler without available buffer!\n")); + pr_err_ratelimited("isoc_handler without available buffer!\n"); goto resubmit; } fb = list_first_entry(&dev->sio_avail, @@ -343,8 +344,8 @@ static void stk_isoc_handler(struct urb *urb) for (i = 0; i < urb->number_of_packets; i++) { if (urb->iso_frame_desc[i].status != 0) { if (urb->iso_frame_desc[i].status != -EXDEV) - STK_ERROR("Frame %d has error %d\n", i, - urb->iso_frame_desc[i].status); + pr_err("Frame %d has error %d\n", + i, urb->iso_frame_desc[i].status); continue; } framelen = urb->iso_frame_desc[i].actual_length; @@ -368,9 +369,8 @@ static void stk_isoc_handler(struct urb *urb) /* This marks a new frame */ if (fb->v4lbuf.bytesused != 0 && fb->v4lbuf.bytesused != dev->frame_size) { - (void) (printk_ratelimit() && - STK_ERROR("frame %d, bytesused=%d, skipping\n", - i, fb->v4lbuf.bytesused)); + pr_err_ratelimited("frame %d, bytesused=%d, skipping\n", + i, fb->v4lbuf.bytesused); fb->v4lbuf.bytesused = 0; fill = fb->buffer; } else if (fb->v4lbuf.bytesused == dev->frame_size) { @@ -395,8 +395,7 @@ static void stk_isoc_handler(struct urb *urb) /* Our buffer is full !!! */ if (framelen + fb->v4lbuf.bytesused > dev->frame_size) { - (void) (printk_ratelimit() && - STK_ERROR("Frame buffer overflow, lost sync\n")); + pr_err_ratelimited("Frame buffer overflow, lost sync\n"); /*FIXME Do something here? 
*/ continue; } @@ -414,8 +413,8 @@ resubmit: urb->dev = dev->udev; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret != 0) { - STK_ERROR("Error (%d) re-submitting urb in stk_isoc_handler.\n", - ret); + pr_err("Error (%d) re-submitting urb in stk_isoc_handler\n", + ret); } } @@ -433,32 +432,31 @@ static int stk_prepare_iso(struct stk_camera *dev) udev = dev->udev; if (dev->isobufs) - STK_ERROR("isobufs already allocated. Bad\n"); + pr_err("isobufs already allocated. Bad\n"); else dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs), GFP_KERNEL); if (dev->isobufs == NULL) { - STK_ERROR("Unable to allocate iso buffers\n"); + pr_err("Unable to allocate iso buffers\n"); return -ENOMEM; } for (i = 0; i < MAX_ISO_BUFS; i++) { if (dev->isobufs[i].data == NULL) { kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL); if (kbuf == NULL) { - STK_ERROR("Failed to allocate iso buffer %d\n", - i); + pr_err("Failed to allocate iso buffer %d\n", i); goto isobufs_out; } dev->isobufs[i].data = kbuf; } else - STK_ERROR("isobuf data already allocated\n"); + pr_err("isobuf data already allocated\n"); if (dev->isobufs[i].urb == NULL) { urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL); if (urb == NULL) goto isobufs_out; dev->isobufs[i].urb = urb; } else { - STK_ERROR("Killing URB\n"); + pr_err("Killing URB\n"); usb_kill_urb(dev->isobufs[i].urb); urb = dev->isobufs[i].urb; } @@ -567,7 +565,7 @@ static int stk_prepare_sio_buffers(struct stk_camera *dev, unsigned n_sbufs) { int i; if (dev->sio_bufs != NULL) - STK_ERROR("sio_bufs already allocated\n"); + pr_err("sio_bufs already allocated\n"); else { dev->sio_bufs = kzalloc(n_sbufs * sizeof(struct stk_sio_buffer), GFP_KERNEL); @@ -690,7 +688,7 @@ static ssize_t stk_read(struct file *fp, char __user *buf, spin_lock_irqsave(&dev->spinlock, flags); if (list_empty(&dev->sio_full)) { spin_unlock_irqrestore(&dev->spinlock, flags); - STK_ERROR("BUG: No siobufs ready\n"); + pr_err("BUG: No siobufs ready\n"); return 0; } sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list); @@ -907,7 +905,7 @@ static int stk_vidioc_g_fmt_vid_cap(struct file *filp, stk_sizes[i].m != dev->vsettings.mode; i++) ; if (i == ARRAY_SIZE(stk_sizes)) { - STK_ERROR("ERROR: mode invalid\n"); + pr_err("ERROR: mode invalid\n"); return -EINVAL; } pix_format->width = stk_sizes[i].w; @@ -985,7 +983,7 @@ static int stk_setup_format(struct stk_camera *dev) stk_sizes[i].m != dev->vsettings.mode) i++; if (i == ARRAY_SIZE(stk_sizes)) { - STK_ERROR("Something is broken in %s\n", __func__); + pr_err("Something is broken in %s\n", __func__); return -EFAULT; } /* This registers controls some timings, not sure of what. 
*/ @@ -1241,7 +1239,7 @@ static void stk_v4l_dev_release(struct video_device *vd) struct stk_camera *dev = vdev_to_camera(vd); if (dev->sio_bufs != NULL || dev->isobufs != NULL) - STK_ERROR("We are leaking memory\n"); + pr_err("We are leaking memory\n"); usb_put_intf(dev->interface); kfree(dev); } @@ -1264,10 +1262,10 @@ static int stk_register_video_device(struct stk_camera *dev) video_set_drvdata(&dev->vdev, dev); err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1); if (err) - STK_ERROR("v4l registration failed\n"); + pr_err("v4l registration failed\n"); else - STK_INFO("Syntek USB2.0 Camera is now controlling device %s\n", - video_device_node_name(&dev->vdev)); + pr_info("Syntek USB2.0 Camera is now controlling device %s\n", + video_device_node_name(&dev->vdev)); return err; } @@ -1288,7 +1286,7 @@ static int stk_camera_probe(struct usb_interface *interface, dev = kzalloc(sizeof(struct stk_camera), GFP_KERNEL); if (dev == NULL) { - STK_ERROR("Out of memory !\n"); + pr_err("Out of memory !\n"); return -ENOMEM; } err = v4l2_device_register(&interface->dev, &dev->v4l2_dev); @@ -1352,7 +1350,7 @@ static int stk_camera_probe(struct usb_interface *interface, } } if (!dev->isoc_ep) { - STK_ERROR("Could not find isoc-in endpoint"); + pr_err("Could not find isoc-in endpoint\n"); err = -ENODEV; goto error; } @@ -1387,8 +1385,8 @@ static void stk_camera_disconnect(struct usb_interface *interface) wake_up_interruptible(&dev->wait_frame); - STK_INFO("Syntek USB2.0 Camera release resources device %s\n", - video_device_node_name(&dev->vdev)); + pr_info("Syntek USB2.0 Camera release resources device %s\n", + video_device_node_name(&dev->vdev)); video_unregister_device(&dev->vdev); v4l2_ctrl_handler_free(&dev->hdl); diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h index 0284120ce246..5cecbdc97573 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.h +++ b/drivers/media/usb/stkwebcam/stk-webcam.h @@ -31,12 +31,6 @@ #define ISO_MAX_FRAME_SIZE 3 * 1024 #define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE) - -#define PREFIX "stkwebcam: " -#define STK_INFO(str, args...) printk(KERN_INFO PREFIX str, ##args) -#define STK_ERROR(str, args...) printk(KERN_ERR PREFIX str, ##args) -#define STK_WARNING(str, args...) printk(KERN_WARNING PREFIX str, ##args) - struct stk_iso_buf { void *data; int length; diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index e48b7c032c95..8db45dfc271b 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -43,8 +43,6 @@ #define UNSET (-1U) -#define PREFIX (t->i2c->dev.driver->name) - /* * Driver modprobe parameters */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 8621a198a2ce..bac33311f55a 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); /* + * MEI requires to resume from runtime suspend mode + * in order to perform link reset flow upon system suspend. + */ + pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; + + /* * For not wake-able HW runtime pm framework * can't be used on pci device level. * Use domain runtime pm callbacks instead. 
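A note on the stkwebcam conversion above: it replaces the driver's hand-rolled STK_ERROR()/STK_INFO() wrappers with plain pr_err()/pr_info(). This works because <linux/printk.h> expands those helpers through a pr_fmt() hook, so the single pr_fmt() definition added at the top of each file prefixes every message with the module name, and the PREFIX/STK_* macros in stk-webcam.h can be deleted outright. Below is a minimal user-space sketch of how the two macros compose; the fprintf()-based pr_err() mock and the literal "stkwebcam" prefix (standing in for KBUILD_MODNAME) are assumptions made only so the sketch compiles outside the kernel:

    #include <stdio.h>

    /*
     * User-space mock of the <linux/printk.h> pattern: pr_fmt() is expanded
     * into every pr_err() call site at preprocessing time, so one definition
     * near the top of a .c file prefixes every message emitted from it.
     */
    #define pr_fmt(fmt) "stkwebcam" ": " fmt    /* KBUILD_MODNAME stand-in */
    #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
        /* Prints "stkwebcam: v4l registration failed". */
        pr_err("v4l registration failed\n");
        return 0;
    }

Because the prefix is pasted in by the preprocessor, it costs nothing at run time and also composes with pr_err_ratelimited(), which is why the printk_ratelimit() dance in stk_isoc_handler() could be simplified. Separately, the mei/pci-me.c hunk just above sets PCI_DEV_FLAGS_NEEDS_RESUME at probe time so that, per its own comment, the device is brought out of runtime suspend and can run its link reset flow during system suspend; the pci-txe.c diff that follows applies the identical change to the TXE flavour of the hardware.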
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index f811cd524468..e38a5f144373 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c @@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); /* + * MEI requires to resume from runtime suspend mode + * in order to perform link reset flow upon system suspend. + */ + pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; + + /* * For not wake-able HW runtime pm framework * can't be used on pci device level. * Use domain runtime pm callbacks instead. diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8ac59dc80f23..f1bbfd389367 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2170,6 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) * from being accepted. */ card = md->queue.card; + spin_lock_irq(md->queue.queue->queue_lock); + queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); + spin_unlock_irq(md->queue.queue->queue_lock); blk_set_queue_dying(md->queue.queue); mmc_cleanup_queue(&md->queue); if (md->disk->flags & GENHD_FL_UP) { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 4ffea14b7eb6..2bae69e39544 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1289,7 +1289,7 @@ out_err: static int mmc_select_hs400es(struct mmc_card *card) { struct mmc_host *host = card->host; - int err = 0; + int err = -EINVAL; u8 val; if (!(host->caps & MMC_CAP_8_BIT_DATA)) { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index a9dfb26972f2..250dc6ec4c82 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2957,7 +2957,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) } /* find out number of slots supported */ - if (device_property_read_u32(dev, "num-slots", &pdata->num_slots)) + if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots)) dev_info(dev, "'num-slots' was deprecated.\n"); if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 7c12f3715676..2ab4788d021f 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -356,9 +356,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, struct mmc_host *mmc = host->mmc; int ret = 0; - if (mmc_pdata(host)->set_power) - return mmc_pdata(host)->set_power(host->dev, power_on, vdd); - /* * If we don't see a Vcc regulator, assume it's a fixed * voltage always-on regulator. 
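Before the omap_hsmmc hunks continue below, one subtlety in the dw_mmc change above deserves a note: device_property_read_u32() follows the usual kernel convention of returning 0 on success and a negative errno when the property is missing, so the original unnegated test printed the "'num-slots' was deprecated" warning precisely when no num-slots property existed. A small stand-alone analog of the corrected test; property_read_u32() here is a hypothetical stand-in that only mimics that 0-on-success convention:

    #include <stdio.h>

    /*
     * Hypothetical stand-in for device_property_read_u32(): returns 0 on
     * success and a negative errno when the property does not exist.
     */
    static int property_read_u32(const char *name, unsigned int *out)
    {
        (void)name;
        *out = 1;    /* pretend the firmware node carries "num-slots" = 1 */
        return 0;
    }

    int main(void)
    {
        unsigned int num_slots;

        /*
         * Corrected form, mirroring the dw_mmc fix: warn only when the
         * lookup succeeds, i.e. when a deprecated "num-slots" property is
         * actually present.
         */
        if (!property_read_u32("num-slots", &num_slots))
            printf("'num-slots' was deprecated.\n");
        return 0;
    }

The same zero-means-success convention shapes several of the surrounding fixes, such as mmc_select_hs400es() initializing err to -EINVAL so that early bail-outs no longer report success: `if (ret)` is the error path and `if (!ret)` the success path.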
@@ -366,9 +363,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, if (IS_ERR(mmc->supply.vmmc)) return 0; - if (mmc_pdata(host)->before_set_reg) - mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd); - ret = omap_hsmmc_set_pbias(host, false, 0); if (ret) return ret; @@ -400,9 +394,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, return ret; } - if (mmc_pdata(host)->after_set_reg) - mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd); - return 0; err_set_voltage: @@ -469,8 +460,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) int ret; struct mmc_host *mmc = host->mmc; - if (mmc_pdata(host)->set_power) - return 0; ret = mmc_regulator_get_supply(mmc); if (ret == -EPROBE_DEFER) @@ -2097,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) mmc->max_seg_size = mmc->max_req_size; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | - MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; + MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23; mmc->caps |= mmc_pdata(host)->caps; if (mmc->caps & MMC_CAP_8_BIT_DATA) diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 7611fd679f1a..1485530c3592 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -31,6 +31,7 @@ #define SDMMC_MC1R 0x204 #define SDMMC_MC1R_DDR BIT(3) +#define SDMMC_MC1R_FCD BIT(7) #define SDMMC_CACR 0x230 #define SDMMC_CACR_CAPWREN BIT(0) #define SDMMC_CACR_KEY (0x46 << 8) @@ -43,6 +44,15 @@ struct sdhci_at91_priv { struct clk *mainck; }; +static void sdhci_at91_set_force_card_detect(struct sdhci_host *host) +{ + u8 mc1r; + + mc1r = readb(host->ioaddr + SDMMC_MC1R); + mc1r |= SDMMC_MC1R_FCD; + writeb(mc1r, host->ioaddr + SDMMC_MC1R); +} + static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) { u16 clk; @@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) sdhci_set_uhs_signaling(host, timing); } +static void sdhci_at91_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_reset(host, mask); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdhci_at91_set_force_card_detect(host); +} + static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_clock = sdhci_at91_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = sdhci_at91_reset, .set_uhs_signaling = sdhci_at91_set_uhs_signaling, .set_power = sdhci_at91_set_power, }; @@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev) host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } + /* + * If the device attached to the MMC bus is not removable, it is safer + * to set the Force Card Detect bit. People often don't connect the + * card detect signal and use this pin for another purpose. If the card + * detect pin is not muxed to SDHCI controller, a default value is + * used. This value can be different from a SoC revision to another + * one. Problems come when this default value is not card present. To + * avoid this case, if the device is non removable then the card + * detection procedure using the SDMCC_CD signal is bypassed. + * This bit is reset when a software reset for all command is performed + * so we need to implement our own reset function to set back this bit. 
+ */ + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdhci_at91_set_force_card_detect(host); + pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d6fa2214aaae..0fb4e4c119e1 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, } mmc_writel(host, REG_CLKCR, rval); - if (host->cfg->needs_new_timings) - mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE); + if (host->cfg->needs_new_timings) { + /* Don't touch the delay bits */ + rval = mmc_readl(host, REG_SD_NTSR); + rval |= SDXC_2X_TIMING_MODE; + mmc_writel(host, REG_SD_NTSR, rval); + } ret = sunxi_mmc_clk_set_phase(host, ios, rate); if (ret) diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index f336a9b85576..9ec8f033ac5f 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr, for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->writesect(dev, block, buf)) return BLK_STS_IOERR; + return BLK_STS_OK; default: return BLK_STS_IOERR; } diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index d922a88e407f..2c8baa0c2c4e 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, * tRC < 30ns implies EDO mode. This controller does not support this * mode. */ - if (conf->timings.sdr.tRC_min < 30) + if (conf->timings.sdr.tRC_min < 30000) return -ENOTSUPP; atmel_smc_cs_conf_init(smcconf); diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 55a8ee5306ea..8c210a5776bc 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) */ struct platform_device *pdev = to_platform_device(userdev); const struct atmel_pmecc_caps *caps; + const struct of_device_id *match; /* No PMECC engine available. */ if (!of_property_read_bool(userdev->of_node, @@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) caps = &at91sam9g45_caps; - /* - * Try to find the NFC subnode and extract the associated caps - * from there. - */ - np = of_find_compatible_node(userdev->of_node, NULL, - "atmel,sama5d3-nfc"); - if (np) { - const struct of_device_id *match; - - match = of_match_node(atmel_pmecc_legacy_match, np); - if (match && match->data) - caps = match->data; - - of_node_put(np); - } + /* Find the caps associated to the NAND dev node. 
*/ + match = of_match_node(atmel_pmecc_legacy_match, + userdev->of_node); + if (match && match->data) + caps = match->data; pmecc = atmel_pmecc_create(pdev, caps, 1, 2); } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 5fa5ddc94834..c6c18b82f8f4 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section, if (!section) { oobregion->offset = 0; - oobregion->length = 4; + if (mtd->oobsize == 16) + oobregion->length = 4; + else + oobregion->length = 3; } else { + if (mtd->oobsize == 8) + return -ERANGE; + oobregion->offset = 6; oobregion->length = ecc->total - 4; } @@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) * Ensure the timing mode has been changed on the chip side * before changing timings on the controller side. */ - if (chip->onfi_version) { + if (chip->onfi_version && + (le16_to_cpu(chip->onfi_params.opt_cmd) & + ONFI_OPT_CMD_SET_GET_FEATURES)) { u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { chip->onfi_timing_mode_default, }; @@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, * @buf: the data to write * @oob_required: must write chip->oob_poi to OOB * @page: page number to write - * @cached: cached programming * @raw: use _raw version of write_page */ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c index f06312df3669..7e36d7d13c26 100644 --- a/drivers/mtd/nand/nand_timings.c +++ b/drivers/mtd/nand/nand_timings.c @@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip, struct nand_sdr_timings *timings = &iface->timings.sdr; /* microseconds -> picoseconds */ - timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog); - timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers); - timings->tR_max = 1000000UL * le16_to_cpu(params->t_r); + timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog); + timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers); + timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r); /* nanoseconds -> picoseconds */ timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index d0b6f8f9f297..6abd142b1324 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline, */ chip->clk_rate = NSEC_PER_SEC / min_clk_period; real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); + if (real_clk_rate <= 0) { + dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate); + return -EINVAL; + } /* * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 181839d6fbea..9bee6c1c70cc 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2050,6 +2050,7 @@ static int bond_miimon_inspect(struct bonding *bond) continue; bond_propose_link_state(slave, BOND_LINK_FAIL); + commit++; slave->delay = bond->params.downdelay; if (slave->delay) { netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", @@ -2088,6 +2089,7 @@ static int bond_miimon_inspect(struct bonding *bond) continue; bond_propose_link_state(slave, BOND_LINK_BACK); + commit++; slave->delay = bond->params.updelay; if (slave->delay) { 
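/*
 * Editor's note on the bonding hunks above: bond_miimon_inspect() only
 * *proposes* link-state changes; its caller runs the commit phase solely when
 * the returned commit count is non-zero, which is why each
 * bond_propose_link_state() now bumps commit++. A self-contained reduction of
 * that propose/commit contract (all names here are illustrative, not the
 * driver's own):
 */
#include <stdio.h>

enum link_state { LS_UP, LS_FAIL };

struct slave { enum link_state state, proposed; };

static int inspect(struct slave *s, int n)
{
	int commit = 0;

	for (int i = 0; i < n; i++) {
		if (s[i].state == LS_UP) {
			s[i].proposed = LS_FAIL;  /* propose the transition... */
			commit++;                 /* ...and make sure it is counted */
		}
	}
	return commit; /* a proposal without commit++ would never be applied */
}

static void commit_all(struct slave *s, int n)
{
	for (int i = 0; i < n; i++)
		s[i].state = s[i].proposed;
}

int main(void)
{
	struct slave slaves[2] = { { LS_UP, LS_UP }, { LS_UP, LS_UP } };

	if (inspect(slaves, 2)) /* without the count, this gate never opens */
		commit_all(slaves, 2);
	printf("slave0 is now %s\n", slaves[0].state == LS_FAIL ? "FAIL" : "UP");
	return 0;
}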
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1e46418a3b74..264b281eb86b 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, * all finished. */ mt7623_pad_clk_setup(ds); + } else { + u16 lcl_adv = 0, rmt_adv = 0; + u8 flowctrl; + u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE; + + switch (phydev->speed) { + case SPEED_1000: + mcr |= PMCR_FORCE_SPEED_1000; + break; + case SPEED_100: + mcr |= PMCR_FORCE_SPEED_100; + break; + }; + + if (phydev->link) + mcr |= PMCR_FORCE_LNK; + + if (phydev->duplex) { + mcr |= PMCR_FORCE_FDX; + + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + if (phydev->advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phydev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + + if (flowctrl & FLOW_CTRL_TX) + mcr |= PMCR_TX_FC_EN; + if (flowctrl & FLOW_CTRL_RX) + mcr |= PMCR_RX_FC_EN; + } + mt7530_write(priv, MT7530_PMCR_P(port), mcr); } } diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index b83d76b99802..74db9822eb40 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -151,6 +151,7 @@ enum mt7530_stp_state { #define PMCR_TX_FC_EN BIT(5) #define PMCR_RX_FC_EN BIT(4) #define PMCR_FORCE_SPEED_1000 BIT(3) +#define PMCR_FORCE_SPEED_100 BIT(2) #define PMCR_FORCE_FDX BIT(1) #define PMCR_FORCE_LNK BIT(0) #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 86058a9f3417..1d307f2def2d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) xgene_enet_gpiod_get(pdata); - if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { - pdata->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(pdata->clk)) { + pdata->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pdata->clk)) { + if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { /* Abort if the clock is defined but couldn't be * retrived. Always abort if the clock is missing on * DT system as the driver can't cope with this case. 
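/*
 * Editor's note on the xgene_enet hunk above: the lookup via devm_clk_get()
 * is now done unconditionally, and only a failed lookup on a non-SGMII port
 * is treated as fatal, since SGMII ports may legitimately run without a
 * clock. A condensed sketch of this "optional clock" pattern (the sgmii flag
 * and helper shape are illustrative, not the driver's actual code):
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int get_optional_clk(struct device *dev, bool sgmii, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk)) {
		if (!sgmii)		/* mandatory for non-SGMII ports */
			return PTR_ERR(clk);
		clk = NULL;		/* SGMII: carry on without the clock */
	}
	*out = clk;
	return 0;
}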
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 041cfb7952f8..e94159507847 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev) mac_mode |= HALF_DUPLEX; if (gigabit) { - if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_is_rgmii(dev->phydev)) mac_mode |= RGMII_MODE; mac_mode |= GMAC_MODE; @@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev) break; case PHY_INTERFACE_MODE_RGMII: - pad_mode = PAD_MODE_RGMII; - break; - + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; + pad_mode = PAD_MODE_RGMII; break; default: diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index f411936b744c..a1125d10c825 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev, bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); spin_lock_init(&bp->lock); + u64_stats_init(&bp->hw_stats.syncp); bp->rx_pending = B44_DEF_RX_RING_PENDING; bp->tx_pending = B44_DEF_TX_RING_PENDING; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5333601f855f..dc3052751bc1 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev, p = (char *)&dev->stats; else p = (char *)priv; + + if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) + continue; + p += s->stat_offset; data[j] = *(unsigned long *)p; j++; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 7b0b399aaedd..a981c4ee9d72 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3669,7 +3669,7 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(priv->phydev); /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev); + bcmgenet_mii_config(priv->dev, false); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b9344de669f8..3a34fdba5301 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -698,7 +698,7 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); void bcmgenet_mii_reset(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 071fcbd14e6a..30cb97b4a1d7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -238,7 +238,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) bcmgenet_fixed_phy_link_update); } -int bcmgenet_mii_config(struct net_device *dev) +int bcmgenet_mii_config(struct net_device *dev, bool init) { struct bcmgenet_priv *priv = 
netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -327,7 +327,8 @@ int bcmgenet_mii_config(struct net_device *dev) bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } - dev_info_once(kdev, "configuring instance for %s\n", phy_name); + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); return 0; } @@ -375,7 +376,7 @@ int bcmgenet_mii_probe(struct net_device *dev) * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. */ - ret = bcmgenet_mii_config(dev); + ret = bcmgenet_mii_config(dev, true); if (ret) { phy_disconnect(priv->phydev); return ret; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 79112563a25a..5e5c4d7796b8 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -292,11 +292,30 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) u64 cmr_cfg; u64 port_cfg = 0; u64 misc_ctl = 0; + bool tx_en, rx_en; cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); - cmr_cfg &= ~CMR_EN; + tx_en = cmr_cfg & CMR_PKT_TX_EN; + rx_en = cmr_cfg & CMR_PKT_RX_EN; + cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); + /* Wait for BGX RX to be idle */ + if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, + GMI_PORT_CFG_RX_IDLE, false)) { + dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n", + bgx->bgx_id, lmac->lmacid); + return; + } + + /* Wait for BGX TX to be idle */ + if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, + GMI_PORT_CFG_TX_IDLE, false)) { + dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n", + bgx->bgx_id, lmac->lmacid); + return; + } + port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); @@ -347,10 +366,8 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); - port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); - - /* Re-enable lmac */ - cmr_cfg |= CMR_EN; + /* Restore CMR config settings */ + cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? 
CMR_PKT_TX_EN : 0); bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 6b7fe6fdd13b..23acdc5ab896 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -170,6 +170,8 @@ #define GMI_PORT_CFG_DUPLEX BIT_ULL(2) #define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) #define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) +#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12) +#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13) #define BGX_GMP_GMI_RXX_JABBER 0x38038 #define BGX_GMP_GMI_TXX_THRESH 0x38210 #define BGX_GMP_GMI_TXX_APPEND 0x38218 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 95bf5e89cfd1..34dae51effd4 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -125,7 +125,7 @@ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); iowrite32(maccr | FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR); - for (i = 0; i < 50; i++) { + for (i = 0; i < 200; i++) { unsigned int maccr; maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); @@ -392,7 +392,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, struct net_device *netdev = priv->netdev; struct sk_buff *skb; dma_addr_t map; - int err; + int err = 0; skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); if (unlikely(!skb)) { @@ -428,7 +428,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, else rxdes->rxdes0 = 0; - return 0; + return err; } static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, @@ -1682,6 +1682,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) priv->mii_bus->name = "ftgmac100_mdio"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); + priv->mii_bus->parent = priv->dev; priv->mii_bus->priv = priv->netdev; priv->mii_bus->read = ftgmac100_mdiobus_read; priv->mii_bus->write = ftgmac100_mdiobus_write; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a3e694679635..c45e8e3b82d3 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); static void send_request_unmap(struct ibmvnic_adapter *, u8); static void send_login(struct ibmvnic_adapter *adapter); static void send_cap_queries(struct ibmvnic_adapter *adapter); +static int init_sub_crqs(struct ibmvnic_adapter *); static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); static int ibmvnic_init(struct ibmvnic_adapter *); static void release_crq_queue(struct ibmvnic_adapter *); @@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); unsigned long timeout = msecs_to_jiffies(30000); struct device *dev = &adapter->vdev->dev; + int rc; do { if (adapter->renegotiate) { @@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev) dev_err(dev, "Capabilities query timeout\n"); return -1; } + rc = init_sub_crqs(adapter); + if (rc) { + dev_err(dev, + "Initialization of SCRQ's failed\n"); + return -1; + } + rc = init_sub_crq_irqs(adapter); + if (rc) { + dev_err(dev, + "Initialization of SCRQ's irqs failed\n"); + return -1; + } } 
reinit_completion(&adapter->init_done); @@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, *req_value, (long int)be64_to_cpu(crq->request_capability_rsp. number), name); - release_sub_crqs(adapter); *req_value = be64_to_cpu(crq->request_capability_rsp.number); ibmvnic_send_req_caps(adapter, 1); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b936febc315a..2194960d5855 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) if (!tx_ring->tx_bi) goto err; + u64_stats_init(&tx_ring->syncp); + /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); /* add u32 for head writeback, align after this takes care of diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 084c53582793..032f8ac06357 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) if (!tx_ring->tx_buffer_info) goto err; + u64_stats_init(&tx_ring->syncp); + /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); @@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) if (!rx_ring->rx_buffer_info) goto err; + u64_stats_init(&rx_ring->syncp); + /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 5794d98d946f..9c94ea9b2b80 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2734,7 +2734,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, ppd.shared = pdev; memset(&res, 0, sizeof(res)); - if (!of_irq_to_resource(pnp, 0, &res)) { + if (of_irq_to_resource(pnp, 0, &res) <= 0) { dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); return -EINVAL; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b3d0c2e6347a..e588a0cdb074 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -22,6 +22,7 @@ #include <linux/if_vlan.h> #include <linux/reset.h> #include <linux/tcp.h> +#include <linux/interrupt.h> #include "mtk_eth_soc.h" @@ -947,6 +948,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, RX_DMA_FPORT_MASK; mac--; + if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || + !eth->netdev[mac])) + goto release_desc; + netdev = eth->netdev[mac]; if (unlikely(test_bit(MTK_RESETTING, &eth->state))) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c751a1d434ad..3d4e4a5d00d1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct mlx4_en_priv *priv = netdev_priv(netdev); + struct mlx4_caps *caps = &priv->mdev->dev->caps; int err = 0; u64 config = 0; u64 mask; @@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev, mask =
(priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : MLX4_DEV_CAP_FLAG_WOL_PORT2; - if (!(priv->mdev->dev->caps.flags & mask)) { + if (!(caps->flags & mask)) { wol->supported = 0; wol->wolopts = 0; return; } + if (caps->wol_port[priv->port]) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); if (err) { en_err(priv, "Failed to get WoL information\n"); return; } - if (config & MLX4_EN_WOL_MAGIC) - wol->supported = WAKE_MAGIC; - else - wol->supported = 0; - - if (config & MLX4_EN_WOL_ENABLED) + if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC)) wol->wolopts = WAKE_MAGIC; else wol->wolopts = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 436f7689a032..bf1638044a7a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, * header, the HW adds it. To address that, we are subtracting the pseudo * header checksum from the checksum value provided by the HW. */ -static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, - struct iphdr *iph) +static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, + struct iphdr *iph) { __u16 length_for_csum = 0; __wsum csum_pseudo_header = 0; + __u8 ipproto = iph->protocol; + + if (unlikely(ipproto == IPPROTO_SCTP)) + return -1; length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, - length_for_csum, iph->protocol, 0); + length_for_csum, ipproto, 0); skb->csum = csum_sub(hw_checksum, csum_pseudo_header); + return 0; } #if IS_ENABLED(CONFIG_IPV6) @@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, struct ipv6hdr *ipv6h) { + __u8 nexthdr = ipv6h->nexthdr; __wsum csum_pseudo_hdr = 0; - if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || - ipv6h->nexthdr == IPPROTO_HOPOPTS)) + if (unlikely(nexthdr == IPPROTO_FRAGMENT || + nexthdr == IPPROTO_HOPOPTS || + nexthdr == IPPROTO_SCTP)) return -1; - hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); csum_pseudo_hdr = csum_partial(&ipv6h->saddr, sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, + (__force __wsum)htons(nexthdr)); skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); @@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, } if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) - get_fixed_ipv4_csum(hw_checksum, skb, hdr); + return get_fixed_ipv4_csum(hw_checksum, skb, hdr); #if IS_ENABLED(CONFIG_IPV6) - else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) - if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) - return -1; + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) + return get_fixed_ipv6_csum(hw_checksum, skb, hdr); #endif return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 37e84a59e751..041c0ed65929 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [32] = "Loopback source checks support", [33] = "RoCEv2 support", [34] = "DMFS Sniffer support (UC & MC)", - [35] = "QinQ VST mode support", - [36] = "sl to vl mapping table change event support" + [35] = "Diag counters per port", + [36] = "QinQ VST mode support", + [37] = "sl to vl mapping table change event support", }; int i; @@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 +#define QUERY_DEV_CAP_WOL_OFFSET 0x43 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 @@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); dev_cap->flags = flags | (u64)ext_flags << 32; + MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET); + dev_cap->wol_port[1] = !!(field & 0x20); + dev_cap->wol_port[2] = !!(field & 0x40); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); dev_cap->reserved_uars = field >> 4; MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 5343a0599253..b52ba01aa486 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -129,6 +129,7 @@ struct mlx4_dev_cap { u32 dmfs_high_rate_qpn_range; struct mlx4_rate_limit_caps rl_caps; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; + bool wol_port[MLX4_MAX_PORTS + 1]; }; struct mlx4_func_cap { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a27c9c13a36e..09b9bc17bce9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.stat_rate_support = dev_cap->stat_rate_support; dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + dev->caps.wol_port[1] = dev_cap->wol_port[1]; + dev->caps.wol_port[2] = dev_cap->wol_port[2]; /* Save uar page shift */ if (!mlx4_is_slave(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index f5a2c605749f..31cbe5e86a01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work) mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg); + static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); @@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work) struct semaphore *sem; unsigned long flags; bool poll_cmd = ent->polling; + int alloc_ret; sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { - ent->idx = alloc_ent(cmd); - if (ent->idx < 0) { + alloc_ret = alloc_ent(cmd); + if (alloc_ret < 0) { mlx5_core_err(dev, "failed to allocate command entry\n"); + if (ent->callback) { + ent->callback(-EAGAIN, ent->context); + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + free_cmd(ent); + } else { + ent->ret = -EAGAIN; + complete(&ent->done); + } up(sem); return; } + ent->idx = alloc_ret; } else { ent->idx = cmd->max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); @@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, err = wait_func(dev, ent); if (err == -ETIMEDOUT) - goto out_free; + goto out; ds = ent->ts2 - ent->ts1; op = MLX5_GET(mbox_in, in->first.data, opcode); @@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", ent->idx); free_ent(cmd, ent->idx); + free_cmd(ent); } continue; } @@ -1488,7 +1504,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) free_msg(dev, ent->in); err = err ? err : ent->status; - free_cmd(ent); + if (!forced) + free_cmd(ent); callback(err, context); } else { complete(&ent->done); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e1b7ddfecd01..0039b4725405 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -266,6 +266,14 @@ struct mlx5e_dcbx { }; #endif +#define MAX_PIN_NUM 8 +struct mlx5e_pps { + u8 pin_caps[MAX_PIN_NUM]; + struct work_struct out_work; + u64 start[MAX_PIN_NUM]; + u8 enabled; +}; + struct mlx5e_tstamp { rwlock_t lock; struct cyclecounter cycles; @@ -277,7 +285,7 @@ struct mlx5e_tstamp { struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; - u8 *pps_pin_caps; + struct mlx5e_pps pps_info; }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 66f432385dbb..84dd63e74041 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -53,6 +53,15 @@ enum { MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, }; +enum { + MLX5E_MTPPS_FS_ENABLE = BIT(0x0), + MLX5E_MTPPS_FS_PATTERN = BIT(0x2), + MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3), + MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4), + MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), + MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), +}; + void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, struct skb_shared_hwtstamps *hwts) { @@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; } +static void mlx5e_pps_out(struct work_struct *work) +{ + struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps, + out_work); + struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp, + pps_info); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + unsigned long flags; + int i; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + u64 tstart; + + write_lock_irqsave(&tstamp->lock, flags); + tstart = tstamp->pps_info.start[i]; + tstamp->pps_info.start[i] = 0; + write_unlock_irqrestore(&tstamp->lock, flags); + if (!tstart) + continue; + + MLX5_SET(mtpps_reg, in, pin, i); + MLX5_SET64(mtpps_reg, in, time_stamp, tstart); + MLX5_SET(mtpps_reg, in, field_select, 
MLX5E_MTPPS_FS_TIME_STAMP); + mlx5_set_mtpps(tstamp->mdev, in, sizeof(in)); + } +} + static void mlx5e_timestamp_overflow(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, overflow_work); + struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); unsigned long flags; write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); write_unlock_irqrestore(&tstamp->lock, flags); - schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); + queue_delayed_work(priv->wq, &tstamp->overflow_work, + msecs_to_jiffies(tstamp->overflow_period * 1000)); } int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) @@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); - struct mlx5e_priv *priv = - container_of(tstamp, struct mlx5e_priv, tstamp); - - if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { - u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - - /* For future use need to add a loop for finding all 1PPS out pins */ - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); - MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); - - mlx5_set_mtpps(priv->mdev, in, sizeof(in)); - } if (delta < 0) { neg_adj = 1; @@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u32 field_select = 0; + u8 pin_mode = 0; u8 pattern = 0; int pin = -1; int err = 0; - if (!MLX5_CAP_GEN(priv->mdev, pps) || - !MLX5_CAP_GEN(priv->mdev, pps_modify)) + if (!MLX5_PPS_CAP(priv->mdev)) return -EOPNOTSUPP; if (rq->extts.index >= tstamp->ptp_info.n_pins) @@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); if (pin < 0) return -EBUSY; + pin_mode = MLX5E_PIN_MODE_IN; + pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); + field_select = MLX5E_MTPPS_FS_PIN_MODE | + MLX5E_MTPPS_FS_PATTERN | + MLX5E_MTPPS_FS_ENABLE; + } else { + pin = rq->extts.index; + field_select = MLX5E_MTPPS_FS_ENABLE; } - if (rq->extts.flags & PTP_FALLING_EDGE) - pattern = 1; - MLX5_SET(mtpps_reg, in, pin, pin); - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); MLX5_SET(mtpps_reg, in, pattern, pattern); MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET(mtpps_reg, in, field_select, field_select); err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); if (err) @@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - u64 nsec_now, nsec_delta, time_stamp; + u64 nsec_now, nsec_delta, time_stamp = 0; u64 cycles_now, cycles_delta; struct timespec64 ts; unsigned long flags; + u32 field_select = 0; + u8 pin_mode = 0; + u8 pattern = 0; int pin = -1; + int err = 0; s64 ns; - if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) + if (!MLX5_PPS_CAP(priv->mdev)) return -EOPNOTSUPP; if (rq->perout.index >= tstamp->ptp_info.n_pins) @@ -313,32 +350,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, rq->perout.index); if (pin < 0) return -EBUSY; - } - ts.tv_sec = rq->perout.period.sec; - ts.tv_nsec = rq->perout.period.nsec; - ns = timespec64_to_ns(&ts); - if (on) + pin_mode = 
MLX5E_PIN_MODE_OUT; + pattern = MLX5E_OUT_PATTERN_PERIODIC; + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + if ((ns >> 1) != 500000000LL) return -EINVAL; - ts.tv_sec = rq->perout.start.sec; - ts.tv_nsec = rq->perout.start.nsec; - ns = timespec64_to_ns(&ts); - cycles_now = mlx5_read_internal_timer(tstamp->mdev); - write_lock_irqsave(&tstamp->lock, flags); - nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); - nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, - tstamp->cycles.mult); - write_unlock_irqrestore(&tstamp->lock, flags); - time_stamp = cycles_now + cycles_delta; + + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + ns = timespec64_to_ns(&ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + write_unlock_irqrestore(&tstamp->lock, flags); + time_stamp = cycles_now + cycles_delta; + field_select = MLX5E_MTPPS_FS_PIN_MODE | + MLX5E_MTPPS_FS_PATTERN | + MLX5E_MTPPS_FS_ENABLE | + MLX5E_MTPPS_FS_TIME_STAMP; + } else { + pin = rq->perout.index; + field_select = MLX5E_MTPPS_FS_ENABLE; + } + MLX5_SET(mtpps_reg, in, pin, pin); - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); - MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); + MLX5_SET(mtpps_reg, in, pattern, pattern); MLX5_SET(mtpps_reg, in, enable, on); MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); + MLX5_SET(mtpps_reg, in, field_select, field_select); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; - return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_pps_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + + tstamp->pps_info.enabled = !!on; + return 0; } static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, @@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, return mlx5e_extts_configure(ptp, rq, on); case PTP_CLK_REQ_PEROUT: return mlx5e_perout_configure(ptp, rq, on); + case PTP_CLK_REQ_PPS: + return mlx5e_pps_configure(ptp, rq, on); default: return -EOPNOTSUPP; } @@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) return -ENOMEM; tstamp->ptp_info.enable = mlx5e_ptp_enable; tstamp->ptp_info.verify = mlx5e_ptp_verify; + tstamp->ptp_info.pps = 1; for (i = 0; i < tstamp->ptp_info.n_pins; i++) { snprintf(tstamp->ptp_info.pin_config[i].name, @@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, cap_max_num_of_pps_out_pins); - tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); - tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); - tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); - tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); - tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); - tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); - tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); - tstamp->pps_pin_caps[7] = 
MLX5_GET(mtpps_reg, out, cap_pin_7_mode); + tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); + tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); } void mlx5e_pps_event_handler(struct mlx5e_priv *priv, struct ptp_clock_event *event) { + struct net_device *netdev = priv->netdev; struct mlx5e_tstamp *tstamp = &priv->tstamp; + struct timespec64 ts; + u64 nsec_now, nsec_delta; + u64 cycles_now, cycles_delta; + int pin = event->index; + s64 ns; + unsigned long flags; - ptp_clock_event(tstamp->ptp, event); + switch (tstamp->ptp_info.pin_config[pin].func) { + case PTP_PF_EXTTS: + if (tstamp->pps_info.enabled) { + event->type = PTP_CLOCK_PPSUSR; + event->pps_times.ts_real = ns_to_timespec64(event->timestamp); + } else { + event->type = PTP_CLOCK_EXTTS; + } + ptp_clock_event(tstamp->ptp, event); + break; + case PTP_PF_PEROUT: + mlx5e_ptp_gettime(&tstamp->ptp_info, &ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + ts.tv_sec += 1; + ts.tv_nsec = 0; + ns = timespec64_to_ns(&ts); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + tstamp->pps_info.start[pin] = cycles_now + cycles_delta; + queue_work(priv->wq, &tstamp->pps_info.out_work); + write_unlock_irqrestore(&tstamp->lock, flags); + break; + default: + netdev_err(netdev, "%s: Unhandled event\n", __func__); + } } void mlx5e_timestamp_init(struct mlx5e_priv *priv) @@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) do_div(ns, NSEC_PER_SEC / 2 / HZ); tstamp->overflow_period = ns; + INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); if (tstamp->overflow_period) - schedule_delayed_work(&tstamp->overflow_work, 0); + queue_delayed_work(priv->wq, &tstamp->overflow_work, 0); else mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); @@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); /* Initialize 1PPS data structures */ -#define MAX_PIN_NUM 8 - tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); - if (tstamp->pps_pin_caps) { - if (MLX5_CAP_GEN(priv->mdev, pps)) - mlx5e_get_pps_caps(priv, tstamp); - if (tstamp->ptp_info.n_pins) - mlx5e_init_pin_config(tstamp); - } else { - mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); - } + if (MLX5_PPS_CAP(priv->mdev)) + mlx5e_get_pps_caps(priv, tstamp); + if (tstamp->ptp_info.n_pins) + mlx5e_init_pin_config(tstamp); tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, &priv->mdev->pdev->dev); @@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) priv->tstamp.ptp = NULL; } - kfree(tstamp->pps_pin_caps); - kfree(tstamp->ptp_info.pin_config); - + cancel_work_sync(&tstamp->pps_info.out_work); cancel_delayed_work_sync(&tstamp->overflow_work); + kfree(tstamp->ptp_info.pin_config); } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index bdd82c9b3992..eafc59280ada 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv, static bool outer_header_zero(u32 *match_criteria) { - int size = MLX5_ST_SZ_BYTES(fte_match_param); + int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers); char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers); @@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv, spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0); if (IS_ERR(rule)) { err = PTR_ERR(rule); netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1eac5003084f..57f31fa478ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, break; case MLX5_DEV_EVENT_PPS: eqe = (struct mlx5_eqe *)param; - ptp_event.type = PTP_CLOCK_EXTTS; ptp_event.index = eqe->data.pps.pin; ptp_event.timestamp = timecounter_cyc2time(&priv->tstamp.clock, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index af51a5d2b912..52b9a64cd3a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) else mlx5_core_dbg(dev, "port_module_event is not set\n"); - if (MLX5_CAP_GEN(dev, pps)) + if (MLX5_PPS_CAP(dev)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); if (MLX5_CAP_GEN(dev, fpga)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 89bfda419efe..8b18cc9ec026 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) int i; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH || + esw->mode == SRIOV_NONE) return; esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 1ee5bce85901..85298051a3e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -178,8 +178,6 @@ out: static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) { - mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn); - mlx5_core_destroy_qp(mdev, qp); } @@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return err; } - mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); - err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); @@ -253,6 +249,7 @@ 
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { + struct mlx5i_priv *ipriv = priv->ppriv; int err; err = mlx5e_create_indirect_rqt(priv); @@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_indirect_tirs; - err = mlx5i_create_flow_steering(priv); + err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); if (err) goto err_destroy_direct_tirs; + err = mlx5i_create_flow_steering(priv); + if (err) + goto err_remove_rx_underlay_qpn; + return 0; +err_remove_rx_underlay_qpn: + mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: @@ -290,6 +293,9 @@ err_destroy_indirect_rqts: static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { + struct mlx5i_priv *ipriv = priv->ppriv; + + mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_indirect_tirs(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index a3a836bdcfd2..f26f97fe4666 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev) static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, u8 *port1, u8 *port2) { - if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { - if (tracker->netdev_state[0].tx_enabled) { - *port1 = 1; - *port2 = 1; - } else { - *port1 = 2; - *port2 = 2; - } - } else { - *port1 = 1; - *port2 = 2; - if (!tracker->netdev_state[0].link_up) - *port1 = 2; - else if (!tracker->netdev_state[1].link_up) - *port2 = 1; + *port1 = 1; + *port2 = 2; + if (!tracker->netdev_state[0].tx_enabled || + !tracker->netdev_state[0].link_up) { + *port1 = 2; + return; } + + if (!tracker->netdev_state[1].tx_enabled || + !tracker->netdev_state[1].link_up) + *port2 = 1; } static void mlx5_activate_lag(struct mlx5_lag *ldev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 6a3d6bef7dd4..6a263e8d883a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); +#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \ + MLX5_CAP_GEN((mdev), pps_modify) && \ + MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \ + MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj)) + int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw); void mlx5e_init(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index bcdf7779c48d..bf99d40e30b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) int vf; if (!sriov->enabled_vfs) +#ifdef CONFIG_MLX5_CORE_EN + goto disable_sriov_resources; +#else return; +#endif for (vf = 0; vf < sriov->num_vfs; vf++) { if (!sriov->vfs_ctx[vf].enabled) @@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) } 
#ifdef CONFIG_MLX5_CORE_EN +disable_sriov_resources: mlx5_eswitch_disable_sriov(dev->priv.eswitch); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 383fef5a8e24..4b2e0fd7d51e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1512,6 +1512,10 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry); +static bool +mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, + const struct mlxsw_sp_fib_entry *fib_entry); + static int mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) @@ -1520,6 +1524,9 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, int err; list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, + fib_entry)) + continue; err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 656b2d3f1bee..5eb1606765c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, orig_dev); - if (WARN_ON(!bridge_port)) - return -EINVAL; + if (!bridge_port) + return 0; err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, MLXSW_SP_FLOOD_TYPE_UC, @@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, orig_dev); - if (WARN_ON(!bridge_port)) - return -EINVAL; + if (!bridge_port) + return 0; if (!bridge_port->bridge_device->multicast_enabled) return 0; @@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); - if (WARN_ON(!bridge_port)) - return -EINVAL; + if (!bridge_port) + return 0; bridge_device = bridge_port->bridge_device; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, bridge_device, mdb->vid); - if (WARN_ON(!mlxsw_sp_port_vlan)) - return -EINVAL; + if (!mlxsw_sp_port_vlan) + return 0; fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); @@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, int err = 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); - if (WARN_ON(!bridge_port)) - return -EINVAL; + if (!bridge_port) + return 0; bridge_device = bridge_port->bridge_device; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, bridge_device, mdb->vid); - if (WARN_ON(!mlxsw_sp_port_vlan)) - return -EINVAL; + if (!mlxsw_sp_port_vlan) + return 0; fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); @@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) } +static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_mid *mid, *tmp; + + list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) { + list_del(&mid->list); + clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); 
+ kfree(mid); + } +} + int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge; @@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp_fdb_fini(mlxsw_sp); - WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); + mlxsw_sp_mids_fini(mlxsw_sp); WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); kfree(mlxsw_sp->bridge); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 18750ff0ede6..4631ca8b8eb2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, tx_ring->idx = idx; tx_ring->r_vec = r_vec; tx_ring->is_xdp = is_xdp; + u64_stats_init(&tx_ring->r_vec->tx_sync); tx_ring->qcidx = tx_ring->idx * nn->stride_tx; tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); @@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, rx_ring->idx = idx; rx_ring->r_vec = r_vec; + u64_stats_init(&rx_ring->r_vec->rx_sync); rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9da91045d167..3eb241657368 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); - if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) + if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow) goto err; return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 22cf6353ba04..7ecf549c7f1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -205,7 +205,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - for (i = 0; i < 23; i++) + for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++) if ((i < 12) || (i > 17)) reg_space[DMA_BUS_MODE / 4 + i] = readl(ioaddr + DMA_BUS_MODE + i * 4); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index eef2f222ce9a..6502b9aa3bf5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) { int i; - for (i = 0; i < 9; i++) + for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++) reg_space[DMA_BUS_MODE / 4 + i] = readl(ioaddr + DMA_BUS_MODE + i * 4); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 9091df86723a..adc54006f884 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -136,6 +136,9 @@ #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ +#define NUM_DWMAC100_DMA_REGS 9 +#define NUM_DWMAC1000_DMA_REGS 23 + void dwmac_enable_dma_transmission(void __iomem *ioaddr); void dwmac_enable_dma_irq(void __iomem 
*ioaddr, u32 chan); void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index babb39c646ff..af30b4857c3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -33,6 +33,8 @@ #define MAC100_ETHTOOL_NAME "st_mac100" #define GMAC_ETHTOOL_NAME "st_gmac" +#define ETHTOOL_DMA_OFFSET 55 + struct stmmac_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -442,6 +444,9 @@ static void stmmac_ethtool_gregs(struct net_device *dev, priv->hw->mac->dump_regs(priv->hw, reg_space); priv->hw->dma->dump_regs(priv->ioaddr, reg_space); + /* Copy DMA registers to where ethtool expects them */ + memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], + NUM_DWMAC1000_DMA_REGS * 4); } static void diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h index 3af540adb3c5..fca1bca7f69d 100644 --- a/drivers/net/ethernet/sun/sunhme.h +++ b/drivers/net/ethernet/sun/sunhme.h @@ -13,9 +13,9 @@ /* Happy Meal global registers. */ #define GREG_SWRESET 0x000UL /* Software Reset */ #define GREG_CFG 0x004UL /* Config Register */ -#define GREG_STAT 0x108UL /* Status */ -#define GREG_IMASK 0x10cUL /* Interrupt Mask */ -#define GREG_REG_SIZE 0x110UL +#define GREG_STAT 0x100UL /* Status */ +#define GREG_IMASK 0x104UL /* Interrupt Mask */ +#define GREG_REG_SIZE 0x108UL /* Global reset register. */ #define GREG_RESET_ETX 0x01 diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 32279d21c836..c2121d214f08 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -31,9 +31,18 @@ #include "cpts.h" +#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */ + +struct cpts_skb_cb_data { + unsigned long tmo; +}; + #define cpts_read32(c, r) readl_relaxed(&c->reg->r) #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) +static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, + u16 ts_seqid, u8 ts_msgtype); + static int event_expired(struct cpts_event *event) { return time_after(jiffies, event->tmo); @@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts) return removed ? 0 : -1; } +static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) +{ + struct sk_buff *skb, *tmp; + u16 seqid; + u8 mtype; + bool found = false; + + mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; + seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; + + /* no need to grab txq.lock as access is always done under cpts->lock */ + skb_queue_walk_safe(&cpts->txq, skb, tmp) { + struct skb_shared_hwtstamps ssh; + unsigned int class = ptp_classify_raw(skb); + struct cpts_skb_cb_data *skb_cb = + (struct cpts_skb_cb_data *)skb->cb; + + if (cpts_match(skb, class, seqid, mtype)) { + u64 ns = timecounter_cyc2time(&cpts->tc, event->low); + + memset(&ssh, 0, sizeof(ssh)); + ssh.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &ssh); + found = true; + __skb_unlink(skb, &cpts->txq); + dev_consume_skb_any(skb); + dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", + mtype, seqid); + } else if (time_after(jiffies, skb_cb->tmo)) { + /* timeout any expired skbs over 1s */ + dev_dbg(cpts->dev, + "expiring tx timestamp mtype %u seqid %04x\n", + mtype, seqid); + __skb_unlink(skb, &cpts->txq); + dev_consume_skb_any(skb); + } + } + + return found; +} + /* * Returns zero if matching event type was found. 
*/ @@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match) event->low = lo; type = event_type(event); switch (type) { + case CPTS_EV_TX: + if (cpts_match_tx_ts(cpts, event)) { + /* if the new event matches an existing skb, + * then don't queue it + */ + break; + } case CPTS_EV_PUSH: case CPTS_EV_RX: - case CPTS_EV_TX: list_del_init(&event->list); list_add_tail(&event->list, &cpts->events); break; @@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } +static long cpts_overflow_check(struct ptp_clock_info *ptp) +{ + struct cpts *cpts = container_of(ptp, struct cpts, info); + unsigned long delay = cpts->ov_check_period; + struct timespec64 ts; + unsigned long flags; + + spin_lock_irqsave(&cpts->lock, flags); + ts = ns_to_timespec64(timecounter_read(&cpts->tc)); + + if (!skb_queue_empty(&cpts->txq)) + delay = CPTS_SKB_TX_WORK_TIMEOUT; + spin_unlock_irqrestore(&cpts->lock, flags); + + pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); + return (long)delay; +} + static struct ptp_clock_info cpts_info = { .owner = THIS_MODULE, .name = "CTPS timer", @@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = { .gettime64 = cpts_ptp_gettime, .settime64 = cpts_ptp_settime, .enable = cpts_ptp_enable, + .do_aux_work = cpts_overflow_check, }; -static void cpts_overflow_check(struct work_struct *work) -{ - struct timespec64 ts; - struct cpts *cpts = container_of(work, struct cpts, overflow_work.work); - - cpts_ptp_gettime(&cpts->info, &ts); - pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); - schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); -} - static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, u16 ts_seqid, u8 ts_msgtype) { @@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) return 0; spin_lock_irqsave(&cpts->lock, flags); - cpts_fifo_read(cpts, CPTS_EV_PUSH); + cpts_fifo_read(cpts, -1); list_for_each_safe(this, next, &cpts->events) { event = list_entry(this, struct cpts_event, list); if (event_expired(event)) { @@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) break; } } + + if (ev_type == CPTS_EV_TX && !ns) { + struct cpts_skb_cb_data *skb_cb = + (struct cpts_skb_cb_data *)skb->cb; + /* Not found, add frame to queue for processing later. + * The periodic FIFO check will handle this. 
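+ * (The skb is held with an extra reference taken via skb_get() below and
+ * is dropped by cpts_match_tx_ts() if its timeout, set to 100 ms below,
+ * expires first.)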
+ */ + skb_get(skb); + /* get the timestamp for timeouts */ + skb_cb->tmo = jiffies + msecs_to_jiffies(100); + __skb_queue_tail(&cpts->txq, skb); + ptp_schedule_worker(cpts->clock, 0); + } spin_unlock_irqrestore(&cpts->lock, flags); return ns; @@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts) { int err, i; + skb_queue_head_init(&cpts->txq); INIT_LIST_HEAD(&cpts->events); INIT_LIST_HEAD(&cpts->pool); for (i = 0; i < CPTS_MAX_EVENTS; i++) @@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts) } cpts->phc_index = ptp_clock_index(cpts->clock); - schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); + ptp_schedule_worker(cpts->clock, cpts->ov_check_period); return 0; err_ptp: @@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts) if (WARN_ON(!cpts->clock)) return; - cancel_delayed_work_sync(&cpts->overflow_work); - ptp_clock_unregister(cpts->clock); cpts->clock = NULL; cpts_write32(cpts, 0, int_enable); cpts_write32(cpts, 0, control); + /* Drop all packets */ + skb_queue_purge(&cpts->txq); + clk_disable(cpts->refclk); } EXPORT_SYMBOL_GPL(cpts_unregister); @@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, cpts->dev = dev; cpts->reg = (struct cpsw_cpts __iomem *)regs; spin_lock_init(&cpts->lock); - INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check); ret = cpts_of_parse(cpts, node); if (ret) diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h index 01ea82ba9cdc..73d73faf0f38 100644 --- a/drivers/net/ethernet/ti/cpts.h +++ b/drivers/net/ethernet/ti/cpts.h @@ -119,13 +119,13 @@ struct cpts { u32 cc_mult; /* for the nominal frequency */ struct cyclecounter cc; struct timecounter tc; - struct delayed_work overflow_work; int phc_index; struct clk *refclk; struct list_head events; struct list_head pool; struct cpts_event pool_data[CPTS_MAX_EVENTS]; unsigned long ov_check_period; + struct sk_buff_head txq; }; void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index d9db8a06afd2..cce9c9ed46aa 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1338,7 +1338,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) { static int count; - printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):", + printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):", dev->name, status); if (status & Int_IntPCI) printk(" IntPCI"); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index de8156c6b292..2bbda71818ad 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_GENEVE_ID]) { __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); - if (vni >= GENEVE_VID_MASK) + if (vni >= GENEVE_N_VID) return -ERANGE; } diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 1542e837fdfa..f38e32a7ec9c 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev) gtp->dev = dev; - dev->tstats = alloc_percpu(struct pcpu_sw_netstats); + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d6c25580f8dd..12cc64bfcff8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++
b/drivers/net/hyperv/hyperv_net.h @@ -765,7 +765,8 @@ struct netvsc_device { u32 max_chn; u32 num_chn; - refcount_t sc_offered; + atomic_t open_chn; + wait_queue_head_t subchan_open; struct rndis_device *extension; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0a9167dd72fb..d18c3326a1f7 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void) net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; init_completion(&net_device->channel_init_wait); + init_waitqueue_head(&net_device->subchan_open); return net_device; } @@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device, struct netvsc_channel *nvchan = &net_device->chan_table[i]; nvchan->channel = device->channel; + u64_stats_init(&nvchan->tx_stats.syncp); + u64_stats_init(&nvchan->rx_stats.syncp); } /* Enable NAPI handler before init callbacks */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 63c98bbbc596..0d78727f1a14 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -315,14 +315,34 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, return slots_used; } -/* Estimate number of page buffers neede to transmit - * Need at most 2 for RNDIS header plus skb body and fragments. - */ -static unsigned int netvsc_get_slots(const struct sk_buff *skb) +static int count_skb_frag_slots(struct sk_buff *skb) +{ + int i, frags = skb_shinfo(skb)->nr_frags; + int pages = 0; + + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + unsigned long size = skb_frag_size(frag); + unsigned long offset = frag->page_offset; + + /* Skip unused frames from start of page */ + offset &= ~PAGE_MASK; + pages += PFN_UP(offset + size); + } + return pages; +} + +static int netvsc_get_slots(struct sk_buff *skb) { - return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)) - + skb_shinfo(skb)->nr_frags - + 2; + char *data = skb->data; + unsigned int offset = offset_in_page(data); + unsigned int len = skb_headlen(skb); + int slots; + int frag_slots; + + slots = DIV_ROUND_UP(offset + len, PAGE_SIZE); + frag_slots = count_skb_frag_slots(skb); + return slots + frag_slots; } static u32 net_checksum_info(struct sk_buff *skb) @@ -360,18 +380,21 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf; - /* We can only transmit MAX_PAGE_BUFFER_COUNT number + /* We will need at most two pages to describe the rndis + * header. We can only transmit MAX_PAGE_BUFFER_COUNT number * of pages in a single packet. If skb is scattered around * more pages we try linearizing it.
*/ - num_data_pgs = netvsc_get_slots(skb); + + num_data_pgs = netvsc_get_slots(skb) + 2; + if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { ++net_device_ctx->eth_stats.tx_scattered; if (skb_linearize(skb)) goto no_memory; - num_data_pgs = netvsc_get_slots(skb); + num_data_pgs = netvsc_get_slots(skb) + 2; if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { ++net_device_ctx->eth_stats.tx_too_big; goto drop; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 85c00e1c52b6..d6308ffda53e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) else netif_napi_del(&nvchan->napi); - if (refcount_dec_and_test(&nvscdev->sc_offered)) - complete(&nvscdev->channel_init_wait); + atomic_inc(&nvscdev->open_chn); + wake_up(&nvscdev->subchan_open); } int rndis_filter_device_add(struct hv_device *dev, @@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->max_chn = 1; net_device->num_chn = 1; - refcount_set(&net_device->sc_offered, 0); - net_device->extension = rndis_device; rndis_device->ndev = net; @@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev, rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, net_device->num_chn); + atomic_set(&net_device->open_chn, 1); num_rss_qs = net_device->num_chn - 1; if (num_rss_qs == 0) return 0; - refcount_set(&net_device->sc_offered, num_rss_qs); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); init_packet = &net_device->channel_init_pkt; @@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev, if (ret) goto out; + wait_for_completion(&net_device->channel_init_wait); if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { ret = -ENODEV; goto out; } - wait_for_completion(&net_device->channel_init_wait); net_device->num_chn = 1 + init_packet->msg.v5_msg.subchn_comp.num_subchannels; + /* wait for all sub channels to open */ + wait_event(net_device->subchan_open, + atomic_read(&net_device->open_chn) == net_device->num_chn); + /* ignore failures from setting rss parameters, still have channels */ rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, net_device->num_chn); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index f37e3c1fd4e7..8dab74a81303 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev) netdev_lockdep_set_classes(dev); - ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); + ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); if (!ipvlan->pcpu_stats) return -ENOMEM; diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 6f6ed75b63c9..765de3bedb88 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val) static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) { struct usb_device *dev = mcs->usbdev; - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, - MCS_RD_RTYPE, 0, reg, val, 2, - msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + void *dmabuf; + int ret; + + dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, + MCS_RD_RTYPE, 0, reg, dmabuf, 2, + msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + + memcpy(val, dmabuf, sizeof(__u16)); +
kfree(dmabuf); return ret; } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2dda72004a7d..928fd892f167 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -7,7 +7,16 @@ menuconfig MDIO_DEVICE help MDIO devices and driver infrastructure code. -if MDIO_DEVICE +config MDIO_BUS + tristate + default m if PHYLIB=m + default MDIO_DEVICE + help + This internal symbol is used for link time dependencies and it + reflects whether the mdio_bus/mdio_device code is built as a + loadable module or built-in. + +if MDIO_BUS config MDIO_BCM_IPROC tristate "Broadcom iProc MDIO bus controller" @@ -28,7 +37,6 @@ config MDIO_BCM_UNIMAC config MDIO_BITBANG tristate "Bitbanged MDIO buses" - depends on !(MDIO_DEVICE=y && PHYLIB=m) help This module implements the MDIO bus protocol in software, for use by low level drivers that export the ability to @@ -127,7 +135,6 @@ config MDIO_THUNDER tristate "ThunderX SOCs MDIO buses" depends on 64BIT depends on PCI - depends on !(MDIO_DEVICE=y && PHYLIB=m) select MDIO_CAVIUM help This driver supports the MDIO interfaces found on Cavium diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d0626bf5c540..5068c582d502 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev) if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); + + /* Now we can run the state machine synchronously */ + phy_state_machine(&phydev->state_queue.work); } /** diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index bd4303944e44..a404552555d4 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch) spin_unlock(&pch->downl); /* see if there is anything from the attached unit to be sent */ if (skb_queue_empty(&pch->file.xq)) { - read_lock(&pch->upl); ppp = pch->ppp; if (ppp) - ppp_xmit_process(ppp); - read_unlock(&pch->upl); + __ppp_xmit_process(ppp); } } static void ppp_channel_push(struct channel *pch) { - local_bh_disable(); - - __ppp_channel_push(pch); - - local_bh_enable(); + read_lock_bh(&pch->upl); + if (pch->ppp) { + (*this_cpu_ptr(pch->ppp->xmit_recursion))++; + __ppp_channel_push(pch); + (*this_cpu_ptr(pch->ppp->xmit_recursion))--; + } else { + __ppp_channel_push(pch); + } + read_unlock_bh(&pch->upl); } /* diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index eac499c58aa7..6dde9a0cfe76 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -131,7 +131,6 @@ static void del_chan(struct pppox_sock *sock) clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); spin_unlock(&chan_lock); - synchronize_rcu(); } static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) @@ -520,6 +519,7 @@ static int pptp_release(struct socket *sock) po = pppox_sk(sk); del_chan(po); + synchronize_rcu(); pppox_unbind_sock(sk); sk->sk_state = PPPOX_DEAD; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 464570409796..ae53e899259f 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -60,11 +60,11 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev) static int __set_port_dev_addr(struct net_device *port_dev, const unsigned char *dev_addr) { - struct sockaddr addr; + struct sockaddr_storage addr; - memcpy(addr.sa_data, dev_addr, port_dev->addr_len); - 
addr.sa_family = port_dev->type; - return dev_set_mac_address(port_dev, &addr); + memcpy(addr.__data, dev_addr, port_dev->addr_len); + addr.ss_family = port_dev->type; + return dev_set_mac_address(port_dev, (struct sockaddr *)&addr); } static int team_port_set_orig_dev_addr(struct team_port *port) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3d4c24572ecd..32ad87345f57 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2598,8 +2598,16 @@ static int __init tun_init(void) goto err_misc; } - register_netdevice_notifier(&tun_notifier_block); + ret = register_netdevice_notifier(&tun_notifier_block); + if (ret) { + pr_err("Can't register netdevice notifier\n"); + goto err_notifier; + } + return 0; + +err_notifier: + misc_deregister(&tun_miscdev); err_misc: rtnl_link_unregister(&tun_link_ops); err_linkops: diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index d1092421aaa7..9a4171b90947 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h @@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, struct asix_rx_fixup_info *rx); int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); +void asix_rx_fixup_common_free(struct asix_common_private *dp); struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 7847436c441e..522d2900cd1d 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, value, index, data, size); } +static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) +{ + /* Reset the variables that have a lifetime outside of + * asix_rx_fixup_internal() so that future processing starts from a + * known set of initial conditions. + */ + + if (rx->ax_skb) { + /* Discard any incomplete Ethernet frame in the netdev buffer */ + kfree_skb(rx->ax_skb); + rx->ax_skb = NULL; + } + + /* Assume the Data header 32-bit word is at the start of the current + * or next URB socket buffer so reset all the state variables. + */ + rx->remaining = 0; + rx->split_head = false; + rx->header = 0; +} + int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, struct asix_rx_fixup_info *rx) { @@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, if (size != ((~rx->header >> 16) & 0x7ff)) { netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", rx->remaining); - if (rx->ax_skb) { - kfree_skb(rx->ax_skb); - rx->ax_skb = NULL; - /* Discard the incomplete netdev Ethernet frame - * and assume the Data header is at the start of - * the current URB socket buffer. 
- */ - } - rx->remaining = 0; + reset_asix_rx_fixup_info(rx); } } @@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, if (size != ((~rx->header >> 16) & 0x7ff)) { netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", rx->header, offset); + reset_asix_rx_fixup_info(rx); return 0; } if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", size); + reset_asix_rx_fixup_info(rx); return 0; } @@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, if (rx->ax_skb) { skb_put_data(rx->ax_skb, skb->data + offset, copy_length); - if (!rx->remaining) + if (!rx->remaining) { usbnet_skb_return(dev, rx->ax_skb); + rx->ax_skb = NULL; + } } offset += (copy_length + 1) & 0xfffe; @@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, if (skb->len != offset) { netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", skb->len, offset); + reset_asix_rx_fixup_info(rx); return 0; } @@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb) return asix_rx_fixup_internal(dev, skb, rx); } +void asix_rx_fixup_common_free(struct asix_common_private *dp) +{ + struct asix_rx_fixup_info *rx; + + if (!dp) + return; + + rx = &dp->rx_fixup_info; + + if (rx->ax_skb) { + kfree_skb(rx->ax_skb); + rx->ax_skb = NULL; + } +} + struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index a3aa0a27dfe5..b2ff88e69a81 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) { + asix_rx_fixup_common_free(dev->driver_priv); kfree(dev->driver_priv); } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 5833f7e2a127..b99a7fb09f8e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* Init LTM */ lan78xx_init_ltm(dev); - dev->net->hard_header_len += TX_OVERHEAD; - dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; - if (dev->udev->speed == USB_SPEED_SUPER) { buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; @@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) return ret; } + dev->net->hard_header_len += TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + /* Init all registers */ ret = lan78xx_reset(dev); - lan78xx_mdio_init(dev); + ret = lan78xx_mdio_init(dev); dev->net->flags |= IFF_MULTICAST; pdata->wol = WAKE_MAGIC; - return 0; + return ret; } static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) @@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf, udev = interface_to_usbdev(intf); udev = usb_get_dev(udev); - ret = -ENOMEM; netdev = alloc_etherdev(sizeof(struct lan78xx_net)); if (!netdev) { - dev_err(&intf->dev, "Error: OOM\n"); - goto out1; + dev_err(&intf->dev, "Error: OOM\n"); + ret = -ENOMEM; + goto out1; } /* netdev_printk() needs this */ @@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf, ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out2; + goto out3; } 
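A quick aside on the error-path fix above: the register_netdev() failure now jumps to a later cleanup label (out3 instead of out2), so the setup done between the two labels is unwound as well. A minimal sketch of the general goto-ladder pattern (all acquire_*/release_* names here are hypothetical stand-ins, not the driver's real steps):

static int acquire_a(void) { return 0; }	/* e.g. allocate the netdev */
static int acquire_b(void) { return 0; }	/* e.g. map I/O and bind */
static int acquire_c(void) { return 0; }	/* e.g. set up PHY/IRQ state */
static int register_thing(void) { return 0; }	/* e.g. register_netdev() */
static void release_a(void) { }
static void release_b(void) { }
static void release_c(void) { }

static int example_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	ret = acquire_c();
	if (ret)
		goto err_release_b;

	ret = register_thing();
	if (ret)
		goto err_release_c;	/* must unwind *everything* acquired */

	return 0;

err_release_c:
	release_c();
err_release_b:
	release_b();
err_release_a:
	release_a();
	return ret;
}

Each failure jumps to the label that releases exactly what has been acquired so far, in reverse order; jumping to too early a label, as the old "goto out2" did, skips a release and leaks the intervening resource.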
usb_set_intfdata(intf, dev); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 5894e3c9468f..8c3733608271 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ @@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf, static void qmi_wwan_disconnect(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - struct qmi_wwan_state *info = (void *)&dev->data; + struct qmi_wwan_state *info; struct list_head *iter; struct net_device *ldev; + /* called twice if separate control and data intf */ + if (!dev) + return; + info = (void *)&dev->data; if (info->flags & QMI_WWAN_FLAG_MUX) { if (!rtnl_trylock()) { restart_syscall(); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 99a26a9efec1..98f17b05c68b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; buf += headroom; /* advance address leaving hole at front of pkt */ - ctx = (void *)(unsigned long)len; get_page(alloc_frag->page); alloc_frag->offset += len + headroom; hole = alloc_frag->size - alloc_frag->offset; if (hole < len + headroom) { /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to - * the current buffer. This extra space is not included in - * the truesize stored in ctx. + * the current buffer. 
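+ * The buffer length stored in ctx below now includes this extra
+ * space, so the value later used as truesize matches what the
+ * buffer actually consumes.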
*/ len += hole; alloc_frag->offset += hole; } sg_init_one(rq->sg, buf, len); + ctx = (void *)(unsigned long)len; err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -2743,9 +2742,9 @@ module_init(virtio_net_driver_init); static __exit void virtio_net_driver_exit(void) { + unregister_virtio_driver(&virtio_net_driver); cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); cpuhp_remove_multi_state(virtionet_online); - unregister_virtio_driver(&virtio_net_driver); } module_exit(virtio_net_driver_exit); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 96aa7e6cf214..e17baac70f43 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, out: skb_gro_remcsum_cleanup(skb, &grc); + skb->remcsum_offload = 0; NAPI_GRO_CB(skb)->flush |= flush; return pp; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 2153e8062b4c..5cc3a07dda9e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -214,7 +214,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, /* Make sure there's enough writeable headroom */ if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { - head_delta = drvr->hdrlen - skb_headroom(skb); + head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0); brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n", brcmf_ifname(ifp), head_delta); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index fbcbb4325936..f3556122c6ac 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -2053,12 +2053,13 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt) atomic_inc(&stats->pktcow_failed); return -ENOMEM; } + head_pad = 0; } skb_push(pkt, head_pad); dat_buf = (u8 *)(pkt->data); } memset(dat_buf, 0, head_pad + bus->tx_hdrlen); - return 0; + return head_pad; } /** @@ -4174,11 +4175,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) goto fail; } - /* allocate scatter-gather table. sg support - * will be disabled upon allocation failure. 
- */ - brcmf_sdiod_sgtable_alloc(bus->sdiodev); - /* Query the F2 block size, set roundup accordingly */ bus->blocksize = bus->sdiodev->func[2]->cur_blksize; bus->roundup = min(max_roundup, bus->blocksize); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index adaa2f0097cc..fb40ddfced99 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -1189,11 +1189,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) next_reclaimed; IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", next_reclaimed); + iwlagn_check_ratid_empty(priv, sta_id, tid); } iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); - iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; /* process frames */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index 545d14b0bc92..f5c1127253cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -55,8 +55,8 @@ static inline bool iwl_trace_data(struct sk_buff *skb) /* also account for the RFC 1042 header, of course */ offs += 6; - return skb->len > offs + 2 && - *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE); + return skb->len <= offs + 2 || + *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE); } static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index bcde1ba0f1c8..c7b1e58e3384 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1084,7 +1084,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { + /* + * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART + * so later code will - from now on - see that we're doing it. 
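+ * (HW_RESTART_REQUESTED is set in iwl_mvm_nic_restart() just before
+ * ieee80211_restart_hw() is called.)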
+ */ + set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); } else { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index eaacfaf37206..ddd8719f27b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1090,6 +1090,7 @@ struct iwl_mvm { * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running + * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running @@ -1101,6 +1102,7 @@ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_CTKILL, IWL_MVM_STATUS_ROC_RUNNING, + IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4d1188b8736a..9c175d5e9d67 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1235,9 +1235,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, - NULL); - } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)) { + NULL); + } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; IWL_ERR(mvm, @@ -1268,6 +1267,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) if (fw_error && mvm->fw_restart > 0) mvm->fw_restart--; + set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); ieee80211_restart_hw(mvm->hw); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4df5f13fcdae..ab66b4394dfc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -277,6 +277,18 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data) /* Timer expired */ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); + + /* + * sta should be valid unless the following happens: + * The firmware asserts which triggers a reconfig flow, but + * the reconfig fails before we set the pointer to sta into + * the fw_id_to_mac_id pointer table. Mac80211 can't stop + * A-MPDU and hence the timer continues to run. Then, the + * timer expires and sta is NULL.
+ */ + if (!sta) + goto unlock; + mvm_sta = iwl_mvm_sta_from_mac80211(sta); ieee80211_stop_rx_ba_session_offl(mvm_sta->vif, sta->addr, ba_data->tid); @@ -2015,7 +2027,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) IWL_MAX_TID_COUNT, wdg_timeout); - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) mvm->probe_queue = queue; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_dev_queue = queue; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 92b3a55d0fbc..f95eec52508e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3150,7 +3150,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->d0i3_waitq); if (trans_pcie->msix_enabled) { - if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) + ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); + if (ret) goto out_no_pci; } else { ret = iwl_pcie_alloc_ict(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index de50418adae5..034bdb4a0b06 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -298,6 +298,9 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans_pcie->txq[i]; + if (!test_bit(i, trans_pcie->queue_used)) + continue; + spin_lock_bh(&txq->lock); if (txq->need_update) { iwl_pcie_txq_inc_wr_ptr(trans, txq); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 2a7ad5ffe997..cd5dc6dcb19f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -846,9 +846,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) return false; } - if (rtlpriv->cfg->ops->get_btc_status()) - rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); - bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index fb1ebb01133f..70723e67b7d7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2547,7 +2547,6 @@ struct bt_coexist_info { struct rtl_btc_ops { void (*btc_init_variables) (struct rtl_priv *rtlpriv); void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); - void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3b77cfe5aa1e..37046ac2c441 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl, c.directive.opcode = nvme_admin_directive_recv; c.directive.nsid = cpu_to_le32(nsid); - c.directive.numd = cpu_to_le32(sizeof(*s)); + c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; c.directive.dtype = NVME_DIR_STREAMS; @@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, blk_queue_write_cache(q, vwc, vwc); } 
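A note on the nvme_get_stream_params() change above: per the NVMe specification, NUMD ("number of dwords") fields carry a zero-based count of 32-bit dwords, not a byte count, which is why the fix shifts by two and subtracts one. A minimal sketch of the conversion (the helper name is hypothetical, not part of this patch; it assumes len is a non-zero multiple of 4):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Convert a byte length to the 0-based dword count NVMe NUMD expects. */
static inline __le32 nvme_bytes_to_numd(size_t len)
{
	return cpu_to_le32((len >> 2) - 1);
}

With len = sizeof(*s), this yields exactly the corrected c.directive.numd value.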
-static void nvme_configure_apst(struct nvme_ctrl *ctrl) +static int nvme_configure_apst(struct nvme_ctrl *ctrl) { /* * APST (Autonomous Power State Transition) lets us program a @@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) * then don't do anything. */ if (!ctrl->apsta) - return; + return 0; if (ctrl->npss > 31) { dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); - return; + return 0; } table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) - return; + return 0; if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { /* Turn off APST. */ @@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); kfree(table); + return ret; } static void nvme_set_latency_tolerance(struct device *dev, s32 val) @@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) * In fabrics we need to verify the cntlid matches the * admin connect */ - if (ctrl->cntlid != le16_to_cpu(id->cntlid)) + if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { ret = -EINVAL; + goto out_free; + } if (!ctrl->opts->discovery_nqn && !ctrl->kas) { dev_err(ctrl->device, "keep-alive support is mandatory for fabrics\n"); ret = -EINVAL; + goto out_free; } } else { ctrl->cntlid = le16_to_cpu(id->cntlid); @@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) else if (!ctrl->apst_enabled && prev_apst_enabled) dev_pm_qos_hide_latency_tolerance(ctrl->device); - nvme_configure_apst(ctrl); - nvme_configure_directives(ctrl); + ret = nvme_configure_apst(ctrl); + if (ret < 0) + return ret; + + ret = nvme_configure_directives(ctrl); + if (ret < 0) + return ret; ctrl->identified = true; + return 0; + +out_free: + kfree(id); return ret; } EXPORT_SYMBOL_GPL(nvme_init_identify); @@ -1995,15 +2008,20 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, int serial_len = sizeof(ctrl->serial); int model_len = sizeof(ctrl->model); + if (!uuid_is_null(&ns->uuid)) + return sprintf(buf, "uuid.%pU\n", &ns->uuid); + if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) return sprintf(buf, "eui.%16phN\n", ns->nguid); if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) return sprintf(buf, "eui.%8phN\n", ns->eui); - while (ctrl->serial[serial_len - 1] == ' ') + while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' || + ctrl->serial[serial_len - 1] == '\0')) serial_len--; - while (ctrl->model[model_len - 1] == ' ') + while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' || + ctrl->model[model_len - 1] == '\0')) model_len--; return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, @@ -2709,7 +2727,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) mutex_lock(&ctrl->namespaces_mutex); /* Forcibly unquiesce queues to avoid blocking dispatch */ - blk_mq_unquiesce_queue(ctrl->admin_q); + if (ctrl->admin_q) + blk_mq_unquiesce_queue(ctrl->admin_q); list_for_each_entry(ns, &ctrl->namespaces, list) { /* diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index d666ada39a9b..5c2a08ef08ba 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1888,7 +1888,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, * the target device is present */ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) - return BLK_STS_IOERR; + goto busy; if (!nvme_fc_ctrl_get(ctrl)) return BLK_STS_IOERR; @@ -1958,22 +1958,25 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, queue->lldd_handle, &op->fcp_req); if (ret) { - if 
(op->rq) /* normal request */ + if (!(op->flags & FCOP_FLAGS_AEN)) nvme_fc_unmap_data(ctrl, op->rq, op); - /* else - aen. no cleanup needed */ nvme_fc_ctrl_put(ctrl); - if (ret != -EBUSY) + if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && + ret != -EBUSY) return BLK_STS_IOERR; - if (op->rq) - blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY); - - return BLK_STS_RESOURCE; + goto busy; } return BLK_STS_OK; + +busy: + if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx) + blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY); + + return BLK_STS_RESOURCE; } static blk_status_t @@ -2802,66 +2805,70 @@ out_fail: return ERR_PTR(ret); } -enum { - FCT_TRADDR_ERR = 0, - FCT_TRADDR_WWNN = 1 << 0, - FCT_TRADDR_WWPN = 1 << 1, -}; struct nvmet_fc_traddr { u64 nn; u64 pn; }; -static const match_table_t traddr_opt_tokens = { - { FCT_TRADDR_WWNN, "nn-%s" }, - { FCT_TRADDR_WWPN, "pn-%s" }, - { FCT_TRADDR_ERR, NULL } -}; - static int -nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf) +__nvme_fc_parse_u64(substring_t *sstr, u64 *val) { - substring_t args[MAX_OPT_ARGS]; - char *options, *o, *p; - int token, ret = 0; u64 token64; - options = o = kstrdup(buf, GFP_KERNEL); - if (!options) - return -ENOMEM; + if (match_u64(sstr, &token64)) + return -EINVAL; + *val = token64; - while ((p = strsep(&o, ":\n")) != NULL) { - if (!*p) - continue; + return 0; +} - token = match_token(p, traddr_opt_tokens, args); - switch (token) { - case FCT_TRADDR_WWNN: - if (match_u64(args, &token64)) { - ret = -EINVAL; - goto out; - } - traddr->nn = token64; - break; - case FCT_TRADDR_WWPN: - if (match_u64(args, &token64)) { - ret = -EINVAL; - goto out; - } - traddr->pn = token64; - break; - default: - pr_warn("unknown traddr token or missing value '%s'\n", - p); - ret = -EINVAL; - goto out; - } - } +/* + * This routine validates and extracts the WWNs from the TRADDR string. + * As the kernel parsers need a 0x prefix to determine the number base, + * universally build the string to parse with a 0x prefix before parsing + * the name strings.
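+ * The two accepted forms are "nn-0x<wwnn>:pn-0x<wwpn>" and
+ * "nn-<wwnn>:pn-<wwpn>", with each name a fixed-width hex string
+ * (see the fixed-offset checks below).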
+ */ +static int +nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) +{ + char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; + substring_t wwn = { name, &name[sizeof(name)-1] }; + int nnoffset, pnoffset; + + /* validate that the string is in one of the 2 allowed formats */ + if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && + !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && + !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], + "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { + nnoffset = NVME_FC_TRADDR_OXNNLEN; + pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + + NVME_FC_TRADDR_OXNNLEN; + } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && + !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && + !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], + "pn-", NVME_FC_TRADDR_NNLEN))) { + nnoffset = NVME_FC_TRADDR_NNLEN; + pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; + } else + goto out_einval; -out: - kfree(options); - return ret; + name[0] = '0'; + name[1] = 'x'; + name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; + + memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); + if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) + goto out_einval; + + memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); + if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) + goto out_einval; + + return 0; + +out_einval: + pr_warn("%s: bad traddr string\n", __func__); + return -EINVAL; } static struct nvme_ctrl * @@ -2875,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) unsigned long flags; int ret; - ret = nvme_fc_parse_address(&raddr, opts->traddr); + ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); if (ret || !raddr.nn || !raddr.pn) return ERR_PTR(-EINVAL); - ret = nvme_fc_parse_address(&laddr, opts->host_traddr); + ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); if (ret || !laddr.nn || !laddr.pn) return ERR_PTR(-EINVAL); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8569ee771269..74a124a06264 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev) if (dev->cmb) { iounmap(dev->cmb); dev->cmb = NULL; - if (dev->cmbsz) { - sysfs_remove_file_from_group(&dev->ctrl.device->kobj, - &dev_attr_cmb.attr, NULL); - dev->cmbsz = 0; - } + sysfs_remove_file_from_group(&dev->ctrl.device->kobj, + &dev_attr_cmb.attr, NULL); + dev->cmbsz = 0; } } @@ -1619,7 +1617,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev) static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) { struct nvme_host_mem_buf_desc *descs; - u32 chunk_size, max_entries; + u32 chunk_size, max_entries, len; int i = 0; void **bufs; u64 size = 0, tmp; @@ -1638,10 +1636,10 @@ retry: if (!bufs) goto out_free_descs; - for (size = 0; size < preferred; size += chunk_size) { - u32 len = min_t(u64, chunk_size, preferred - size); + for (size = 0; size < preferred; size += len) { dma_addr_t dma_addr; + len = min_t(u64, chunk_size, preferred - size); bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); if (!bufs[i]) @@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) /* * CMBs can currently only exist on >=1.2 PCIe devices. We only - * populate sysfs if a CMB is implemented. Note that we add the - * CMB attribute to the nvme_ctrl kobj which removes the need to remove - * it on exit.
Since nvme_dev_attrs_group has no name we can pass - * NULL as final argument to sysfs_add_file_to_group. + * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group + * has no name we can pass NULL as final argument to + * sysfs_add_file_to_group. */ if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { dev->cmb = nvme_map_cmb(dev); - - if (dev->cmbsz) { + if (dev->cmb) { if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, &dev_attr_cmb.attr, NULL)) dev_warn(dev->ctrl.device, diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index d5801c150b1c..1b7f2520a20d 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -114,6 +114,11 @@ struct nvmet_fc_tgtport { struct kref ref; }; +struct nvmet_fc_defer_fcp_req { + struct list_head req_list; + struct nvmefc_tgt_fcp_req *fcp_req; +}; + struct nvmet_fc_tgt_queue { bool ninetypercent; u16 qid; @@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue { struct nvmet_fc_tgt_assoc *assoc; struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ struct list_head fod_list; + struct list_head pending_cmd_list; + struct list_head avail_defer_list; struct workqueue_struct *work_q; struct kref ref; } __aligned(sizeof(unsigned long long)); @@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); +static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_fcp_iod *fod); /* *********************** FC-NVME DMA Handling **************************** */ @@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod * nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) { static struct nvmet_fc_fcp_iod *fod; - unsigned long flags; - spin_lock_irqsave(&queue->qlock, flags); + lockdep_assert_held(&queue->qlock); + fod = list_first_entry_or_null(&queue->fod_list, struct nvmet_fc_fcp_iod, fcp_list); if (fod) { @@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) * will "inherit" that reference. */ } - spin_unlock_irqrestore(&queue->qlock, flags); return fod; } static void +nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_tgt_queue *queue, + struct nvmefc_tgt_fcp_req *fcpreq) +{ + struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; + + /* + * put all admin cmds on hw queue id 0. All io commands go to + * the respective hw queue based on a modulo basis + */ + fcpreq->hwqid = queue->qid ? 
+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; + + if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) + queue_work_on(queue->cpu, queue->work_q, &fod->work); + else + nvmet_fc_handle_fcp_rqst(tgtport, fod); +} + +static void nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, struct nvmet_fc_fcp_iod *fod) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; struct nvmet_fc_tgtport *tgtport = fod->tgtport; + struct nvmet_fc_defer_fcp_req *deferfcp; unsigned long flags; fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, @@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, fcpreq->nvmet_fc_private = NULL; - spin_lock_irqsave(&queue->qlock, flags); - list_add_tail(&fod->fcp_list, &fod->queue->fod_list); fod->active = false; fod->abort = false; fod->aborted = false; fod->writedataactive = false; fod->fcpreq = NULL; + + tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); + + spin_lock_irqsave(&queue->qlock, flags); + deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, + struct nvmet_fc_defer_fcp_req, req_list); + if (!deferfcp) { + list_add_tail(&fod->fcp_list, &fod->queue->fod_list); + spin_unlock_irqrestore(&queue->qlock, flags); + + /* Release reference taken at queue lookup and fod allocation */ + nvmet_fc_tgt_q_put(queue); + return; + } + + /* Re-use the fod for the next pending cmd that was deferred */ + list_del(&deferfcp->req_list); + + fcpreq = deferfcp->fcp_req; + + /* deferfcp can be reused for another IO at a later date */ + list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); + spin_unlock_irqrestore(&queue->qlock, flags); + /* Save NVME CMD IO in fod */ + memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); + + /* Setup new fcpreq to be processed */ + fcpreq->rspaddr = NULL; + fcpreq->rsplen = 0; + fcpreq->nvmet_fc_private = fod; + fod->fcpreq = fcpreq; + fod->active = true; + + /* inform LLDD IO is now being processed */ + tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); + + /* Submit deferred IO for processing */ + nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); + /* - * release the reference taken at queue lookup and fod allocation + * Leave the queue lookup get reference taken when + * fod was originally allocated. 
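+ * That reference is simply inherited by the deferred command that
+ * now owns this fod.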
*/ - nvmet_fc_tgt_q_put(queue); - - tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); } static int @@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, queue->port = assoc->tgtport->port; queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); INIT_LIST_HEAD(&queue->fod_list); + INIT_LIST_HEAD(&queue->avail_defer_list); + INIT_LIST_HEAD(&queue->pending_cmd_list); atomic_set(&queue->connected, 0); atomic_set(&queue->sqtail, 0); atomic_set(&queue->rsn, 1); @@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) { struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; struct nvmet_fc_fcp_iod *fod = queue->fod; + struct nvmet_fc_defer_fcp_req *deferfcp; unsigned long flags; int i, writedataactive; bool disconnect; @@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) } } } + + /* Cleanup deferred IOs in queue */ + list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) { + list_del(&deferfcp->req_list); + kfree(deferfcp); + } + + for (;;) { + deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, struct nvmet_fc_defer_fcp_req, req_list); + if (!deferfcp) + break; + + list_del(&deferfcp->req_list); + spin_unlock_irqrestore(&queue->qlock, flags); + + tgtport->ops->defer_rcv(&tgtport->fc_target_port, + deferfcp->fcp_req); + + tgtport->ops->fcp_abort(&tgtport->fc_target_port, + deferfcp->fcp_req); + + tgtport->ops->fcp_req_release(&tgtport->fc_target_port, + deferfcp->fcp_req); + + kfree(deferfcp); + + spin_lock_irqsave(&queue->qlock, flags); + } spin_unlock_irqrestore(&queue->qlock, flags); flush_workqueue(queue->work_q); @@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc * layer for processing. * - * The nvmet-fc layer will copy cmd payload to an internal structure for - * processing. As such, upon completion of the routine, the LLDD may - * immediately free/reuse the CMD IU buffer passed in the call. + * The nvmet_fc layer allocates a local job structure (struct + * nvmet_fc_fcp_iod) from the queue for the io and copies the + * CMD IU buffer to the job structure. As such, on a successful + * completion (returns 0), the LLDD may immediately free/reuse + * the CMD IU buffer passed in the call. + * - * If this routine returns error, the lldd should abort the exchange. + * However, in some circumstances, due to the packetized nature of FC + * and the API of the FC LLDD - which may issue a hw command to send the + * response but not get the hw completion for that command before a new + * command is asynchronously received - it's possible for a command to be + * received before the LLDD and nvmet_fc have recycled the job structure. + * It gives the appearance of more commands received than fit in the sq. + * To alleviate this scenario, a temporary queue is maintained in the + * transport for pending LLDD requests waiting for a queue job structure. + * In these "overrun" cases, a temporary queue element is allocated, + * the LLDD request and CMD IU buffer information are remembered, and the + * routine returns a -EOVERFLOW status. Subsequently, when a queue job + * structure is freed, it is immediately reallocated for anything on the + * pending request list. The LLDD's defer_rcv() callback is called, + * informing the LLDD that it may reuse the CMD IU buffer, and the io + * is then started normally with the transport.
+ * + * The LLDD, when receiving an -EOVERFLOW completion status, is to treat + * the completion as successful but must not reuse the CMD IU buffer + * until the LLDD's defer_rcv() callback has been called for the + * corresponding struct nvmefc_tgt_fcp_req pointer. + * + * If there is any other condition in which an error occurs, the + * transport will return a non-zero status indicating the error. + * In all cases other than -EOVERFLOW, the transport has not accepted the + * request and the LLDD should abort the exchange. * * @target_port: pointer to the (registered) target port the FCP CMD IU * was received on. @@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; struct nvmet_fc_tgt_queue *queue; struct nvmet_fc_fcp_iod *fod; + struct nvmet_fc_defer_fcp_req *deferfcp; + unsigned long flags; /* validate iu, so the connection id can be used to find the queue */ if ((cmdiubuf_len != sizeof(*cmdiu)) || @@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, * when the fod is freed. */ + spin_lock_irqsave(&queue->qlock, flags); + fod = nvmet_fc_alloc_fcp_iod(queue); - if (!fod) { + if (fod) { + spin_unlock_irqrestore(&queue->qlock, flags); + + fcpreq->nvmet_fc_private = fod; + fod->fcpreq = fcpreq; + + memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); + + nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); + + return 0; + } + + if (!tgtport->ops->defer_rcv) { + spin_unlock_irqrestore(&queue->qlock, flags); /* release the queue lookup reference */ nvmet_fc_tgt_q_put(queue); return -ENOENT; } - fcpreq->nvmet_fc_private = fod; - fod->fcpreq = fcpreq; - /* - * put all admin cmds on hw queue id 0. All io commands go to - * the respective hw queue based on a modulo basis - */ - fcpreq->hwqid = queue->qid ? 
- ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; - memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); + deferfcp = list_first_entry_or_null(&queue->avail_defer_list, + struct nvmet_fc_defer_fcp_req, req_list); + if (deferfcp) { + /* Just re-use one that was previously allocated */ + list_del(&deferfcp->req_list); + } else { + spin_unlock_irqrestore(&queue->qlock, flags); - if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) - queue_work_on(queue->cpu, queue->work_q, &fod->work); - else - nvmet_fc_handle_fcp_rqst(tgtport, fod); + /* Now we need to dynamically allocate one */ + deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); + if (!deferfcp) { + /* release the queue lookup reference */ + nvmet_fc_tgt_q_put(queue); + return -ENOMEM; + } + spin_lock_irqsave(&queue->qlock, flags); + } - return 0; + /* For now, use rspaddr / rsplen to save payload information */ + fcpreq->rspaddr = cmdiubuf; + fcpreq->rsplen = cmdiubuf_len; + deferfcp->fcp_req = fcpreq; + + /* defer processing till a fod becomes available */ + list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); + + /* NOTE: the queue lookup reference is still valid */ + + spin_unlock_irqrestore(&queue->qlock, flags); + + return -EOVERFLOW; } EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); @@ -2293,66 +2449,70 @@ nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, } EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); -enum { - FCT_TRADDR_ERR = 0, - FCT_TRADDR_WWNN = 1 << 0, - FCT_TRADDR_WWPN = 1 << 1, -}; struct nvmet_fc_traddr { u64 nn; u64 pn; }; -static const match_table_t traddr_opt_tokens = { - { FCT_TRADDR_WWNN, "nn-%s" }, - { FCT_TRADDR_WWPN, "pn-%s" }, - { FCT_TRADDR_ERR, NULL } -}; - static int -nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf) +__nvme_fc_parse_u64(substring_t *sstr, u64 *val) { - substring_t args[MAX_OPT_ARGS]; - char *options, *o, *p; - int token, ret = 0; u64 token64; - options = o = kstrdup(buf, GFP_KERNEL); - if (!options) - return -ENOMEM; + if (match_u64(sstr, &token64)) + return -EINVAL; + *val = token64; - while ((p = strsep(&o, ":\n")) != NULL) { - if (!*p) - continue; + return 0; +} - token = match_token(p, traddr_opt_tokens, args); - switch (token) { - case FCT_TRADDR_WWNN: - if (match_u64(args, &token64)) { - ret = -EINVAL; - goto out; - } - traddr->nn = token64; - break; - case FCT_TRADDR_WWPN: - if (match_u64(args, &token64)) { - ret = -EINVAL; - goto out; - } - traddr->pn = token64; - break; - default: - pr_warn("unknown traddr token or missing value '%s'\n", - p); - ret = -EINVAL; - goto out; - } - } +/* + * This routine validates and extracts the WWNs from the TRADDR string. + * As kernel parsers need the 0x prefix to determine the number base, + * universally build the string to parse with a 0x prefix before parsing + * the name strings.
*/ +static int +nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) +{ + char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; + substring_t wwn = { name, &name[sizeof(name)-1] }; + int nnoffset, pnoffset; + + /* validate if string is one of the 2 allowed formats */ + if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && + !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && + !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], + "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { + nnoffset = NVME_FC_TRADDR_OXNNLEN; + pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + + NVME_FC_TRADDR_OXNNLEN; + } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && + !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && + !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], + "pn-", NVME_FC_TRADDR_NNLEN))) { + nnoffset = NVME_FC_TRADDR_NNLEN; + pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; + } else + goto out_einval; + + name[0] = '0'; + name[1] = 'x'; + name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; + + memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); + if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) + goto out_einval; + + memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); + if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) + goto out_einval; -out: - kfree(options); - return ret; + return 0; + +out_einval: + pr_warn("%s: bad traddr string\n", __func__); + return -EINVAL; } static int @@ -2370,7 +2530,8 @@ nvmet_fc_add_port(struct nvmet_port *port) /* map the traddr address info to a target port */ - ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr); + ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, + sizeof(port->disc_addr.traddr)); if (ret) return ret; diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 6ce72aa65425..ab21c846eb27 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -476,7 +476,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int i; for (i = 0; i < nr_irqs; i++, res++) - if (!of_irq_to_resource(dev, i, res)) + if (of_irq_to_resource(dev, i, res) <= 0) break; return i; diff --git a/drivers/of/property.c b/drivers/of/property.c index eda50b4be934..067f9fab7b77 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -708,6 +708,15 @@ struct device_node *of_graph_get_port_parent(struct device_node *node) { unsigned int depth; + if (!node) + return NULL; + + /* + * Preserve usecount for passed in node as of_get_next_parent() + * will do of_node_put() on it. + */ + of_node_get(node); + /* Walk 3 levels up only if there is 'ports' node. */ for (depth = 3; depth && node; depth--) { node = of_get_next_parent(node); @@ -728,12 +737,16 @@ EXPORT_SYMBOL(of_graph_get_port_parent); struct device_node *of_graph_get_remote_port_parent( const struct device_node *node) { - struct device_node *np; + struct device_node *np, *pp; /* Get remote endpoint node.
*/ np = of_graph_get_remote_endpoint(node); - return of_graph_get_port_parent(np); + pp = of_graph_get_port_parent(np); + + of_node_put(np); + + return pp; } EXPORT_SYMBOL(of_graph_get_remote_port_parent); diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 055f83fddc18..b1ff46fe4547 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -333,11 +333,11 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun /* Update the symlink to the real device */ sysfs_remove_link(&entry->kobj, "device"); + write_unlock(&entry->rw_lock); + ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); WARN_ON(ret); - write_unlock(&entry->rw_lock); - printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n", entry->name, buf); @@ -954,7 +954,7 @@ static struct attribute *pdcs_subsys_attrs[] = { NULL, }; -static struct attribute_group pdcs_attr_group = { +static const struct attribute_group pdcs_attr_group = { .attrs = pdcs_subsys_attrs, }; @@ -998,6 +998,7 @@ pdcs_register_pathentries(void) /* kobject is now registered */ write_lock(&entry->rw_lock); entry->ready = 2; + write_unlock(&entry->rw_lock); /* Add a nice symlink to the real device */ if (entry->dev) { @@ -1005,7 +1006,6 @@ pdcs_register_pathentries(void) WARN_ON(err); } - write_unlock(&entry->rw_lock); kobject_uevent(&entry->kobj, KOBJ_ADD); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index af0cc3456dc1..b4b7eab29400 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4260,6 +4260,41 @@ int pci_reset_function(struct pci_dev *dev) EXPORT_SYMBOL_GPL(pci_reset_function); /** + * pci_reset_function_locked - quiesce and reset a PCI device function + * @dev: PCI device to reset + * + * Some devices allow an individual function to be reset without affecting + * other functions in the same device. The PCI device must be responsive + * to PCI config space in order to use this function. + * + * This function does not just reset the PCI portion of a device, but + * clears all the state associated with the device. This function differs + * from __pci_reset_function() in that it saves and restores device state + * over the reset. It also differs from pci_reset_function() in that it + * requires the PCI device lock to be held. + * + * Returns 0 if the device function was successfully reset or negative if the + * device doesn't support resetting a single function. 
+ */ +int pci_reset_function_locked(struct pci_dev *dev) +{ + int rc; + + rc = pci_probe_reset_function(dev); + if (rc) + return rc; + + pci_dev_save_and_disable(dev); + + rc = __pci_reset_function_locked(dev); + + pci_dev_restore(dev); + + return rc; +} +EXPORT_SYMBOL_GPL(pci_reset_function_locked); + +/** * pci_try_reset_function - quiesce and reset a PCI device function * @dev: PCI device to reset * diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index dc459eb1246b..1c5e0f333779 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) if (irq != other_irq) { pr_warn("mismatched PPIs detected.\n"); err = -EINVAL; + goto err_out; } } else { - err = request_irq(irq, handler, - IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", + struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu); + unsigned long irq_flags; + + err = irq_force_affinity(irq, cpumask_of(cpu)); + + if (err && num_possible_cpus() > 1) { + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + goto err_out; + } + + if (platdata && platdata->irq_flags) { + irq_flags = platdata->irq_flags; + } else { + irq_flags = IRQF_PERCPU | + IRQF_NOBALANCING | + IRQF_NO_THREAD; + } + + err = request_irq(irq, handler, irq_flags, "arm-pmu", per_cpu_ptr(&hw_events->percpu_pmu, cpu)); } - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - return err; - } + if (err) + goto err_out; cpumask_set_cpu(cpu, &armpmu->active_irqs); - return 0; + +err_out: + pr_err("unable to request IRQ%d for ARM PMU counters\n", irq); + return err; } int armpmu_request_irqs(struct arm_pmu *armpmu) @@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) enable_percpu_irq(irq, IRQ_TYPE_NONE); return 0; } - - if (irq_force_affinity(irq, cpumask_of(cpu)) && - num_possible_cpus() > 1) { - pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); - } } return 0; diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 69255f53057a..4eafa7a42e52 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) } if (!pmu_has_irq_affinity(pdev->dev.of_node)) { - pr_warn("no interrupt-affinity property for %s, guessing.\n", - of_node_full_name(pdev->dev.of_node)); + pr_warn("no interrupt-affinity property for %pOF, guessing.\n", + pdev->dev.of_node); } /* @@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, } if (ret) { - pr_info("%s: failed to probe PMU!\n", of_node_full_name(node)); + pr_info("%pOF: failed to probe PMU!\n", node); goto out_free; } @@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, out_free_irqs: armpmu_free_irqs(pmu); out_free: - pr_info("%s: failed to register PMU devices!\n", - of_node_full_name(node)); + pr_info("%pOF: failed to register PMU devices!\n", node); armpmu_free(pmu); return ret; } diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index c259848228b4..b242cce10468 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event) } if ((event != event->group_leader) && + !is_software_event(event->group_leader) && (L2_EVT_GROUP(event->group_leader->attr.config) == L2_EVT_GROUP(event->attr.config))) { dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, @@ -558,6 +559,7 @@ static int 
l2_cache_event_init(struct perf_event *event) list_for_each_entry(sibling, &event->group_leader->sibling_list, group_entry) { if ((sibling != event) && + !is_software_event(sibling) && (L2_EVT_GROUP(sibling->attr.config) == L2_EVT_GROUP(event->attr.config))) { dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index 37371b89b14f..64fc59c3ae6d 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig @@ -30,8 +30,8 @@ config PHY_BCM_NS_USB3 tristate "Broadcom Northstar USB 3.0 PHY Driver" depends on ARCH_BCM_IPROC || COMPILE_TEST depends on HAS_IOMEM && OF + depends on MDIO_BUS select GENERIC_PHY - select MDIO_DEVICE help Enable this to support Broadcom USB 3.0 PHY connected to the USB controller on Northstar family. diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 20f1b4493994..04e929fd0ffe 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1548,6 +1548,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = { }, }, { + .ident = "HP Chromebook 11 G5 (Setzer)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), + }, + }, + { .ident = "Acer Chromebook R11 (Cyan)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index 4d4ef42a39b5..86c4b3fab7b0 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = { static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; -static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 }; -static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 }; -static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 }; +static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 }; +static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 }; +static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 }; static const unsigned int mrfld_pwm0_pins[] = { 144 }; static const unsigned int mrfld_pwm1_pins[] = { 145 }; static const unsigned int mrfld_pwm2_pins[] = { 132 }; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index f024e25787fc..0c6d7812d6fd 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -37,7 +37,7 @@ #define IRQ_STATUS 0x10 #define IRQ_WKUP 0x18 -#define NB_FUNCS 2 +#define NB_FUNCS 3 #define GPIO_PER_REG 32 /** @@ -126,6 +126,16 @@ struct armada_37xx_pinctrl { .funcs = {_func1, "gpio"} \ } +#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \ + { \ + .name = _name, \ + .start_pin = _start, \ + .npins = _nr, \ + .reg_mask = _mask, \ + .val = {_v1, _v2, _v3}, \ + .funcs = {_f1, _f2, "gpio"} \ + } + #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \ _f1, _f2) \ { \ @@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = { PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"), PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"), PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"), - PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"), + 
PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"), PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"), PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"), PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"), PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"), - PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"), + PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14), + "mii", "mii_err"), }; const struct armada_37xx_pin_data armada_37xx_pin_nb = { @@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = { }; const struct armada_37xx_pin_data armada_37xx_pin_sb = { - .nr_pins = 29, + .nr_pins = 30, .name = "GPIO2", .groups = armada_37xx_sb_groups, .ngroups = ARRAY_SIZE(armada_37xx_sb_groups), @@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp, { int f; - for (f = 0; f < NB_FUNCS; f++) + for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) if (!strcmp(grp->funcs[f], func)) return f; @@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info) for (j = 0; j < grp->extra_npins; j++) grp->pins[i+j] = grp->extra_pin + j; - for (f = 0; f < NB_FUNCS; f++) { + for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) { int ret; /* check for unique functions and count groups */ ret = armada_37xx_add_function(info->funcs, &funcsize, @@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info) struct armada_37xx_pin_group *gp = &info->groups[g]; int f; - for (f = 0; f < NB_FUNCS; f++) { + for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) { if (strcmp(gp->funcs[f], name) == 0) { *groups = gp->name; groups++; diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig index 3b8026fca057..7e1fe39a56a5 100644 --- a/drivers/pinctrl/stm32/Kconfig +++ b/drivers/pinctrl/stm32/Kconfig @@ -6,29 +6,30 @@ config PINCTRL_STM32 select PINMUX select GENERIC_PINCONF select GPIOLIB + select IRQ_DOMAIN_HIERARCHY select MFD_SYSCON config PINCTRL_STM32F429 bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429 - depends on OF && IRQ_DOMAIN_HIERARCHY + depends on OF default MACH_STM32F429 select PINCTRL_STM32 config PINCTRL_STM32F469 bool "STMicroelectronics STM32F469 pin control" if COMPILE_TEST && !MACH_STM32F469 - depends on OF && IRQ_DOMAIN_HIERARCHY + depends on OF default MACH_STM32F469 select PINCTRL_STM32 config PINCTRL_STM32F746 bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746 - depends on OF && IRQ_DOMAIN_HIERARCHY + depends on OF default MACH_STM32F746 select PINCTRL_STM32 config PINCTRL_STM32H743 bool "STMicroelectronics STM32H743 pin control" if COMPILE_TEST && !MACH_STM32H743 - depends on OF && IRQ_DOMAIN_HIERARCHY + depends on OF default MACH_STM32H743 select PINCTRL_STM32 endif diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index 159580c04b14..47a392bc73c8 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c @@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = { SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */ PINCTRL_SUN7I_A20), SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ + SUNXI_FUNCTION(0x5, "sim"), /* DET */ SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17), diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c index a433a306a2d0..c75e094b2d90 100644 --- 
a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c @@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183}; static const int usb1_muxvals[] = {0, 0}; static const unsigned usb2_pins[] = {184, 185}; static const int usb2_muxvals[] = {0, 0}; -static const unsigned usb3_pins[] = {186, 187}; +static const unsigned usb3_pins[] = {187, 188}; static const int usb3_muxvals[] = {0, 0}; static const unsigned port_range0_pins[] = { 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */ diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c index 787e3967bd5c..f828ee340a98 100644 --- a/drivers/pinctrl/zte/pinctrl-zx.c +++ b/drivers/pinctrl/zte/pinctrl-zx.c @@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, struct zx_pinctrl_soc_info *info = zpctl->info; const struct pinctrl_pin_desc *pindesc = info->pins + group_selector; struct zx_pin_data *data = pindesc->drv_data; - struct zx_mux_desc *mux = data->muxes; - u32 mask = (1 << data->width) - 1; - u32 offset = data->offset; - u32 bitpos = data->bitpos; + struct zx_mux_desc *mux; + u32 mask, offset, bitpos; struct function_desc *func; unsigned long flags; u32 val, mval; @@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, if (!data) return -EINVAL; + mux = data->muxes; + mask = (1 << data->width) - 1; + offset = data->offset; + bitpos = data->bitpos; + func = pinmux_generic_get_function(pctldev, func_selector); if (!func) return -EINVAL; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index b04860703740..80b87954f6dd 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -675,6 +675,7 @@ config PEAQ_WMI tristate "PEAQ 2-in-1 WMI hotkey driver" depends on ACPI_WMI depends on INPUT + select INPUT_POLLDEV help Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s. diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index f8978464df31..dad8f4afa17c 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -626,7 +626,7 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev) * WMI Interface Version 8 4 <version> * WMI buffer length 12 4 4096 */ -static int __init dell_wmi_check_descriptor_buffer(void) +static int dell_wmi_check_descriptor_buffer(void) { struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; @@ -717,9 +717,15 @@ static int dell_wmi_events_set_enabled(bool enable) static int dell_wmi_probe(struct wmi_device *wdev) { + int err; + struct dell_wmi_priv *priv = devm_kzalloc( &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL); + err = dell_wmi_check_descriptor_buffer(); + if (err) + return err; + dev_set_drvdata(&wdev->dev, priv); return dell_wmi_input_setup(wdev); @@ -749,10 +755,6 @@ static int __init dell_wmi_init(void) { int err; - err = dell_wmi_check_descriptor_buffer(); - if (err) - return err; - dmi_check_system(dell_wmi_smbios_list); if (wmi_requires_smbios_request) { diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 61f106377661..480926786cb8 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -36,8 +36,8 @@ static const struct acpi_device_id intel_vbtn_ids[] = { /* In theory, these are HID usages. 
*/ static const struct key_entry intel_vbtn_keymap[] = { - { KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */ - { KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */ + { KE_KEY, 0xC0, { KEY_POWER } }, /* power key press */ + { KE_IGNORE, 0xC1, { KEY_POWER } }, /* power key release */ { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */ { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */ { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */ diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 1a764e311e11..e32ba575e8d9 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -1252,12 +1252,12 @@ static int __init acpi_wmi_init(void) return 0; -err_unreg_class: - class_unregister(&wmi_bus_class); - err_unreg_bus: bus_unregister(&wmi_bus_type); +err_unreg_class: + class_unregister(&wmi_bus_class); + return error; } diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index b77435783ef3..7eacc1c4b3b1 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/uaccess.h> +#include <uapi/linux/sched/types.h> #include "ptp_private.h" @@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc) kfree(ptp); } +static void ptp_aux_kworker(struct kthread_work *work) +{ + struct ptp_clock *ptp = container_of(work, struct ptp_clock, + aux_work.work); + struct ptp_clock_info *info = ptp->info; + long delay; + + delay = info->do_aux_work(info); + + if (delay >= 0) + kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay); +} + /* public interface */ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, @@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, mutex_init(&ptp->pincfg_mux); init_waitqueue_head(&ptp->tsev_wq); + if (ptp->info->do_aux_work) { + char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index); + + kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); + ptp->kworker = kthread_create_worker(0, worker_name ? + worker_name : info->name); + kfree(worker_name); + if (IS_ERR(ptp->kworker)) { + err = PTR_ERR(ptp->kworker); + pr_err("failed to create ptp aux_worker %d\n", err); + goto kworker_err; + } + } + err = ptp_populate_pin_groups(ptp); if (err) goto no_pin_groups; @@ -259,6 +287,9 @@ no_pps: no_device: ptp_cleanup_pin_groups(ptp); no_pin_groups: + if (ptp->kworker) + kthread_destroy_worker(ptp->kworker); +kworker_err: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, index); @@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp) ptp->defunct = 1; wake_up_interruptible(&ptp->tsev_wq); + if (ptp->kworker) { + kthread_cancel_delayed_work_sync(&ptp->aux_work); + kthread_destroy_worker(ptp->kworker); + } + /* Release the clock's resources. 
*/ if (ptp->pps_source) pps_unregister_source(ptp->pps_source); @@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp, } EXPORT_SYMBOL(ptp_find_pin); +int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) +{ + return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay); +} +EXPORT_SYMBOL(ptp_schedule_worker); + /* module operations */ static void __exit ptp_exit(void) diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index d95888974d0c..b86f1bfecd6f 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -22,6 +22,7 @@ #include <linux/cdev.h> #include <linux/device.h> +#include <linux/kthread.h> #include <linux/mutex.h> #include <linux/posix-clock.h> #include <linux/ptp_clock.h> @@ -56,6 +57,8 @@ struct ptp_clock { struct attribute_group pin_attr_group; /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ const struct attribute_group *pin_attr_groups[2]; + struct kthread_worker *kworker; + struct kthread_delayed_work aux_work; }; /* diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 7e0d4f724dda..432fc40990bd 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1, chpid.id = crw0->rsid; switch (crw0->erc) { case CRW_ERC_IPARM: /* Path has come. */ + case CRW_ERC_INIT: if (!chp_is_registered(chpid)) chp_new(chpid); chsc_chp_online(chpid); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8975cd321390..d42e758518ed 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct rtable *rt = (struct rtable *) dst; __be32 *pkey = &ip_hdr(skb)->daddr; - if (rt->rt_gateway) + if (rt && rt->rt_gateway) pkey = &rt->rt_gateway; /* IPv4 */ @@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct rt6_info *rt = (struct rt6_info *) dst; struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; - if (!ipv6_addr_any(&rt->rt6i_gateway)) + if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) pkey = &rt->rt6i_gateway; /* IPv6 */ diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 04efed171c88..f32765d3cbd8 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -212,8 +212,8 @@ static int d7s_probe(struct platform_device *op) writeb(regs, p->regs); - printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n", - op->dev.of_node->full_name, + printk(KERN_INFO PFX "7-Segment Display%pOF at [%s:0x%llx] %s\n", + op->dev.of_node, (regs & D7S_FLIP) ? " (FLIPPED)" : "", op->resource[0].start, sol_compat ? 
"in sol_compat mode" : ""); diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 216f923161d1..a610b8d3d11f 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c @@ -181,8 +181,8 @@ static int flash_probe(struct platform_device *op) } flash.busy = 0; - printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", - op->dev.of_node->full_name, + printk(KERN_INFO "%pOF: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", + op->dev.of_node, flash.read_base, flash.read_size, flash.write_base, flash.write_size); diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 57696fc0b482..0a5013350acd 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c @@ -379,8 +379,8 @@ static int uctrl_probe(struct platform_device *op) } sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); - printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n", - op->dev.of_node->full_name, p->regs, p->irq); + printk(KERN_INFO "%pOF: uctrl regs[0x%p] (irq %d)\n", + op->dev.of_node, p->regs, p->irq); uctrl_get_event_status(p); uctrl_get_external_status(p); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d384f4f86c26..f4538d7a3016 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1230,6 +1230,8 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI depends on SCSI_FC_ATTRS + depends on NVME_TARGET_FC || NVME_TARGET_FC=n + depends on NVME_FC || NVME_FC=n select CRC_T10DIF ---help--- This lpfc driver supports the Emulex LightPulse diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 707ee2f5954d..4591113c49de 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -3198,10 +3198,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg) return -EBUSY; if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) return -EFAULT; - if (qd.cnum == -1) + if (qd.cnum == -1) { + if (qd.id < 0 || qd.id >= dev->maximum_num_containers) + return -EINVAL; qd.cnum = qd.id; - else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) - { + } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) return -EINVAL; qd.instance = dev->scsi_host_ptr->host_no; diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile index 741d81861d17..07b60a780c06 100644 --- a/drivers/scsi/aic7xxx/Makefile +++ b/drivers/scsi/aic7xxx/Makefile @@ -55,9 +55,9 @@ aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \ ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y) $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm - $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \ + $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic7xxx_reg.h \ $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ - $(src)/aic7xxx.seq + $(srctree)/$(src)/aic7xxx.seq $(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h else @@ -72,14 +72,14 @@ aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \ ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y) $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm - $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \ + $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic79xx_reg.h \ $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ - $(src)/aic79xx.seq + $(srctree)/$(src)/aic79xx.seq $(aic79xx-gen-y): $(obj)/aic79xx_seq.h else $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped endif -$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl] - 
$(MAKE) -C $(src)/aicasm +$(obj)/aicasm/aicasm: $(srctree)/$(src)/aicasm/*.[chyl] + $(MAKE) -C $(srctree)/$(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/ diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile index b98c5c1056c3..45e2d49c1fff 100644 --- a/drivers/scsi/aic7xxx/aicasm/Makefile +++ b/drivers/scsi/aic7xxx/aicasm/Makefile @@ -1,19 +1,21 @@ PROG= aicasm +OUTDIR ?= ./ + .SUFFIXES= .l .y .c .h CSRCS= aicasm.c aicasm_symbol.c YSRCS= aicasm_gram.y aicasm_macro_gram.y LSRCS= aicasm_scan.l aicasm_macro_scan.l -GENHDRS= aicdb.h $(YSRCS:.y=.h) -GENSRCS= $(YSRCS:.y=.c) $(LSRCS:.l=.c) +GENHDRS= $(addprefix ${OUTDIR}/,aicdb.h $(YSRCS:.y=.h)) +GENSRCS= $(addprefix ${OUTDIR}/,$(YSRCS:.y=.c) $(LSRCS:.l=.c)) SRCS= ${CSRCS} ${GENSRCS} LIBS= -ldb clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) # Override default kernel CFLAGS. This is a userland app. -AICASM_CFLAGS:= -I/usr/include -I. +AICASM_CFLAGS:= -I/usr/include -I. -I$(OUTDIR) LEX= flex YACC= bison YFLAGS= -d @@ -32,22 +34,25 @@ YFLAGS+= -t -v LFLAGS= -d endif -$(PROG): ${GENHDRS} $(SRCS) - $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG) $(LIBS) +$(PROG): $(OUTDIR) ${GENHDRS} $(SRCS) + $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(OUTDIR)/$(PROG) $(LIBS) + +$(OUTDIR): + mkdir -p $(OUTDIR) -aicdb.h: +$(OUTDIR)/aicdb.h: @if [ -e "/usr/include/db4/db_185.h" ]; then \ - echo "#include <db4/db_185.h>" > aicdb.h; \ + echo "#include <db4/db_185.h>" > $@; \ elif [ -e "/usr/include/db3/db_185.h" ]; then \ - echo "#include <db3/db_185.h>" > aicdb.h; \ + echo "#include <db3/db_185.h>" > $@; \ elif [ -e "/usr/include/db2/db_185.h" ]; then \ - echo "#include <db2/db_185.h>" > aicdb.h; \ + echo "#include <db2/db_185.h>" > $@; \ elif [ -e "/usr/include/db1/db_185.h" ]; then \ - echo "#include <db1/db_185.h>" > aicdb.h; \ + echo "#include <db1/db_185.h>" > $@; \ elif [ -e "/usr/include/db/db_185.h" ]; then \ - echo "#include <db/db_185.h>" > aicdb.h; \ + echo "#include <db/db_185.h>" > $@; \ elif [ -e "/usr/include/db_185.h" ]; then \ - echo "#include <db_185.h>" > aicdb.h; \ + echo "#include <db_185.h>" > $@; \ else \ echo "*** Install db development libraries"; \ fi @@ -58,23 +63,23 @@ clean: # Create a dependency chain in generated files # to avoid concurrent invocations of the single # rule that builds them all. -aicasm_gram.c: aicasm_gram.h -aicasm_gram.c aicasm_gram.h: aicasm_gram.y +$(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h +$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y $(YACC) $(YFLAGS) -b $(<:.y=) $< - mv $(<:.y=).tab.c $(<:.y=.c) - mv $(<:.y=).tab.h $(<:.y=.h) + mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c) + mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h) # Create a dependency chain in generated files # to avoid concurrent invocations of the single # rule that builds them all. 
-aicasm_macro_gram.c: aicasm_macro_gram.h -aicasm_macro_gram.c aicasm_macro_gram.h: aicasm_macro_gram.y +$(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h +$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< - mv $(<:.y=).tab.c $(<:.y=.c) - mv $(<:.y=).tab.h $(<:.y=.h) + mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c) + mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h) -aicasm_scan.c: aicasm_scan.l - $(LEX) $(LFLAGS) -o$@ $< +$(OUTDIR)/aicasm_scan.c: aicasm_scan.l + $(LEX) $(LFLAGS) -o $@ $< -aicasm_macro_scan.c: aicasm_macro_scan.l - $(LEX) $(LFLAGS) -Pmm -o$@ $< +$(OUTDIR)/aicasm_macro_scan.c: aicasm_macro_scan.l + $(LEX) $(LFLAGS) -Pmm -o $@ $< diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7dfe709a7138..6844ba361616 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = { }; /** - * bnx2fc_percpu_thread_create - Create a receive thread for an - * online CPU + * bnx2fc_cpu_online - Create a receive thread for an online CPU * * @cpu: cpu index for the online cpu */ -static void bnx2fc_percpu_thread_create(unsigned int cpu) +static int bnx2fc_cpu_online(unsigned int cpu) { struct bnx2fc_percpu_s *p; struct task_struct *thread; @@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu) thread = kthread_create_on_node(bnx2fc_percpu_io_thread, (void *)p, cpu_to_node(cpu), "bnx2fc_thread/%d", cpu); + if (IS_ERR(thread)) + return PTR_ERR(thread); + /* bind thread to the cpu */ - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, cpu); - p->iothread = thread; - wake_up_process(thread); - } + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + return 0; } -static void bnx2fc_percpu_thread_destroy(unsigned int cpu) +static int bnx2fc_cpu_offline(unsigned int cpu) { struct bnx2fc_percpu_s *p; struct task_struct *thread; @@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) thread = p->iothread; p->iothread = NULL; - /* Free all work in the list */ list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_del_init(&work->list); @@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) if (thread) kthread_stop(thread); -} - - -static int bnx2fc_cpu_online(unsigned int cpu) -{ - printk(PFX "CPU %x online: Create Rx thread\n", cpu); - bnx2fc_percpu_thread_create(cpu); - return 0; -} - -static int bnx2fc_cpu_dead(unsigned int cpu) -{ - printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); - bnx2fc_percpu_thread_destroy(cpu); return 0; } @@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void) spin_lock_init(&p->fp_work_lock); } - get_online_cpus(); - - for_each_online_cpu(cpu) - bnx2fc_percpu_thread_create(cpu); - - rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "scsi/bnx2fc:online", - bnx2fc_cpu_online, NULL); + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", + bnx2fc_cpu_online, bnx2fc_cpu_offline); if (rc < 0) - goto stop_threads; + goto stop_thread; bnx2fc_online_state = rc; - cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead", - NULL, bnx2fc_cpu_dead); - put_online_cpus(); - cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); - return 0; -stop_threads: - for_each_online_cpu(cpu) - bnx2fc_percpu_thread_destroy(cpu); - put_online_cpus(); +stop_thread: kthread_stop(l2_thread); free_wq: destroy_workqueue(bnx2fc_wq); @@ -2803,7 +2775,6 @@ 
static void __exit bnx2fc_mod_exit(void) struct fcoe_percpu_s *bg; struct task_struct *l2_thread; struct sk_buff *skb; - unsigned int cpu = 0; /* * NOTE: Since cnic calls register_driver routine rtnl_lock, @@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void) if (l2_thread) kthread_stop(l2_thread); - get_online_cpus(); - /* Destroy per cpu threads */ - for_each_online_cpu(cpu) { - bnx2fc_percpu_thread_destroy(cpu); - } - - cpuhp_remove_state_nocalls(bnx2fc_online_state); - cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD); - - put_online_cpus(); + cpuhp_remove_state(bnx2fc_online_state); destroy_workqueue(bnx2fc_wq); /* diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 913c750205ce..26de61d65a4d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) return work; } +/* Pending work request completion */ +static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) +{ + unsigned int cpu = wqe % num_possible_cpus(); + struct bnx2fc_percpu_s *fps; + struct bnx2fc_work *work; + + fps = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&fps->fp_work_lock); + if (fps->iothread) { + work = bnx2fc_alloc_work(tgt, wqe); + if (work) { + list_add_tail(&work->list, &fps->work_list); + wake_up_process(fps->iothread); + spin_unlock_bh(&fps->fp_work_lock); + return; + } + } + spin_unlock_bh(&fps->fp_work_lock); + bnx2fc_process_cq_compl(tgt, wqe); +} + int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) { struct fcoe_cqe *cq; @@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) /* Unsolicited event notification */ bnx2fc_process_unsol_compl(tgt, wqe); } else { - /* Pending work request completion */ - struct bnx2fc_work *work = NULL; - struct bnx2fc_percpu_s *fps = NULL; - unsigned int cpu = wqe % num_possible_cpus(); - - fps = &per_cpu(bnx2fc_percpu, cpu); - spin_lock_bh(&fps->fp_work_lock); - if (unlikely(!fps->iothread)) - goto unlock; - - work = bnx2fc_alloc_work(tgt, wqe); - if (work) - list_add_tail(&work->list, - &fps->work_list); -unlock: - spin_unlock_bh(&fps->fp_work_lock); - - /* Pending work request completion */ - if (fps->iothread && work) - wake_up_process(fps->iothread); - else - bnx2fc_process_cq_compl(tgt, wqe); + bnx2fc_pending_work(tgt, wqe); num_free_sqes++; } cqe++; diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 86afc002814c..4ebcda8d9500 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle) /** - * bnx2i_percpu_thread_create - Create a receive thread for an - * online CPU + * bnx2i_cpu_online - Create a receive thread for an online CPU * * @cpu: cpu index for the online cpu */ -static void bnx2i_percpu_thread_create(unsigned int cpu) +static int bnx2i_cpu_online(unsigned int cpu) { struct bnx2i_percpu_s *p; struct task_struct *thread; @@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu) thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, cpu_to_node(cpu), "bnx2i_thread/%d", cpu); + if (IS_ERR(thread)) + return PTR_ERR(thread); + /* bind thread to the cpu */ - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, cpu); - p->iothread = thread; - wake_up_process(thread); - } + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + return 0; } - -static void 
bnx2i_percpu_thread_destroy(unsigned int cpu) +static int bnx2i_cpu_offline(unsigned int cpu) { struct bnx2i_percpu_s *p; struct task_struct *thread; @@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu) spin_unlock_bh(&p->p_work_lock); if (thread) kthread_stop(thread); -} - -static int bnx2i_cpu_online(unsigned int cpu) -{ - pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu); - bnx2i_percpu_thread_create(cpu); - return 0; -} - -static int bnx2i_cpu_dead(unsigned int cpu) -{ - pr_info("CPU %x offline: Remove Rx thread\n", cpu); - bnx2i_percpu_thread_destroy(cpu); return 0; } @@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void) p->iothread = NULL; } - get_online_cpus(); - - for_each_online_cpu(cpu) - bnx2i_percpu_thread_create(cpu); - - err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "scsi/bnx2i:online", - bnx2i_cpu_online, NULL); + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online", + bnx2i_cpu_online, bnx2i_cpu_offline); if (err < 0) - goto remove_threads; + goto unreg_driver; bnx2i_online_state = err; - - cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead", - NULL, bnx2i_cpu_dead); - put_online_cpus(); return 0; -remove_threads: - for_each_online_cpu(cpu) - bnx2i_percpu_thread_destroy(cpu); - put_online_cpus(); +unreg_driver: cnic_unregister_driver(CNIC_ULP_ISCSI); unreg_xport: iscsi_unregister_transport(&bnx2i_iscsi_transport); @@ -551,7 +525,6 @@ out: static void __exit bnx2i_mod_exit(void) { struct bnx2i_hba *hba; - unsigned cpu = 0; mutex_lock(&bnx2i_dev_lock); while (!list_empty(&adapter_list)) { @@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void) } mutex_unlock(&bnx2i_dev_lock); - get_online_cpus(); - - for_each_online_cpu(cpu) - bnx2i_percpu_thread_destroy(cpu); - - cpuhp_remove_state_nocalls(bnx2i_online_state); - cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD); - put_online_cpus(); + cpuhp_remove_state(bnx2i_online_state); iscsi_unregister_transport(&bnx2i_iscsi_transport); cnic_unregister_driver(CNIC_ULP_ISCSI); diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index e4c83b7c96a8..1a4cfa562a60 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -2128,6 +2128,13 @@ void cxgbi_cleanup_task(struct iscsi_task *task) struct iscsi_tcp_task *tcp_task = task->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) { + pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n", + task, task->sc, tcp_task, + tcp_task ? 
tcp_task->dd_data : NULL, tdata); + return; + } + log_debug(1 << CXGBI_DBG_ISCSI, "task 0x%p, skb 0x%p, itt 0x%x.\n", task, tdata->skb, task->hdr_itt); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8914eab84337..4f7cdb28bd38 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = { #endif .sdev_attrs = hpsa_sdev_attrs, .shost_attrs = hpsa_shost_attrs, - .max_sectors = 8192, + .max_sectors = 1024, .no_write_same = 1, }; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4ed48ed38e79..7ee1a94c0b33 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&tgtp->xmt_ls_rsp_error)); len += snprintf(buf+len, PAGE_SIZE-len, - "FCP: Rcv %08x Release %08x Drop %08x\n", + "FCP: Rcv %08x Defer %08x Release %08x " + "Drop %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_defer), atomic_read(&tgtp->xmt_fcp_release), atomic_read(&tgtp->rcv_fcp_cmd_drop)); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 5cc8b0f7d885..744f3f395b64 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) atomic_read(&tgtp->xmt_ls_rsp_error)); len += snprintf(buf + len, size - len, - "FCP: Rcv %08x Drop %08x\n", + "FCP: Rcv %08x Defer %08x Release %08x " + "Drop %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_defer), + atomic_read(&tgtp->xmt_fcp_release), atomic_read(&tgtp->rcv_fcp_cmd_drop)); if (atomic_read(&tgtp->rcv_fcp_cmd_in) != diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index fbeec344c6cc..bbbd0f84160d 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); } +static void +lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, + struct nvmefc_tgt_fcp_req *rsp) +{ + struct lpfc_nvmet_tgtport *tgtp; + struct lpfc_nvmet_rcv_ctx *ctxp = + container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; + struct lpfc_hba *phba = ctxp->phba; + + lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", + ctxp->oxid, ctxp->size, smp_processor_id()); + + tgtp = phba->targetport->private; + atomic_inc(&tgtp->rcv_fcp_cmd_defer); + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ +} + static struct nvmet_fc_target_template lpfc_tgttemplate = { .targetport_delete = lpfc_nvmet_targetport_delete, .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, .fcp_op = lpfc_nvmet_xmt_fcp_op, .fcp_abort = lpfc_nvmet_xmt_fcp_abort, .fcp_req_release = lpfc_nvmet_xmt_fcp_release, + .defer_rcv = lpfc_nvmet_defer_rcv, .max_hw_queues = 1, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, @@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, return; } + /* Processing of FCP command is deferred */ + if (rc == -EOVERFLOW) { + lpfc_nvmeio_data(phba, + "NVMET RCV BUSY: xri x%x sz %d from %06x\n", + oxid, size, sid); + /* defer reposting rcv buffer till .defer_rcv callback */ + ctxp->rqb_buffer = nvmebuf; + atomic_inc(&tgtp->rcv_fcp_cmd_out); + return; + } + atomic_inc(&tgtp->rcv_fcp_cmd_drop); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, "6159 
FCP Drop IO x%x: err x%x: x%x x%x x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index e675ef17be08..48a76788b003 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport { atomic_t rcv_fcp_cmd_in; atomic_t rcv_fcp_cmd_out; atomic_t rcv_fcp_cmd_drop; + atomic_t rcv_fcp_cmd_defer; atomic_t xmt_fcp_release; /* Stats counters - lpfc_nvmet_xmt_fcp_op */ diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f990ab4d45e1..985510628f56 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -425,7 +425,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) int megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) { - u32 max_mpt_cmd, i; + u32 max_mpt_cmd, i, j; struct fusion_context *fusion; fusion = instance->ctrl_context; @@ -450,11 +450,15 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), GFP_KERNEL); if (!fusion->cmd_list[i]) { + for (j = 0; j < i; j++) + kfree(fusion->cmd_list[j]); + kfree(fusion->cmd_list); dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } + return 0; } int diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 4d038926a455..351f06dfc5a0 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -528,7 +528,8 @@ struct fip_vlan { #define QEDF_WRITE (1 << 0) #define MAX_FIBRE_LUNS 0xffffffff -#define QEDF_MAX_NUM_CQS 8 +#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ + num_online_cpus()) /* * PCI function probe defines diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7786c97e033f..1d13c9ca517d 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) * we allocate is the minimum of: * * Number of CPUs - * Number of MSI-X vectors - * Max number allocated in hardware (QEDF_MAX_NUM_CQS) + * Number allocated by qed for our PCI function */ - qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, - num_online_cpus()); + qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", qedf->num_queues); @@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) goto err1; } + /* Learn information crucial for qedf to progress */ + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); + if (rc) { + QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); + goto err1; + } + /* queue allocation code should come here * order should be * slowpath_start @@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) } qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); - /* Learn information crucial for qedf to progress */ - rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); - if (rc) { - QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); - goto err1; - } - /* Record BDQ producer doorbell addresses */ qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig index 21331453db7b..2ff753ce6e27 100644 --- a/drivers/scsi/qedi/Kconfig +++ b/drivers/scsi/qedi/Kconfig @@ -5,6 +5,7 @@ config QEDI select
SCSI_ISCSI_ATTRS select QED_LL2 select QED_ISCSI + select ISCSI_BOOT_SYSFS ---help--- This driver supports iSCSI offload for the QLogic FastLinQ 41000 Series Converged Network Adapters. diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 80edd28b635f..37da9a8b43b1 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -824,7 +824,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, u32 iscsi_cid = QEDI_CID_RESERVED; u16 len = 0; char *buf = NULL; - int ret; + int ret, tmp; if (!shost) { ret = -ENXIO; @@ -940,10 +940,10 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, ep_rel_conn: qedi->ep_tbl[iscsi_cid] = NULL; - ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); - if (ret) + tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); + if (tmp) QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n", - ret); + tmp); ep_free_sq: qedi_free_sq(qedi, qedi_ep); ep_conn_exit: diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index b20da0d27ad7..3f82ea1b72dc 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, static void tcm_qla2xxx_handle_data_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); - unsigned long flags; /* * Ensure that the complete FCP WRITE payload has been received. @@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) */ cmd->cmd_in_wq = 0; - spin_lock_irqsave(&cmd->cmd_lock, flags); - cmd->data_work = 1; - if (cmd->aborted) { - cmd->data_work_free = 1; - spin_unlock_irqrestore(&cmd->cmd_lock, flags); - - tcm_qla2xxx_free_cmd(cmd); - return; - } - spin_unlock_irqrestore(&cmd->cmd_lock, flags); - cmd->qpair->tgt_counters.qla_core_ret_ctio++; if (!cmd->write_data_transferred) { /* @@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) qlt_xmit_tm_rsp(mcmd); } -#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free) static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - unsigned long flags; if (qlt_abort_cmd(cmd)) return; - - spin_lock_irqsave(&cmd->cmd_lock, flags); - if ((cmd->state == QLA_TGT_STATE_NEW)|| - ((cmd->state == QLA_TGT_STATE_DATA_IN) && - DATA_WORK_NOT_FREE(cmd))) { - cmd->data_work_free = 1; - spin_unlock_irqrestore(&cmd->cmd_lock, flags); - /* - * cmd has not reached fw, Use this trigger to free it. 
- */ - tcm_qla2xxx_free_cmd(cmd); - return; - } - spin_unlock_irqrestore(&cmd->cmd_lock, flags); - return; - } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 7e24aa30c3b0..892fbd9800d9 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -1286,7 +1286,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr, unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); - if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) { spin_unlock_irqrestore(shost->host_lock, flags); return -EBUSY; } @@ -2430,8 +2430,10 @@ fc_remove_host(struct Scsi_Host *shost) spin_lock_irqsave(shost->host_lock, flags); /* Remove any vports */ - list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) + list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) { + vport->flags |= FC_VPORT_DELETING; fc_queue_work(shost, &vport->vport_delete_work); + } /* Remove any remote ports */ list_for_each_entry_safe(rport, next_rport, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 1e82d4128a84..d7ff71e0c85c 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -751,32 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, return count; } -static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) -{ - switch (hp->dxfer_direction) { - case SG_DXFER_NONE: - if (hp->dxferp || hp->dxfer_len > 0) - return false; - return true; - case SG_DXFER_FROM_DEV: - if (hp->dxfer_len < 0) - return false; - return true; - case SG_DXFER_TO_DEV: - case SG_DXFER_TO_FROM_DEV: - if (!hp->dxferp || hp->dxfer_len == 0) - return false; - return true; - case SG_DXFER_UNKNOWN: - if ((!hp->dxferp && hp->dxfer_len) || - (hp->dxferp && hp->dxfer_len == 0)) - return false; - return true; - default: - return false; - } -} - static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) @@ -797,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); - if (!sg_is_valid_dxfer(hp)) + if (hp->dxfer_len >= SZ_256M) return -EINVAL; k = sg_start_req(srp, cmnd); diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 07ec8a8877de..e164ffade38a 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat { #define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 -#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) +#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U) #define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) #define RAID_MAP_MAX_ENTRIES 1024 diff --git a/drivers/soc/zte/Kconfig b/drivers/soc/zte/Kconfig index 20bde38ce2f9..e9d750c510cd 100644 --- a/drivers/soc/zte/Kconfig +++ b/drivers/soc/zte/Kconfig @@ -2,6 +2,7 @@ # ZTE SoC drivers # menuconfig SOC_ZTE + depends on ARCH_ZX || COMPILE_TEST bool "ZTE SoC driver support" if SOC_ZTE diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index ca11be21f64b..34ca7823255d 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, continue; } + set_current_state(TASK_RUNNING); wp = 
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index a6a8393d6664..3e00df74b18c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
 			     long m)
 {
 	struct ad2s1210_state *st = iio_priv(indio_dev);
-	bool negative;
+	u16 negative;
 	int ret = 0;
 	u16 pos;
 	s16 vel;
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h
index 9341232c580d..4d0b181a9671 100644
--- a/drivers/staging/media/atomisp/i2c/ap1302.h
+++ b/drivers/staging/media/atomisp/i2c/ap1302.h
@@ -158,8 +158,8 @@ struct ap1302_res_struct {
 };
 
 struct ap1302_context_res {
-	s32 res_num;
-	s32 cur_res;
+	u32 res_num;
+	u32 cur_res;
 	struct ap1302_res_struct *res_table;
 };
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
index f31eb277f542..7d8a0aeecb6c 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -454,6 +454,6 @@ struct gc0310_resolution gc0310_res_video[] = {
 #define N_RES_VIDEO (ARRAY_SIZE(gc0310_res_video))
 
 static struct gc0310_resolution *gc0310_res = gc0310_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
 #endif
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index ccbc757045a5..7c3d994180cc 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -668,5 +668,5 @@ struct gc2235_resolution gc2235_res_video[] = {
 #define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video))
 
 static struct gc2235_resolution *gc2235_res = gc2235_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
 #endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h
index 36b3f3a5a41f..41b4133ca995 100644
--- a/drivers/staging/media/atomisp/i2c/imx/imx.h
+++ b/drivers/staging/media/atomisp/i2c/imx/imx.h
@@ -480,7 +480,7 @@ struct imx_device {
 	struct imx_vcm *vcm_driver;
 	struct imx_otp *otp_driver;
 	const struct imx_resolution *curr_res_table;
-	int entries_curr_table;
+	unsigned long entries_curr_table;
 	const struct firmware *fw;
 	struct imx_reg_addr *reg_addr;
 	const struct imx_reg *param_hold;
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index 944fe8e3bcbf..ab8907e6c9ef 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -934,7 +934,6 @@ static struct ov2680_resolution ov2680_res_video[] = {
 #define N_RES_VIDEO (ARRAY_SIZE(ov2680_res_video))
 
 static struct ov2680_resolution *ov2680_res = ov2680_res_preview;
-static int N_RES = N_RES_PREVIEW;
-
+static unsigned long N_RES = N_RES_PREVIEW;
 #endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
index b0d40965d89e..73ecb1679718 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.h
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -1263,5 +1263,5 @@ struct ov2722_resolution ov2722_res_video[] = {
 #define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video))
 
 static struct ov2722_resolution *ov2722_res = ov2722_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
 #endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
index d88ac1777d86..8c2e6794463b 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -1377,5 +1377,5 @@ struct ov5693_resolution ov5693_res_video[] = {
 #define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
 
 static struct ov5693_resolution *ov5693_res = ov5693_res_preview;
-static int N_RES = N_RES_PREVIEW;
+static unsigned long N_RES = N_RES_PREVIEW;
 #endif
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h
index 9be6a0e63861..d3fde200c013 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858.h
@@ -266,7 +266,7 @@ struct ov8858_device {
 	const struct ov8858_reg *regs;
 	struct ov8858_vcm *vcm_driver;
 	const struct ov8858_resolution *curr_res_table;
-	int entries_curr_table;
+	unsigned long entries_curr_table;
 
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct v4l2_ctrl *run_mode;
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
index 09e3cdc1a394..f9a3cf8fbf1a 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
@@ -266,7 +266,7 @@ struct ov8858_device {
 	const struct ov8858_reg *regs;
 	struct ov8858_vcm *vcm_driver;
 	const struct ov8858_resolution *curr_res_table;
-	int entries_curr_table;
+	unsigned long entries_curr_table;
 
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct v4l2_ctrl *run_mode;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 726eaa293c55..2bd98f0667ec 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -354,7 +354,9 @@ ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
 
 # HACK! While this driver is in bad shape, don't enable several warnings
 #       that would be otherwise enabled with W=1
-ccflags-y += -Wno-unused-const-variable -Wno-missing-prototypes \
-	-Wno-unused-but-set-variable -Wno-missing-declarations \
-	-Wno-suggest-attribute=format -Wno-missing-prototypes \
-	-Wno-implicit-fallthrough
+ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+ccflags-y += $(call cc-disable-warning, missing-prototypes)
+ccflags-y += $(call cc-disable-warning, missing-declarations)
+ccflags-y += $(call cc-disable-warning, suggest-attribute=format)
+ccflags-y += $(call cc-disable-warning, unused-const-variable)
+ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
index d3667132851b..c8e0c4fe3717 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
@@ -275,7 +275,7 @@ struct atomisp_device {
 	 */
 	struct mutex streamoff_mutex;
 
-	int input_cnt;
+	unsigned int input_cnt;
 	struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
 	struct v4l2_subdev *flash;
 	struct v4l2_subdev *motor;
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 370ecb959543..f28916ea69f1 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -1,7 +1,7 @@
 /*
  * cxd2099.c: Driver for the CXD2099AR Common Interface Controller
  *
- * Copyright (C) 2010-2011 Digital Devices GmbH
+ * Copyright (C) 2010-2013 Digital Devices GmbH
 *
 *
 * This program is free software; you can redistribute it and/or
@@ -33,7 +33,10 @@
 
 #include "cxd2099.h"
 
-#define MAX_BUFFER_SIZE 248
+/* comment this line to deactivate the cxd2099ar buffer mode */
+#define BUFFER_MODE 1
+
+static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount);
 
 struct cxd {
 	struct dvb_ca_en50221 en;
@@ -48,6 +51,7 @@ struct cxd {
 	int mode;
 	int ready;
 	int dr;
+	int write_busy;
 	int slot_stat;
 
 	u8 amem[1024];
@@ -55,6 +59,9 @@ struct cxd {
 	int cammode;
 	struct mutex lock;
+
+	u8 rbuf[1028];
+	u8 wbuf[1028];
 };
 
 static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
@@ -73,7 +80,7 @@ static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
 }
 
 static int i2c_write(struct i2c_adapter *adapter, u8 adr,
-		     u8 *data, u8 len)
+		     u8 *data, u16 len)
 {
 	struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len};
 
@@ -100,12 +107,12 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
 }
 
 static int i2c_read(struct i2c_adapter *adapter, u8 adr,
-		    u8 reg, u8 *data, u8 n)
+		    u8 reg, u8 *data, u16 n)
 {
 	struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
-				 .buf = &reg, .len = 1},
-				{.addr = adr, .flags = I2C_M_RD,
-				 .buf = data, .len = n} };
+				   .buf = &reg, .len = 1},
+				  {.addr = adr, .flags = I2C_M_RD,
+				   .buf = data, .len = n} };
 
 	if (i2c_transfer(adapter, msgs, 2) != 2) {
 		dev_err(&adapter->dev, "error in i2c_read\n");
@@ -114,14 +121,26 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
 	return 0;
 }
 
-static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n)
+static int read_block(struct cxd *ci, u8 adr, u8 *data, u16 n)
 {
-	int status;
+	int status = 0;
 
-	status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
+	if (ci->lastaddress != adr)
+		status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
 	if (!status) {
 		ci->lastaddress = adr;
-		status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n);
+
+		while (n) {
+			int len = n;
+
+			if (ci->cfg.max_i2c && (len > ci->cfg.max_i2c))
+				len = ci->cfg.max_i2c;
+			status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, len);
+			if (status)
+				return status;
+			data += len;
+			n -= len;
+		}
 	}
 	return status;
 }
@@ -182,16 +201,16 @@ static int write_io(struct cxd *ci, u16 address, u8 val)
 
 static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
 {
-	int status;
+	int status = 0;
 
-	status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
+	if (ci->lastaddress != reg)
+		status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
 	if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
 		status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]);
+	ci->lastaddress = reg;
 	ci->regs[reg] = (ci->regs[reg] & (~mask)) | val;
-	if (!status) {
-		ci->lastaddress = reg;
+	if (!status)
 		status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]);
-	}
 	if (reg == 0x20)
 		ci->regs[reg] &= 0x7f;
 	return status;
@@ -203,16 +222,29 @@ static int write_reg(struct cxd *ci, u8 reg, u8 val)
 }
 
 #ifdef BUFFER_MODE
-static int write_block(struct cxd *ci, u8 adr, u8 *data, int n)
+static int write_block(struct cxd *ci, u8 adr, u8 *data, u16 n)
 {
-	int status;
-	u8 buf[256] = {1};
-
-	status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
-	if (!status) {
-		ci->lastaddress = adr;
-		memcpy(buf + 1, data, n);
-		status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
+	int status = 0;
+	u8 *buf = ci->wbuf;
+
+	if (ci->lastaddress != adr)
+		status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
+	if (status)
+		return status;
+
+	ci->lastaddress = adr;
+	buf[0] = 1;
+	while (n) {
+		int len = n;
+
+		if (ci->cfg.max_i2c && (len + 1 > ci->cfg.max_i2c))
+			len = ci->cfg.max_i2c - 1;
+		memcpy(buf + 1, data, len);
+		status = i2c_write(ci->i2c, ci->cfg.adr, buf, len + 1);
+		if (status)
+			return status;
+		n -= len;
+		data += len;
 	}
 	return status;
 }
@@ -238,6 +270,8 @@ static void set_mode(struct cxd *ci, int mode)
 
 static void cam_mode(struct cxd *ci, int mode)
 {
+	u8 dummy;
+
 	if (mode == ci->cammode)
 		return;
 
@@ -246,16 +280,15 @@ static void cam_mode(struct cxd *ci, int mode)
 		write_regm(ci, 0x20, 0x80, 0x80);
 		break;
 	case 0x01:
-#ifdef BUFFER_MODE
 		if (!ci->en.read_data)
 			return;
+		ci->write_busy = 0;
 		dev_info(&ci->i2c->dev, "enable cam buffer mode\n");
-		/* write_reg(ci, 0x0d, 0x00); */
-		/* write_reg(ci, 0x0e, 0x01); */
+		write_reg(ci, 0x0d, 0x00);
+		write_reg(ci, 0x0e, 0x01);
 		write_regm(ci, 0x08, 0x40, 0x40);
-		/* read_reg(ci, 0x12, &dummy); */
+		read_reg(ci, 0x12, &dummy);
 		write_regm(ci, 0x08, 0x80, 0x80);
-#endif
 		break;
 	default:
 		break;
@@ -325,7 +358,10 @@ static int init(struct cxd *ci)
 		if (status < 0)
 			break;
 
-		if (ci->cfg.clock_mode) {
+		if (ci->cfg.clock_mode == 2) {
+			/* bitrate*2^13/ 72000 */
+			u32 reg = ((ci->cfg.bitrate << 13) + 71999) / 72000;
+
 			if (ci->cfg.polarity) {
 				status = write_reg(ci, 0x09, 0x6f);
 				if (status < 0)
@@ -335,6 +371,25 @@ static int init(struct cxd *ci)
 				if (status < 0)
 					break;
 			}
+			status = write_reg(ci, 0x20, 0x08);
+			if (status < 0)
+				break;
+			status = write_reg(ci, 0x21, (reg >> 8) & 0xff);
+			if (status < 0)
+				break;
+			status = write_reg(ci, 0x22, reg & 0xff);
+			if (status < 0)
+				break;
+		} else if (ci->cfg.clock_mode == 1) {
+			if (ci->cfg.polarity) {
+				status = write_reg(ci, 0x09, 0x6f); /* D */
+				if (status < 0)
+					break;
+			} else {
+				status = write_reg(ci, 0x09, 0x6d);
+				if (status < 0)
+					break;
+			}
 			status = write_reg(ci, 0x20, 0x68);
 			if (status < 0)
 				break;
@@ -346,7 +401,7 @@ static int init(struct cxd *ci)
 				break;
 		} else {
 			if (ci->cfg.polarity) {
-				status = write_reg(ci, 0x09, 0x4f);
+				status = write_reg(ci, 0x09, 0x4f); /* C */
 				if (status < 0)
 					break;
 			} else {
@@ -354,7 +409,6 @@ static int init(struct cxd *ci)
 				if (status < 0)
 					break;
 			}
-
 			status = write_reg(ci, 0x20, 0x28);
 			if (status < 0)
 				break;
@@ -401,7 +455,6 @@ static int read_attribute_mem(struct dvb_ca_en50221 *ca,
 	set_mode(ci, 1);
 	read_pccard(ci, address, &val, 1);
 	mutex_unlock(&ci->lock);
-	/* printk(KERN_INFO "%02x:%02x\n", address,val); */
 	return val;
 }
@@ -446,6 +499,9 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
 {
 	struct cxd *ci = ca->data;
 
+	if (ci->cammode)
+		read_data(ca, slot, ci->rbuf, 0);
+
 	mutex_lock(&ci->lock);
 	cam_mode(ci, 0);
 	write_reg(ci, 0x00, 0x21);
@@ -465,7 +521,6 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
 		}
 	}
 	mutex_unlock(&ci->lock);
-	/* msleep(500); */
 	return 0;
 }
 
@@ -474,11 +529,19 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
 	struct cxd *ci = ca->data;
 
 	dev_info(&ci->i2c->dev, "%s\n", __func__);
+	if (ci->cammode)
+		read_data(ca, slot, ci->rbuf, 0);
 	mutex_lock(&ci->lock);
+	write_reg(ci, 0x00, 0x21);
+	write_reg(ci, 0x06, 0x1F);
+	msleep(300);
+
 	write_regm(ci, 0x09, 0x08, 0x08);
 	write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
 	write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */
+
+	ci->mode = -1;
+	ci->write_busy = 0;
 	mutex_unlock(&ci->lock);
 	return 0;
 }
@@ -490,9 +553,7 @@ static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
 	mutex_lock(&ci->lock);
 	write_regm(ci, 0x09, 0x00, 0x08);
 	set_mode(ci, 0);
-#ifdef BUFFER_MODE
 	cam_mode(ci, 1);
-#endif
 	mutex_unlock(&ci->lock);
 	return 0;
 }
@@ -506,12 +567,10 @@ static int campoll(struct cxd *ci)
 		return 0;
 	write_reg(ci, 0x05, istat);
 
-	if (istat & 0x40) {
+	if (istat & 0x40)
 		ci->dr = 1;
-		dev_info(&ci->i2c->dev, "DR\n");
-	}
 	if (istat & 0x20)
-		dev_info(&ci->i2c->dev, "WC\n");
+		ci->write_busy = 0;
 
 	if (istat & 2) {
 		u8 slotstat;
 
 		read_reg(ci, 0x01, &slotstat);
 		if (!(2 & slotstat)) {
 			if (!ci->slot_stat) {
-				ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT;
+				ci->slot_stat |=
+					DVB_CA_EN50221_POLL_CAM_PRESENT;
 				write_regm(ci, 0x03, 0x08, 0x08);
 			}
@@ -531,8 +591,8 @@ static int campoll(struct cxd *ci)
 				ci->ready = 0;
 			}
 		}
-		if (istat & 8 &&
-		    ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
+		if ((istat & 8) &&
+		    (ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT)) {
 			ci->ready = 1;
 			ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
 		}
@@ -553,7 +613,6 @@ static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
 	return ci->slot_stat;
 }
 
-#ifdef BUFFER_MODE
 static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
 {
 	struct cxd *ci = ca->data;
@@ -564,30 +623,38 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
 	campoll(ci);
 	mutex_unlock(&ci->lock);
 
-	dev_info(&ci->i2c->dev, "%s\n", __func__);
 	if (!ci->dr)
 		return 0;
 
 	mutex_lock(&ci->lock);
 	read_reg(ci, 0x0f, &msb);
 	read_reg(ci, 0x10, &lsb);
-	len = (msb << 8) | lsb;
+	len = ((u16)msb << 8) | lsb;
+	if (len > ecount || len < 2) {
+		/* read it anyway or cxd may hang */
+		read_block(ci, 0x12, ci->rbuf, len);
+		mutex_unlock(&ci->lock);
+		return -EIO;
+	}
 	read_block(ci, 0x12, ebuf, len);
 	ci->dr = 0;
 	mutex_unlock(&ci->lock);
-
 	return len;
 }
 
+#ifdef BUFFER_MODE
+
 static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
 {
 	struct cxd *ci = ca->data;
 
+	if (ci->write_busy)
+		return -EAGAIN;
 	mutex_lock(&ci->lock);
-	dev_info(&ci->i2c->dev, "%s %d\n", __func__, ecount);
 	write_reg(ci, 0x0d, ecount >> 8);
 	write_reg(ci, 0x0e, ecount & 0xff);
 	write_block(ci, 0x11, ebuf, ecount);
+	ci->write_busy = 1;
 	mutex_unlock(&ci->lock);
 	return ecount;
 }
diff --git a/drivers/staging/media/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h
index 0eb607c5b423..f4b29b1d6eb8 100644
--- a/drivers/staging/media/cxd2099/cxd2099.h
+++ b/drivers/staging/media/cxd2099/cxd2099.h
@@ -30,8 +30,10 @@ struct cxd2099_cfg {
 	u32 bitrate;
 	u8 adr;
-	u8 polarity:1;
-	u8 clock_mode:1;
+	u8 polarity;
+	u8 clock_mode;
+
+	u32 max_i2c;
 };
 
 #if defined(CONFIG_DVB_CXD2099) || \
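Several of the cxd2099 hunks implement the new cfg.max_i2c limit: read_block() and write_block() now split a large transfer into adapter-sized chunks instead of assuming the whole buffer fits in one i2c_transfer(). The shape of that loop, reduced to a self-contained sketch; xfer() is a hypothetical callback that performs one bounded transfer:

    #include <stddef.h>

    typedef int (*xfer_fn)(unsigned char *buf, size_t len);

    /* Issue a large transfer as a series of pieces no longer than
     * max_chunk bytes; max_chunk == 0 means "no limit", as with cfg.max_i2c. */
    static int chunked_xfer(xfer_fn xfer, unsigned char *buf, size_t n,
                            size_t max_chunk)
    {
            while (n) {
                    size_t len = n;
                    int ret;

                    if (max_chunk && len > max_chunk)
                            len = max_chunk;
                    ret = xfer(buf, len);
                    if (ret)
                            return ret;     /* abort on the first error */
                    buf += len;
                    n -= len;
            }
            return 0;
    }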
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index e583dd8a418b..d4fa41be80f9 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
 	if (!cnp) {
 		pr_info("%s stid %d lookup failure\n", __func__, stid);
-		return;
+		goto rel_skb;
 	}
 
 	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
 	cxgbit_put_cnp(cnp);
+rel_skb:
+	__kfree_skb(skb);
 }
 
 static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
 	if (!cnp) {
 		pr_info("%s stid %d lookup failure\n", __func__, stid);
-		return;
+		goto rel_skb;
 	}
 
 	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
 	cxgbit_put_cnp(cnp);
+rel_skb:
+	__kfree_skb(skb);
 }
 
 static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 	struct tid_info *t = lldi->tids;
 
 	csk = lookup_tid(t, tid);
-	if (unlikely(!csk))
+	if (unlikely(!csk)) {
 		pr_err("can't find connection for tid %u.\n", tid);
-	else
+		goto rel_skb;
+	} else {
 		cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+	}
 
 	cxgbit_put_csk(csk);
+rel_skb:
+	__kfree_skb(skb);
 }
 
 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index dda13f1af38e..514986b57c2d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 
 static void
 cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
-		      unsigned int nents)
+		      unsigned int nents, u32 skip)
 {
 	struct skb_seq_state st;
 	const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
 		}
 
 		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
-						 buf_len, consumed);
+						 buf_len, skip + consumed);
 	}
 }
 
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
 		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
 
-		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
 	}
 
 	cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
 		cmd->se_cmd.data_length);
 
 	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+		u32 skip = data_offset % PAGE_SIZE;
+
 		sg_off = data_offset / PAGE_SIZE;
 		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-		sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+		sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
 
-		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
 	}
 
 check_payload:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e4975dd1b1..5001261f5d69 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
 		return 0;
 	}
 	np->np_thread_state = ISCSI_NP_THREAD_RESET;
+	atomic_inc(&np->np_reset_count);
 
 	if (np->np_thread) {
 		spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
 	cmd->data_direction = DMA_NONE;
+	kfree(cmd->text_in_ptr);
 	cmd->text_in_ptr = NULL;
 
 	return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 		return text_length;
 
 	if (completed) {
-		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	} else {
-		hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+		hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
 		cmd->read_data_done += text_length;
 		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
 			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e9bdc8b86e7d..dc13afbd4c88 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	flush_signals(current);
 
 	spin_lock_bh(&np->np_thread_lock);
-	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+	if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+		spin_unlock_bh(&np->np_thread_lock);
 		complete(&np->np_restart_comp);
+		return 1;
 	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
 		spin_unlock_bh(&np->np_thread_lock);
 		goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 		goto exit;
 	} else if (rc < 0) {
 		spin_lock_bh(&np->np_thread_lock);
-		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+		if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+			np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 			spin_unlock_bh(&np->np_thread_lock);
 			complete(&np->np_restart_comp);
 			iscsit_put_transport(conn->conn_transport);
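The np_reset_count change turns the reset handshake from a single state flag into a counter: every iscsit_reset_np_thread() call banks one reset, and the login thread consumes them one at a time with atomic_dec_if_positive(), so a reset that races with the thread's own state transitions is no longer lost. C11 has no direct equivalent of atomic_dec_if_positive(); a sketch of the same primitive with <stdatomic.h>:

    #include <stdatomic.h>

    /* Decrement *v only if it is positive; return the new value, or a
     * negative number if nothing was decremented (the kernel semantics). */
    static int dec_if_positive(atomic_int *v)
    {
            int old = atomic_load(v);

            while (old > 0) {
                    if (atomic_compare_exchange_weak(v, &old, old - 1))
                            return old - 1;
                    /* on failure the CAS reloaded 'old'; just retry */
            }
            return -1;
    }

    /* Producer side: atomic_fetch_add(&reset_count, 1);
     * Consumer side: if (dec_if_positive(&reset_count) >= 0) handle_reset(); */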
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 36913734c6bc..02e8a5d86658 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 	mutex_lock(&tpg->acl_node_mutex);
 	if (acl->dynamic_node_acl)
 		acl->dynamic_node_acl = 0;
-	list_del(&acl->acl_list);
+	list_del_init(&acl->acl_list);
 	mutex_unlock(&tpg->acl_node_mutex);
 
 	target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 	 * in transport_deregister_session().
 	 */
 	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
-		list_del(&nacl->acl_list);
+		list_del_init(&nacl->acl_list);
 
 		core_tpg_wait_for_nacl_pr_ref(nacl);
 		core_free_device_list_for_node(nacl, se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 97fed9a298bd..836d552b0385 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
 	}
 
 	mutex_lock(&se_tpg->acl_node_mutex);
-	list_del(&nacl->acl_list);
+	list_del_init(&nacl->acl_list);
 	mutex_unlock(&se_tpg->acl_node_mutex);
 
 	core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
 		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 
 		if (se_nacl->dynamic_stop)
-			list_del(&se_nacl->acl_list);
+			list_del_init(&se_nacl->acl_list);
 	}
 	mutex_unlock(&se_tpg->acl_node_mutex);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 80ee130f8253..942d094269fb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
 					block_remaining);
 		to_offset = get_block_offset_user(udev, dbi,
 				block_remaining);
-		offset = DATA_BLOCK_SIZE - block_remaining;
-		to += offset;
 
 		if (*iov_cnt != 0 &&
 		    to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
 			(*iov)->iov_len = copy_bytes;
 		}
 		if (copy_data) {
-			memcpy(to, from + sg->length - sg_remaining,
-				copy_bytes);
+			offset = DATA_BLOCK_SIZE - block_remaining;
+			memcpy(to + offset,
+			       from + sg->length - sg_remaining,
+			       copy_bytes);
 			tcmu_flush_dcache_range(to, copy_bytes);
 		}
 		sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
 			offset = DATA_BLOCK_SIZE - block_remaining;
-			from += offset;
 			tcmu_flush_dcache_range(from, copy_bytes);
-			memcpy(to + sg->length - sg_remaining, from,
+			memcpy(to + sg->length - sg_remaining, from + offset,
 				copy_bytes);
 
 			sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
 	if (udev->dev_config[0])
 		snprintf(str + used, size - used, "/%s", udev->dev_config);
 
+	/* If the old string exists, free it */
+	kfree(info->name);
 	info->name = str;
 
 	return 0;
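The target_core_user scatter/gather hunks fix a pointer-drift bug: the old code advanced the block pointer itself ("to += offset"), so the subsequent tcmu_flush_dcache_range(to, ...) no longer covered the start of the block, and the gather side drifted the same way. The fix is to keep the base pointer fixed and apply the fill offset only at the memcpy(). Reduced to a sketch with hypothetical names:

    #include <string.h>

    #define DATA_BLOCK_SIZE 4096

    /* Append len bytes into a fixed-size block. The block pointer stays
     * unmodified so the caller can still flush or inspect the whole block;
     * only the copy destination carries the fill offset. */
    static void block_append(unsigned char *block, size_t *remaining,
                             const unsigned char *src, size_t len)
    {
            size_t offset = DATA_BLOCK_SIZE - *remaining;   /* current fill point */

            memcpy(block + offset, src, len);
            *remaining -= len;
    }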
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 308b6e17c88a..fe2f00ceafc5 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
 	int res;
 	enum tb_port_type type;
 
+	/*
+	 * Some DROMs list more ports than the controller actually has
+	 * so we skip those but allow the parser to continue.
+	 */
+	if (header->index > sw->config.max_port_number) {
+		dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+		return 0;
+	}
+
 	port = &sw->ports[header->index];
 	port->disabled = header->port_disabled;
 	if (port->disabled)
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8ee340290219..bdaac1ff00a5 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -904,7 +904,14 @@ static int icm_driver_ready(struct tb *tb)
 
 static int icm_suspend(struct tb *tb)
 {
-	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+	int ret;
+
+	ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+	if (ret)
+		tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
+			ret, __func__);
+
+	return 0;
 }
 
 /*
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 40219a706309..e9391bbd4036 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida);
 
 struct nvm_auth_status {
 	struct list_head list;
-	uuid_be uuid;
+	uuid_t uuid;
 	u32 status;
 };
 
@@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
 	struct nvm_auth_status *st;
 
 	list_for_each_entry(st, &nvm_auth_status_cache, list) {
-		if (!uuid_be_cmp(st->uuid, *sw->uuid))
+		if (uuid_equal(&st->uuid, sw->uuid))
 			return st;
 	}
 
@@ -1461,7 +1461,7 @@ struct tb_sw_lookup {
 	struct tb *tb;
 	u8 link;
 	u8 depth;
-	const uuid_be *uuid;
+	const uuid_t *uuid;
 };
 
 static int tb_switch_match(struct device *dev, void *data)
@@ -1518,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
 {
 	struct tb_sw_lookup lookup;
 	struct device *dev;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3d9f64676e58..e0deee4f1eb0 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -101,7 +101,7 @@ struct tb_switch {
 	struct tb_dma_port *dma_port;
 	struct tb *tb;
 	u64 uid;
-	uuid_be *uuid;
+	uuid_t *uuid;
 	u16 vendor;
 	u16 device;
 	const char *vendor_name;
@@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth);
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
 
 static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
 {
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 85b6d33c0919..de6441e4a060 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response {
 
 struct icm_fr_event_device_connected {
 	struct icm_pkg_header hdr;
-	uuid_be ep_uuid;
+	uuid_t ep_uuid;
 	u8 connection_key;
 	u8 connection_id;
 	u16 link_info;
@@ -193,7 +193,7 @@ struct icm_fr_event_device_connected {
 
 struct icm_fr_pkg_approve_device {
 	struct icm_pkg_header hdr;
-	uuid_be ep_uuid;
+	uuid_t ep_uuid;
 	u8 connection_key;
 	u8 connection_id;
 	u16 reserved;
@@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected {
 
 struct icm_fr_pkg_add_device_key {
 	struct icm_pkg_header hdr;
-	uuid_be ep_uuid;
+	uuid_t ep_uuid;
 	u8 connection_key;
 	u8 connection_id;
 	u16 reserved;
@@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key {
 
 struct icm_fr_pkg_add_device_key_response {
 	struct icm_pkg_header hdr;
-	uuid_be ep_uuid;
+	uuid_t ep_uuid;
 	u8 connection_key;
 	u8 connection_id;
 	u16 reserved;
@@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response {
 
 struct icm_fr_pkg_challenge_device {
 	struct icm_pkg_header hdr;
-	uuid_be ep_uuid;
+	uuid_t ep_uuid;
 	u8 connection_key;
 	u8 connection_id;
 	u16 reserved;
@@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device {
 
 struct icm_fr_pkg_challenge_device_response {
 	struct icm_pkg_header hdr;
-	uuid_be ep_uuid;
+	uuid_t ep_uuid;
 	u8 connection_key;
 	u8 connection_id;
 	u16 reserved;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b5def356af63..1aab3010fbfa 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 		if (up->dl_write)
 			uart->dl_write = up->dl_write;
 
-		if (serial8250_isa_config != NULL)
-			serial8250_isa_config(0, &uart->port,
-					&uart->capabilities);
+		if (uart->port.type != PORT_8250_CIR) {
+			if (serial8250_isa_config != NULL)
+				serial8250_isa_config(0, &uart->port,
+						&uart->capabilities);
+
+			ret = uart_add_one_port(&serial8250_reg,
+						&uart->port);
+			if (ret == 0)
+				ret = uart->port.line;
+		} else {
+			dev_info(uart->port.dev,
+				"skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
+				uart->port.iobase,
+				(unsigned long long)uart->port.mapbase,
+				uart->port.irq);
 
-		ret = uart_add_one_port(&serial8250_reg, &uart->port);
-		if (ret == 0)
-			ret = uart->port.line;
+			ret = 0;
+		}
 	}
 	mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index b5c98e5bf524..c6360fbdf808 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -261,7 +261,7 @@ __xr17v35x_register_gpio(struct pci_dev *pcidev,
 }
 
 static const struct property_entry exar_gpio_properties[] = {
-	PROPERTY_ENTRY_U32("linux,first-pin", 0),
+	PROPERTY_ENTRY_U32("exar,first-pin", 0),
 	PROPERTY_ENTRY_U32("ngpios", 16),
 	{ }
 };
@@ -326,7 +326,7 @@ static int iot2040_rs485_config(struct uart_port *port,
 }
 
 static const struct property_entry iot2040_gpio_properties[] = {
-	PROPERTY_ENTRY_U32("linux,first-pin", 10),
+	PROPERTY_ENTRY_U32("exar,first-pin", 10),
 	PROPERTY_ENTRY_U32("ngpios", 1),
 	{ }
 };
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8a857bb34fbb..1888d168a41c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
 	.fixed_options = true,
 };
 
-/*
- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
- * occasionally getting stuck as 1. To avoid the potential for a hang, check
- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
- * implementations, so only do so if an affected platform is detected in
- * parse_spcr().
- */
-static bool qdf2400_e44_present = false;
-
+#ifdef CONFIG_ACPI_SPCR_TABLE
 static struct vendor_data vendor_qdt_qdf2400_e44 = {
 	.reg_offset = pl011_std_offsets,
 	.fr_busy = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
 	.always_enabled = true,
 	.fixed_options = true,
 };
+#endif
 
 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
 	[REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
 	resource_size_t addr;
 	int i;
 
-	if (strcmp(name, "qdf2400_e44") == 0) {
-		pr_info_once("UART: Working around QDF2400 SoC erratum 44");
-		qdf2400_e44_present = true;
-	} else if (strcmp(name, "pl011") != 0) {
+	/*
+	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
+	 * have a distinct console name, so make sure we check for that.
+	 * The actual implementation of the erratum occurs in the probe
+	 * function.
+	 */
+	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
 		return -ENODEV;
-	}
 
 	if (uart_parse_earlycon(options, &iotype, &addr, &options))
 		return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
 	}
 	uap->port.irq	= ret;
 
-	uap->reg_offset	= vendor_sbsa.reg_offset;
-	uap->vendor	= qdf2400_e44_present ?
-					&vendor_qdt_qdf2400_e44 : &vendor_sbsa;
+#ifdef CONFIG_ACPI_SPCR_TABLE
+	if (qdf2400_e44_present) {
+		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
+		uap->vendor = &vendor_qdt_qdf2400_e44;
+	} else
+#endif
+		uap->vendor = &vendor_sbsa;
+
+	uap->reg_offset	= uap->vendor->reg_offset;
 	uap->fifosize	= 32;
-	uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
+	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
 	uap->port.ops	= &sbsa_uart_pops;
 	uap->fixed_baud = baudrate;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ab1bb3b538ac..7f277b092b5b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
 	/* No more submits can occur */
 	spin_lock_irq(&hcd_urb_list_lock);
 rescan:
-	list_for_each_entry (urb, &ep->urb_list, urb_list) {
+	list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
 		int is_in;
 
 		if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
 	}
 	if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
 		hcd = hcd->shared_hcd;
+		clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+		set_bit(HCD_FLAG_DEAD, &hcd->flags);
 		if (hcd->rh_registered) {
 			clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6e6797d145dd..822f8c50e423 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
 static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
 		u16 portchange)
 {
-	int status, i;
+	int status = -ENODEV;
+	int i;
 	unsigned unit_load;
 	struct usb_device *hdev = hub->hdev;
 	struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
 
 done:
 	hub_port_disable(hub, port1, 1);
-	if (hcd->driver->relinquish_port && !hub->hdev->parent)
-		hcd->driver->relinquish_port(hcd, port1);
-
+	if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+		if (status != -ENOTCONN && status != -ENODEV)
+			hcd->driver->relinquish_port(hcd, port1);
+	}
 }
 
 /* Handle physical or logical connection change events.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3116edfcdc18..574da2b4529c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* appletouch */
 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* Avision AV600U */
 	{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
 	  USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
 	{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
 	{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
 	{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+	{ USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
 
 	/* Logitech Optical Mouse M90/M100 */
 	{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6b299c7b7656..f064f1549333 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
 		if (!node) {
 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
 
+			/*
+			 * USB Specification 2.0 Section 5.9.2 states that: "If
+			 * there is only a single transaction in the microframe,
+			 * only a DATA0 data packet PID is used. If there are
+			 * two transactions per microframe, DATA1 is used for
+			 * the first transaction data packet and DATA0 is used
+			 * for the second transaction data packet. If there are
+			 * three transactions per microframe, DATA2 is used for
+			 * the first transaction data packet, DATA1 is used for
+			 * the second, and DATA0 is used for the third."
+			 *
+			 * IOW, we should satisfy the following cases:
+			 *
+			 * 1) length <= maxpacket
+			 *	- DATA0
+			 *
+			 * 2) maxpacket < length <= (2 * maxpacket)
+			 *	- DATA1, DATA0
+			 *
+			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+			 *	- DATA2, DATA1, DATA0
+			 */
 			if (speed == USB_SPEED_HIGH) {
 				struct usb_ep *ep = &dep->endpoint;
-				trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+				unsigned int mult = ep->mult - 1;
+				unsigned int maxp = usb_endpoint_maxp(ep->desc);
+
+				if (length <= (2 * maxp))
+					mult--;
+
+				if (length <= maxp)
+					mult--;
+
+				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
 			}
 		} else {
 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
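The dwc3 hunk derives the starting data PID from how many transactions the transfer actually needs in the microframe, rather than always using the endpoint's configured maximum. The same arithmetic restated as a helper; the name is hypothetical, ep_mult is the endpoint's configured transactions-per-microframe (1..3), and clamping is added here for the degenerate cases that the hunk's two plain decrements do not guard:

    /* Pick the PCM1/mult value for a high-speed isochronous TRB: three
     * packets start with DATA2, two with DATA1, one with plain DATA0 --
     * but never more packets than the transfer length requires. */
    static unsigned int isoc_first_pid_mult(unsigned int length,
                                            unsigned int maxp,
                                            unsigned int ep_mult)
    {
            unsigned int packets = (length + maxp - 1) / maxp;  /* packets needed */

            if (packets == 0)
                    packets = 1;        /* zero-length request still sends DATA0 */
            if (packets > ep_mult)
                    packets = ep_mult;  /* cannot exceed the configured mult */
            return packets - 1;         /* 1 -> DATA0, 2 -> DATA1, 3 -> DATA2 */
    }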
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 62dc9c7798e7..e1de8fe599a3 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
 	return usb3_req;
 }
 
-static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
-			      struct renesas_usb3_request *usb3_req, int status)
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+				struct renesas_usb3_request *usb3_req,
+				int status)
 {
 	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
-	unsigned long flags;
 
 	dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
 		usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
 		status);
 	usb3_req->req.status = status;
-	spin_lock_irqsave(&usb3->lock, flags);
 	usb3_ep->started = false;
 	list_del_init(&usb3_req->queue);
-	spin_unlock_irqrestore(&usb3->lock, flags);
+	spin_unlock(&usb3->lock);
 	usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+	spin_lock(&usb3->lock);
+}
+
+static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+			      struct renesas_usb3_request *usb3_req, int status)
+{
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+	unsigned long flags;
+
+	spin_lock_irqsave(&usb3->lock, flags);
+	__usb3_request_done(usb3_ep, usb3_req, status);
+	spin_unlock_irqrestore(&usb3->lock, flags);
 }
 
 static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index c8989c62a262..c8f38649f749 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -98,6 +98,7 @@ enum amd_chipset_gen {
 	AMD_CHIPSET_HUDSON2,
 	AMD_CHIPSET_BOLTON,
 	AMD_CHIPSET_YANGTZE,
+	AMD_CHIPSET_TAISHAN,
 	AMD_CHIPSET_UNKNOWN,
 };
 
@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
 			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
 		else if (rev >= 0x40 && rev <= 0x4f)
 			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+	}
+	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+					  0x145c, NULL);
+	if (pinfo->smbus_dev) {
+		pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
 	} else {
 		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
 {
 	/* Make sure amd chipset type has already been initialized */
 	usb_amd_find_chipset_info();
-	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
-		return 0;
-
-	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
-	return 1;
+	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 
@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
 			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+	/*
+	 * Our dear uPD72020{1,2} friend only partially resets when
+	 * asked to via the XHCI interface, and may end up doing DMA
+	 * at the wrong addresses, as it keeps the top 32bit of some
+	 * addresses from its previous programming under obscure
+	 * circumstances.
+	 * Give it a good wack at probe time. Unfortunately, this
+	 * needs to happen before we've had a chance to discover any
+	 * quirk, or the system will be in a rather bad state.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+	    (pdev->device == 0x0014 || pdev->device == 0x0015))
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 655994480198..5582cbafecd4 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
 #else
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5b0fa553c8bc..8071c8fdd15e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
 	driver = (struct hc_driver *)id->driver_data;
 
+	/* For some HW implementation, a XHCI reset is just not enough... */
+	if (usb_xhci_needs_pci_reset(dev)) {
+		dev_info(&dev->dev, "Resetting\n");
+		if (pci_reset_function_locked(dev))
+			dev_warn(&dev->dev, "Reset failed");
+	}
+
 	/* Prevent runtime suspending between USB-2 and USB-3 initialization */
 	pm_runtime_get_noresume(&dev->dev);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 76decb8011eb..3344ffd5bb13 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
 				"Could not flush host TX%d fifo: csr: %04x\n",
 				ep->epnum, csr))
 			return;
+		mdelay(1);
 	}
 }
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8fb86a5f458e..3d0dd2f97415 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -197,6 +197,7 @@ struct msm_otg {
 	struct regulator *v3p3;
 	struct regulator *v1p8;
 	struct regulator *vddcx;
+	struct regulator_bulk_data supplies[3];
 
 	struct reset_control *phy_rst;
 	struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
 
 static int msm_otg_probe(struct platform_device *pdev)
 {
-	struct regulator_bulk_data regs[3];
 	int ret = 0;
 	struct device_node *np = pdev->dev.of_node;
 	struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
 		return motg->irq;
 	}
 
-	regs[0].supply = "vddcx";
-	regs[1].supply = "v3p3";
-	regs[2].supply = "v1p8";
+	motg->supplies[0].supply = "vddcx";
+	motg->supplies[1].supply = "v3p3";
+	motg->supplies[2].supply = "v1p8";
 
-	ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
+	ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
+				      motg->supplies);
 	if (ret)
 		return ret;
 
-	motg->vddcx = regs[0].consumer;
-	motg->v3p3 = regs[1].consumer;
-	motg->v1p8 = regs[2].consumer;
+	motg->vddcx = motg->supplies[0].consumer;
+	motg->v3p3 = motg->supplies[1].consumer;
+	motg->v1p8 = motg->supplies[2].consumer;
 
 	clk_set_rate(motg->clk, 60000000);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 93fba9033b00..2c8161bcf5b5 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
 	struct usbhs_pipe *pipe;
 	unsigned long flags;
-	int ret = 0;
 
 	spin_lock_irqsave(&uep->lock, flags);
 	pipe = usbhsg_uep_to_pipe(uep);
-	if (!pipe) {
-		ret = -EINVAL;
+	if (!pipe)
 		goto out;
-	}
 
 	usbhsg_pipe_disable(uep);
 	usbhs_pipe_free(pipe);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d544b331c9f2..02b67abfc2a1 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -20,9 +20,13 @@
 /* Low Power Status register (LPSTS) */
 #define LPSTS_SUSPM	0x4000
 
-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
 #define UGCTRL2_RESERVED_3	0x00000001	/* bit[3:0] should be B'0001 */
 #define UGCTRL2_USB0SEL_OTG	0x00000030
+#define UGCTRL2_VBUSSEL		0x00000400
 
 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
 {
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
 {
 	struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
 
-	usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+	usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+		      UGCTRL2_VBUSSEL);
 
 	if (enable) {
 		usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f64e914a8985..2d945c9f975c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+	{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ebe51f11105d..fe123153b1a5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },	/* D-Link DWM-158 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),	/* D-Link DWM-221 B1 */
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),	/* D-Link DWM-222 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c9ebefd8f35f..a585b477415d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
 		.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
+		.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
 	{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be88209e..3b5a15d1dc0d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2		0x0547
 #define ATEN_PRODUCT_ID		0x2008
+#define ATEN_PRODUCT_UC485	0x2021
 #define ATEN_PRODUCT_ID2	0x2118
 
 #define IODATA_VENDOR_ID	0x04bb
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cbea9f329e71..cde115359793 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
 		"Initio Corporation",
-		"",
+		"INIC-3069",
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-		US_FL_NO_ATA_1X),
+		US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
 
 /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 06615934fed1..0dceb9fa3a06 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
 {
 	struct us_data *us = (struct us_data *)__us;
 	struct Scsi_Host *host = us_to_host(us);
+	struct scsi_cmnd *srb;
 
 	for (;;) {
 		usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
 		scsi_lock(host);
 
 		/* When we are called with no command pending, we're done */
+		srb = us->srb;
 		if (us->srb == NULL) {
 			scsi_unlock(host);
 			mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
 		/* lock access to the state */
 		scsi_lock(host);
 
-		/* indicate that the command is done */
-		if (us->srb->result != DID_ABORT << 16) {
-			usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
-					us->srb->result);
-			us->srb->scsi_done(us->srb);
-		} else {
+		/* was the command aborted? */
+		if (us->srb->result == DID_ABORT << 16) {
 SkipForAbort:
 			usb_stor_dbg(us, "scsi command aborted\n");
+			srb = NULL;	/* Don't call srb->scsi_done() */
 		}
 
 		/*
@@ -429,6 +428,13 @@ SkipForAbort:
 
 		/* unlock the device pointers */
 		mutex_unlock(&us->dev_mutex);
+
+		/* now that the locks are released, notify the SCSI core */
+		if (srb) {
+			usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+					srb->result);
+			srb->scsi_done(srb);
+		}
 	} /* for (;;) */
 
 	/* Wait until we are told to stop */
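The usb-storage rework is the classic "complete outside the lock" pattern: the thread snapshots us->srb under the host lock, drops the reference when the command was aborted, and only calls ->scsi_done() after both the host lock and dev_mutex are released, so the completion callback can never deadlock by re-entering the queueing path. A compact pthread sketch of the same shape, names illustrative:

    #include <pthread.h>
    #include <stddef.h>

    struct cmd {
            int result;
            void (*done)(struct cmd *);
    };

    static void complete_current(pthread_mutex_t *lock, struct cmd **slot,
                                 int aborted_result)
    {
            struct cmd *c;

            pthread_mutex_lock(lock);
            c = *slot;                      /* snapshot under the lock */
            *slot = NULL;
            if (c && c->result == aborted_result)
                    c = NULL;               /* aborted: nothing to complete */
            pthread_mutex_unlock(lock);

            if (c)
                    c->done(c);             /* callback runs with no locks held */
    }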
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 063c1ce6fa42..f041b1a6cf66 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -226,7 +226,14 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 	if (ret)
 		return ret;
 
-	vdev->reset_works = (pci_reset_function(pdev) == 0);
+	/* If reset fails because of the device lock, fail this path entirely */
+	ret = pci_try_reset_function(pdev);
+	if (ret == -EAGAIN) {
+		pci_disable_device(pdev);
+		return ret;
+	}
+
+	vdev->reset_works = !ret;
 	pci_save_state(pdev);
 	vdev->pci_saved_state = pci_store_saved_state(pdev);
 	if (!vdev->pci_saved_state)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 330a57024cbc..5628fe114347 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -839,7 +839,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
 /* Permissions for PCI Express capability */
 static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
 {
-	/* Alloc larger of two possible sizes */
+	/* Alloc largest of possible sizes */
 	if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
 		return -ENOMEM;
 
@@ -1243,11 +1243,16 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
 			vdev->extended_caps = (dword != 0);
 		}
 
-		/* length based on version */
-		if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
+		/* length based on version and type */
+		if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
+			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+				return 0xc; /* "All Devices" only, no link */
 			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
-		else
+		} else {
+			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+				return 0x2c; /* No link */
 			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
+		}
 	case PCI_CAP_ID_HT:
 		ret = pci_read_config_byte(pdev, pos + 3, &byte);
 		if (ret)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e4613a3c362d..9cb3f722dce1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -308,7 +308,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->avail = NULL;
 	vq->used = NULL;
 	vq->last_avail_idx = 0;
-	vq->last_used_event = 0;
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
 	vq->signalled_used = 0;
@@ -1402,7 +1401,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 			r = -EINVAL;
 			break;
 		}
-		vq->last_avail_idx = vq->last_used_event = s.num;
+		vq->last_avail_idx = s.num;
 		/* Forget the cached index value. */
 		vq->avail_idx = vq->last_avail_idx;
 		break;
@@ -2241,6 +2240,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	__u16 old, new;
 	__virtio16 event;
 	bool v;
+	/* Flush out used index updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts. */
+	smp_mb();
 
 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 
 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__virtio16 flags;
-		/* Flush out used index updates. This is paired
-		 * with the barrier that the Guest executes when enabling
-		 * interrupts. */
-		smp_mb();
 		if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
@@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!v))
 		return true;
 
-	/* We're sure if the following conditions are met, there's no
-	 * need to notify guest:
-	 * 1) cached used event is ahead of new
-	 * 2) old to new updating does not cross cached used event. */
-	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
-	    !vring_need_event(vq->last_used_event, new, old))
-		return false;
-
-	/* Flush out used index updates. This is paired
-	 * with the barrier that the Guest executes when enabling
-	 * interrupts. */
-	smp_mb();
-
 	if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
-	vq->last_used_event = vhost16_to_cpu(vq, event);
-
-	return vring_need_event(vq->last_used_event, new, old);
+	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
 }
 
 /* This actually signals the guest, using eventfd. */
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f72095868b93..bb7c29b8b9fc 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -115,9 +115,6 @@ struct vhost_virtqueue {
 	/* Last index we used. */
 	u16 last_used_idx;
 
-	/* Last used evet we've seen */
-	u16 last_used_event;
-
 	/* Used flags */
 	u16 used_flags;
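With the cached last_used_event gone, correctness rests entirely on the hoisted smp_mb(): the host must publish its used-index update before it reads the guest's flags or event index, while the guest writes those fields before re-checking the ring — the store-buffering pattern, where the full fences guarantee at least one side observes the other's write. A C11-atomics sketch of the same discipline; variable names are illustrative, and the real decision uses vring_need_event() with wraparound-safe arithmetic rather than a plain compare:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int used_idx;     /* written by host, read by guest */
    static atomic_int used_event;   /* written by guest, read by host */

    /* Host side: publish the new used index, fence, then read the guest's
     * event index to decide whether an interrupt is needed. */
    static bool host_should_signal(int new_used)
    {
            atomic_store_explicit(&used_idx, new_used, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
            return atomic_load_explicit(&used_event,
                                        memory_order_relaxed) < new_used;
    }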
*/ + smp_mb(); if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && unlikely(vq->avail_idx == vq->last_avail_idx)) @@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { __virtio16 flags; - /* Flush out used index updates. This is paired - * with the barrier that the Guest executes when enabling - * interrupts. */ - smp_mb(); if (vhost_get_avail(vq, flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; @@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (unlikely(!v)) return true; - /* We're sure if the following conditions are met, there's no - * need to notify guest: - * 1) cached used event is ahead of new - * 2) old to new updating does not cross cached used event. */ - if (vring_need_event(vq->last_used_event, new + vq->num, new) && - !vring_need_event(vq->last_used_event, new, old)) - return false; - - /* Flush out used index updates. This is paired - * with the barrier that the Guest executes when enabling - * interrupts. */ - smp_mb(); - if (vhost_get_avail(vq, event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } - vq->last_used_event = vhost16_to_cpu(vq, event); - - return vring_need_event(vq->last_used_event, new, old); + return vring_need_event(vhost16_to_cpu(vq, event), new, old); } /* This actually signals the guest, using eventfd. */ diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index f72095868b93..bb7c29b8b9fc 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -115,9 +115,6 @@ struct vhost_virtqueue { /* Last index we used. */ u16 last_used_idx; - /* Last used evet we've seen */ - u16 last_used_event; - /* Used flags */ u16 used_flags; diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index ff01bed7112f..1e784adb89b1 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -17,6 +17,7 @@ #include <asm/efi.h> static bool request_mem_succeeded = false; +static bool nowc = false; static struct fb_var_screeninfo efifb_defined = { .activate = FB_ACTIVATE_NOW, @@ -99,6 +100,8 @@ static int efifb_setup(char *options) screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); else if (!strncmp(this_opt, "width:", 6)) screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); + else if (!strcmp(this_opt, "nowc")) + nowc = true; } } @@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); + if (nowc) + info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); + else + info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); if (!info->screen_base) { pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", efifb_fix.smem_len, efifb_fix.smem_start); diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index c166e0725be5..ba82f97fb42b 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c @@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev) imxfb_disable_controller(fbi); unregister_framebuffer(info); - + fb_dealloc_cmap(&info->cmap); pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->exit) pdata->exit(fbi->pdev); - - fb_dealloc_cmap(&info->cmap); - kfree(info->pseudo_palette); - framebuffer_release(info); - 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index eecf695c16f4..09e5bb013d28 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {

 static int __init omap_dss_probe(struct platform_device *pdev)
 {
-	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
 	int r;

 	core.pdev = pdev;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 22caf808bfab..f0b3a0b9d42f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -104,12 +104,6 @@ static u32 page_to_balloon_pfn(struct page *page)
 	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
 }

-static struct page *balloon_pfn_to_page(u32 pfn)
-{
-	BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
-	return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
-}
-
 static void balloon_ack(struct virtqueue *vq)
 {
 	struct virtio_balloon *vb = vq->vdev->priv;
@@ -138,8 +132,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
 {
 	unsigned int i;

-	/* Set balloon pfns pointing at this page.
-	 * Note that the first pfn points at start of the page. */
+	/*
+	 * Set balloon pfns pointing at this page.
+	 * Note that the first pfn points at start of the page.
+	 */
 	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
 		pfns[i] = cpu_to_virtio32(vb->vdev,
 					  page_to_balloon_pfn(page) + i);
@@ -182,18 +178,16 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
 	return num_allocated_pages;
 }

-static void release_pages_balloon(struct virtio_balloon *vb)
+static void release_pages_balloon(struct virtio_balloon *vb,
+				  struct list_head *pages)
 {
-	unsigned int i;
-	struct page *page;
+	struct page *page, *next;

-	/* Find pfns pointing at start of each page, get pages and free them. */
-	for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
-		page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
-							   vb->pfns[i]));
+	list_for_each_entry_safe(page, next, pages, lru) {
 		if (!virtio_has_feature(vb->vdev,
 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
 			adjust_managed_page_count(page, 1);
+		list_del(&page->lru);
 		put_page(page); /* balloon reference */
 	}
 }
@@ -203,6 +197,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 	unsigned num_freed_pages;
 	struct page *page;
 	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+	LIST_HEAD(pages);

 	/* We can only do one array worth at a time. */
 	num = min(num, ARRAY_SIZE(vb->pfns));
@@ -216,6 +211,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 		if (!page)
 			break;
 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+		list_add(&page->lru, &pages);
 		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
 	}

@@ -227,7 +223,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 	 */
 	if (vb->num_pfns != 0)
 		tell_host(vb, vb->deflate_vq);
-	release_pages_balloon(vb);
+	release_pages_balloon(vb, &pages);
 	mutex_unlock(&vb->balloon_lock);
 	return num_freed_pages;
 }
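Editor's note: release_pages_balloon() now walks a list that leak_balloon() builds as it unlinks pages from the balloon, instead of converting device PFNs back to struct page. Because every entry is deleted inside the loop, the _safe iterator is required. A minimal, self-contained sketch of that kernel list pattern (the my_node type is hypothetical, not from the driver):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct my_node {
		struct list_head lru;
	};

	static void release_all(struct list_head *pages)
	{
		struct my_node *node, *next;

		/* The _safe variant caches the successor up front, so the
		 * list_del() below cannot invalidate the iteration. */
		list_for_each_entry_safe(node, next, pages, lru) {
			list_del(&node->lru);
			kfree(node); /* the driver does put_page() here */
		}
	}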
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b241bfa529ce..2d43118077e4 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -343,14 +343,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	info->cpu = cpu;
 }

-static void xen_evtchn_mask_all(void)
-{
-	unsigned int evtchn;
-
-	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
-		mask_evtchn(evtchn);
-}
-
 /**
  * notify_remote_via_irq - send event to remote end of event channel via irq
  * @irq: irq of event channel to send event to
@@ -582,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)

 static void enable_pirq(struct irq_data *data)
 {
-	startup_pirq(data);
+	enable_dynirq(data);
 }

 static void disable_pirq(struct irq_data *data)
@@ -1573,7 +1565,6 @@ void xen_irq_resume(void)
 	struct irq_info *info;

 	/* New event-channel space is not 'live' yet. */
-	xen_evtchn_mask_all();
 	xen_evtchn_resume();

 	/* No IRQ <-> event-channel mappings. */
@@ -1681,6 +1672,7 @@ module_param(fifo_events, bool, 0);
 void __init xen_init_IRQ(void)
 {
 	int ret = -EINVAL;
+	unsigned int evtchn;

 	if (fifo_events)
 		ret = xen_evtchn_fifo_init();
@@ -1692,7 +1684,8 @@ void __init xen_init_IRQ(void)
 	BUG_ON(!evtchn_to_irq);

 	/* No event channels are 'live' right now. */
-	xen_evtchn_mask_all();
+	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
+		mask_evtchn(evtchn);

 	pirq_needs_eoi = pirq_needs_eoi_flag;
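Editor's note: enable_pirq() previously re-ran the full startup_pirq() sequence, event-channel rebind included, on every irq_enable(); routing it through enable_dynirq() reduces the operation to a plain unmask of the already-bound channel. From memory, enable_dynirq() in the same file is roughly the following (a recollection, not a verbatim quote):

	static void enable_dynirq(struct irq_data *data)
	{
		int evtchn = evtchn_from_irq(data->irq);

		if (VALID_EVTCHN(evtchn))
			unmask_evtchn(evtchn);
	}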
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 66620713242a..a67e955cacd1 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -151,8 +151,8 @@ static unsigned long frontswap_inertia_counter;
 static void frontswap_selfshrink(void)
 {
 	static unsigned long cur_frontswap_pages;
-	static unsigned long last_frontswap_pages;
-	static unsigned long tgt_frontswap_pages;
+	unsigned long last_frontswap_pages;
+	unsigned long tgt_frontswap_pages;

 	last_frontswap_pages = cur_frontswap_pages;
 	cur_frontswap_pages = frontswap_curr_pages();
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e46080214955..3e59590c7254 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
 	struct list_head *ent;
 	struct xs_watch_event *event;

+	xenwatch_pid = current->pid;
+
 	for (;;) {
 		wait_event_interruptible(watch_events_waitq,
 					 !list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
 	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
 	if (IS_ERR(task))
 		return PTR_ERR(task);
-	xenwatch_pid = task->pid;

 	/* shutdown watches for kexec boot */
 	xs_reset_watches();
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 967f069385d0..71ddfb4cf61c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -87,7 +87,6 @@ static int __init xenfs_init(void)
 	if (xen_domain())
 		return register_filesystem(&xenfs_type);

-	pr_info("not registering filesystem on non-xen platform\n");
 	return 0;
 }
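Editor's note: the xenbus_xs.c hunks move the xenwatch_pid assignment from xs_init() into the thread itself, closing the window in which xenwatch could already be processing events while xenwatch_pid was still unset. A generic sketch of that "publish your own pid first" pattern (hypothetical names, not the driver's code):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static pid_t worker_pid;

	static int worker_thread(void *unused)
	{
		/* Publish our pid before any work, so checks against
		 * worker_pid are valid from the first event onward. */
		worker_pid = current->pid;

		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			/* ... drain the event list here ... */
		}
		return 0;
	}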