Diffstat (limited to 'drivers'): 96 files changed, 1926 insertions, 413 deletions
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index d1585f073776..6112d942499b 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -21,6 +21,7 @@ struct regmap_irq_chip_data { struct mutex lock; + struct lock_class_key lock_key; struct irq_chip irq_chip; struct regmap *map; @@ -801,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, goto err_alloc; } - mutex_init(&d->lock); + /* + * If one regmap-irq is the parent of another then we'll try + * to lock the child with the parent locked, use an explicit + * lock_key so lockdep can figure out what's going on. + */ + lockdep_register_key(&d->lock_key); + mutex_init_with_key(&d->lock, &d->lock_key); for (i = 0; i < chip->num_irqs; i++) d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride] @@ -816,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->mask_buf[i], chip->irq_drv_data); if (ret) - goto err_alloc; + goto err_mutex; } if (chip->mask_base && !chip->handle_mask_sync) { @@ -827,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } @@ -838,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } @@ -855,7 +862,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret != 0) { dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); - goto err_alloc; + goto err_mutex; } } @@ -879,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } } @@ -901,7 +908,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (ret != 0) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", reg, ret); - goto err_alloc; + goto err_mutex; } } } @@ -910,7 +917,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, if (chip->status_is_level) { ret = read_irq_data(d); if (ret < 0) - goto err_alloc; + goto err_mutex; memcpy(d->prev_status_buf, d->status_buf, array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0]))); @@ -918,7 +925,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ret = regmap_irq_create_domain(fwnode, irq_base, chip, d); if (ret) - goto err_alloc; + goto err_mutex; ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags | IRQF_ONESHOT, @@ -935,6 +942,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, err_domain: /* Should really dispose of the domain but... 
*/ +err_mutex: + mutex_destroy(&d->lock); + lockdep_unregister_key(&d->lock_key); err_alloc: kfree(d->type_buf); kfree(d->type_buf_def); @@ -1027,6 +1037,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) kfree(d->config_buf[i]); kfree(d->config_buf); } + mutex_destroy(&d->lock); + lockdep_unregister_key(&d->lock_key); kfree(d); } EXPORT_SYMBOL_GPL(regmap_del_irq_chip); diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c index 553b1a713ab9..a423228e201b 100644 --- a/drivers/block/zloop.c +++ b/drivers/block/zloop.c @@ -700,6 +700,8 @@ static void zloop_free_disk(struct gendisk *disk) struct zloop_device *zlo = disk->private_data; unsigned int i; + blk_mq_free_tag_set(&zlo->tag_set); + for (i = 0; i < zlo->nr_zones; i++) { struct zloop_zone *zone = &zlo->zones[i]; @@ -1080,7 +1082,6 @@ static int zloop_ctl_remove(struct zloop_options *opts) del_gendisk(zlo->disk); put_disk(zlo->disk); - blk_mq_free_tag_set(&zlo->tag_set); pr_info("Removed device %d\n", opts->id); diff --git a/drivers/dpll/zl3073x/Kconfig b/drivers/dpll/zl3073x/Kconfig index 7db262ab8458..5bbca1400581 100644 --- a/drivers/dpll/zl3073x/Kconfig +++ b/drivers/dpll/zl3073x/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config ZL3073X - tristate "Microchip Azurite DPLL/PTP/SyncE devices" + tristate "Microchip Azurite DPLL/PTP/SyncE devices" if COMPILE_TEST depends on NET select DPLL select NET_DEVLINK @@ -16,9 +16,9 @@ config ZL3073X config ZL3073X_I2C tristate "I2C bus implementation for Microchip Azurite devices" - depends on I2C && ZL3073X + depends on I2C && NET select REGMAP_I2C - default m + select ZL3073X help This is I2C bus implementation for Microchip Azurite DPLL/PTP/SyncE devices. @@ -28,9 +28,9 @@ config ZL3073X_I2C config ZL3073X_SPI tristate "SPI bus implementation for Microchip Azurite devices" - depends on SPI && ZL3073X + depends on NET && SPI select REGMAP_SPI - default m + select ZL3073X help This is SPI bus implementation for Microchip Azurite DPLL/PTP/SyncE devices. diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 16baa038d412..d528c94c5859 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -263,6 +263,14 @@ config EFI_COCO_SECRET virt/coco/efi_secret module to access the secrets, which in turn allows userspace programs to access the injected secrets. +config OVMF_DEBUG_LOG + bool "Expose OVMF firmware debug log via sysfs" + depends on EFI + help + Recent OVMF versions (edk2-stable202508 + newer) can write + their debug log to a memory buffer. This driver exposes the + log content via sysfs (/sys/firmware/efi/ovmf_debug_log). 
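The new OVMF_DEBUG_LOG option above exposes the firmware log as a read-only binary attribute at /sys/firmware/efi/ovmf_debug_log. A minimal userspace sketch for dumping it; only the sysfs path comes from the patch, the program itself is illustrative:

/*
 * Dump the OVMF debug log exposed by CONFIG_OVMF_DEBUG_LOG.
 * Illustrative only; the attribute path is the one added above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/efi/ovmf_debug_log", "r");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("ovmf_debug_log");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}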
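The regmap-irq hunks earlier in this series give each chip its own lock_class_key so that locking a child regmap-irq chip while its parent's mutex is held is not reported by lockdep as recursive locking. A minimal sketch of that pattern, assuming hypothetical foo_chip names; the lockdep/mutex calls are the ones the patch uses:

/*
 * Per-instance lock class for a dynamically allocated mutex, as in the
 * regmap-irq change above. struct foo_chip and foo_chip_init/exit are
 * hypothetical names used only for illustration.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_chip {
	struct mutex lock;
	struct lock_class_key lock_key;	/* one lock class per instance */
};

static struct foo_chip *foo_chip_init(void)
{
	struct foo_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);

	if (!chip)
		return NULL;

	/*
	 * Register a dedicated key so a parent and a child instance end
	 * up in different lock classes and nesting them is allowed.
	 */
	lockdep_register_key(&chip->lock_key);
	mutex_init_with_key(&chip->lock, &chip->lock_key);
	return chip;
}

static void foo_chip_exit(struct foo_chip *chip)
{
	mutex_destroy(&chip->lock);
	lockdep_unregister_key(&chip->lock_key);	/* only after the mutex is gone */
	kfree(chip);
}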
+ config UNACCEPTED_MEMORY bool depends on EFI_STUB diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index a2d0009560d0..8efbcf699e4f 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o +obj-$(CONFIG_OVMF_DEBUG_LOG) += ovmf-debug-log.o obj-$(CONFIG_SYSFB) += sysfb_efi.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index e57bff702b5f..1ce428e2ac8a 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -45,6 +45,7 @@ struct efi __read_mostly efi = { .esrt = EFI_INVALID_TABLE_ADDR, .tpm_log = EFI_INVALID_TABLE_ADDR, .tpm_final_log = EFI_INVALID_TABLE_ADDR, + .ovmf_debug_log = EFI_INVALID_TABLE_ADDR, #ifdef CONFIG_LOAD_UEFI_KEYS .mokvar_table = EFI_INVALID_TABLE_ADDR, #endif @@ -473,6 +474,10 @@ static int __init efisubsys_init(void) platform_device_register_simple("efi_secret", 0, NULL, 0); #endif + if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) && + efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR) + ovmf_log_probe(efi.ovmf_debug_log); + return 0; err_remove_group: @@ -617,6 +622,9 @@ static const efi_config_table_type_t common_tables[] __initconst = { {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" }, {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" }, {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" }, +#ifdef CONFIG_OVMF_DEBUG_LOG + {OVMF_MEMORY_LOG_TABLE_GUID, &efi.ovmf_debug_log, "OvmfDebugLog" }, +#endif #ifdef CONFIG_EFI_RCI2_TABLE {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys }, #endif diff --git a/drivers/firmware/efi/libstub/printk.c b/drivers/firmware/efi/libstub/printk.c index 3a67a2cea7bd..bc599212c05d 100644 --- a/drivers/firmware/efi/libstub/printk.c +++ b/drivers/firmware/efi/libstub/printk.c @@ -5,13 +5,13 @@ #include <linux/ctype.h> #include <linux/efi.h> #include <linux/kernel.h> -#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */ +#include <linux/kern_levels.h> #include <asm/efi.h> #include <asm/setup.h> #include "efistub.h" -int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; +int efi_loglevel = LOGLEVEL_NOTICE; /** * efi_char16_puts() - Write a UCS-2 encoded string to the console diff --git a/drivers/firmware/efi/ovmf-debug-log.c b/drivers/firmware/efi/ovmf-debug-log.c new file mode 100644 index 000000000000..5b2471ffaeed --- /dev/null +++ b/drivers/firmware/efi/ovmf-debug-log.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sysfs.h> + +#define OVMF_DEBUG_LOG_MAGIC1 0x3167646d666d766f // "ovmfmdg1" +#define OVMF_DEBUG_LOG_MAGIC2 0x3267646d666d766f // "ovmfmdg2" + +struct ovmf_debug_log_header { + u64 magic1; + u64 magic2; + u64 hdr_size; + u64 log_size; + u64 lock; // edk2 spinlock + u64 head_off; + u64 tail_off; + u64 truncated; + u8 fw_version[128]; +}; + +static struct ovmf_debug_log_header *hdr; +static u8 *logbuf; +static u64 logbufsize; + +static ssize_t ovmf_log_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + u64 start, end; + + start = hdr->head_off + offset; + if (hdr->head_off > hdr->tail_off && start >= hdr->log_size) + start -= hdr->log_size; + + end = 
start + count; + if (start > hdr->tail_off) { + if (end > hdr->log_size) + end = hdr->log_size; + } else { + if (end > hdr->tail_off) + end = hdr->tail_off; + } + + if (start > logbufsize || end > logbufsize) + return 0; + if (start >= end) + return 0; + + memcpy(buf, logbuf + start, end - start); + return end - start; +} + +static struct bin_attribute ovmf_log_bin_attr = { + .attr = { + .name = "ovmf_debug_log", + .mode = 0444, + }, + .read = ovmf_log_read, +}; + +int __init ovmf_log_probe(unsigned long ovmf_debug_log_table) +{ + int ret = -EINVAL; + u64 size; + + /* map + verify header */ + hdr = memremap(ovmf_debug_log_table, sizeof(*hdr), MEMREMAP_WB); + if (!hdr) { + pr_err("OVMF debug log: header map failed\n"); + return -EINVAL; + } + + if (hdr->magic1 != OVMF_DEBUG_LOG_MAGIC1 || + hdr->magic2 != OVMF_DEBUG_LOG_MAGIC2) { + printk(KERN_ERR "OVMF debug log: magic mismatch\n"); + goto err_unmap; + } + + size = hdr->hdr_size + hdr->log_size; + pr_info("OVMF debug log: firmware version: \"%s\"\n", hdr->fw_version); + pr_info("OVMF debug log: buffer size: %lluk\n", size / 1024); + + /* map complete log buffer */ + memunmap(hdr); + hdr = memremap(ovmf_debug_log_table, size, MEMREMAP_WB); + if (!hdr) { + pr_err("OVMF debug log: buffer map failed\n"); + return -EINVAL; + } + logbuf = (void *)hdr + hdr->hdr_size; + logbufsize = hdr->log_size; + + ovmf_log_bin_attr.size = size; + ret = sysfs_create_bin_file(efi_kobj, &ovmf_log_bin_attr); + if (ret != 0) { + pr_err("OVMF debug log: sysfs register failed\n"); + goto err_unmap; + } + + return 0; + +err_unmap: + memunmap(hdr); + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6f93473436be..01d234cf8156 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2570,9 +2570,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) adev->firmware.gpu_info_fw = NULL; - if (adev->mman.discovery_bin) - return 0; - switch (adev->asic_type) { default: return 0; @@ -2594,6 +2591,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "arcturus"; break; case CHIP_NAVI12: + if (adev->mman.discovery_bin) + return 0; chip_name = "navi12"; break; } @@ -3271,6 +3270,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) * always assumed to be lost. */ switch (amdgpu_asic_reset_method(adev)) { + case AMD_RESET_METHOD_LEGACY: case AMD_RESET_METHOD_LINK: case AMD_RESET_METHOD_BACO: case AMD_RESET_METHOD_MODE1: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 81b3443c8d7f..efe0058b48ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -276,7 +276,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, u32 msg; if (!amdgpu_sriov_vf(adev)) { - /* It can take up to a second for IFWI init to complete on some dGPUs, + /* It can take up to two second for IFWI init to complete on some dGPUs, * but generally it should be in the 60-100ms range. Normally this starts * as soon as the device gets power so by the time the OS loads this has long * completed. However, when a card is hotplugged via e.g., USB4, we need to @@ -284,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, * continue. 
*/ - for (i = 0; i < 1000; i++) { + for (i = 0; i < 2000; i++) { msg = RREG32(mmMP0_SMN_C2PMSG_33); if (msg & 0x80000000) break; @@ -2555,40 +2555,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_RAVEN: - case CHIP_VEGA20: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - /* this is not fatal. We have a fallback below - * if the new firmwares are not present. some of - * this will be overridden below to keep things - * consistent with the current behavior. + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. */ - r = amdgpu_discovery_reg_base_init(adev); - if (!r) { - amdgpu_discovery_harvest_ip(adev); - amdgpu_discovery_get_gfx_info(adev); - amdgpu_discovery_get_mall_info(adev); - amdgpu_discovery_get_vcn_info(adev); - } - break; - default: - r = amdgpu_discovery_reg_base_init(adev); - if (r) { - drm_err(&adev->ddev, "discovery failed: %d\n", r); - return r; - } - - amdgpu_discovery_harvest_ip(adev); - amdgpu_discovery_get_gfx_info(adev); - amdgpu_discovery_get_mall_info(adev); - amdgpu_discovery_get_vcn_info(adev); - break; - } - - switch (adev->asic_type) { - case CHIP_VEGA10: + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 4; @@ -2611,6 +2582,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); break; case CHIP_VEGA12: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 4; @@ -2633,6 +2609,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); break; case CHIP_RAVEN: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 1; adev->vcn.num_vcn_inst = 1; @@ -2674,6 +2655,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) } break; case CHIP_VEGA20: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega20_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 8; @@ -2697,6 +2683,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); break; case CHIP_ARCTURUS: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); arct_reg_base_init(adev); adev->sdma.num_instances = 8; adev->vcn.num_vcn_inst = 2; @@ -2725,6 +2716,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); break; case CHIP_ALDEBARAN: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. 
+ */ + amdgpu_discovery_init(adev); aldebaran_reg_base_init(adev); adev->sdma.num_instances = 5; adev->vcn.num_vcn_inst = 2; @@ -2751,6 +2747,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); break; default: + r = amdgpu_discovery_reg_base_init(adev); + if (r) { + drm_err(&adev->ddev, "discovery failed: %d\n", r); + return r; + } + + amdgpu_discovery_harvest_ip(adev); + amdgpu_discovery_get_gfx_info(adev); + amdgpu_discovery_get_mall_info(adev); + amdgpu_discovery_get_vcn_info(adev); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index e6061d45f142..9b1c55115921 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -365,13 +365,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r); goto error; } - /* - * The VM structure might be released after the VMID is - * assigned, we had multiple problems with people trying to use - * the VM pointer so better set it to NULL. - */ - if (!fence) - job->vm = NULL; return fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index e56ba93a8df6..a974265837f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -55,7 +55,8 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev) bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev) || !adev->asic_funcs->get_pcie_replay_count || + if (amdgpu_sriov_vf(adev) || !adev->asic_funcs || + !adev->asic_funcs->get_pcie_replay_count || (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count)) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 914cf4bfb033..811124ff88a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -227,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, uint16_t *nps_modes) { struct amdgpu_device *adev = xcp_mgr->adev; + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode))) return -EINVAL; @@ -250,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, *num_xcp = 4; *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE); + if (gc_ver == IP_VERSION(9, 5, 0)) + *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; case AMDGPU_CPX_PARTITION_MODE: *num_xcp = NUM_XCC(adev->gfx.xcc_mask); *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE); - if (amdgpu_sriov_vf(adev)) + if (gc_ver == IP_VERSION(9, 5, 0)) *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 134c4ec10887..910337dc28d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -36,40 +36,47 @@ static const char *mmhub_client_ids_v3_0_1[][2] = { [0][0] = "VMC", + [1][0] = "ISPXT", + [2][0] = "ISPIXT", [4][0] = "DCEDMC", [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", - [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = 
"DBGUNBIO", + [8][0] = "MPM", + [12][0] = "ISPTNR", + [14][0] = "ISPCRD0", + [15][0] = "ISPCRD1", + [16][0] = "ISPCRD2", + [22][0] = "HDP", + [23][0] = "LSDMA", + [24][0] = "JPEG", + [27][0] = "VSCH", + [28][0] = "VCNU", + [29][0] = "VCN", + [1][1] = "ISPXT", + [2][1] = "ISPIXT", [3][1] = "DCEDWB", [4][1] = "DCEDMC", [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", - [8][1] = "MPIO", - [10][1] = "DBGU0", - [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", - [14][1] = "XDP", - [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = "LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [8][1] = "MPM", + [10][1] = "ISPMWR0", + [11][1] = "ISPMWR1", + [12][1] = "ISPTNR", + [13][1] = "ISPSWR", + [14][1] = "ISPCWR0", + [15][1] = "ISPCWR1", + [16][1] = "ISPCWR2", + [17][1] = "ISPCWR3", + [18][1] = "XDP", + [21][1] = "OSSSYS", + [22][1] = "HDP", + [23][1] = "LSDMA", + [24][1] = "JPEG", + [27][1] = "VSCH", + [28][1] = "VCNU", + [29][1] = "VCN", }; static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c index bc3d6c2fc87a..f6fc9778bc30 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c @@ -40,30 +40,129 @@ static const char *mmhub_client_ids_v3_3[][2] = { [0][0] = "VMC", + [1][0] = "ISPXT", + [2][0] = "ISPIXT", [4][0] = "DCEDMC", [6][0] = "MP0", [7][0] = "MP1", [8][0] = "MPM", + [9][0] = "ISPPDPRD", + [10][0] = "ISPCSTATRD", + [11][0] = "ISPBYRPRD", + [12][0] = "ISPRGBPRD", + [13][0] = "ISPMCFPRD", + [14][0] = "ISPMCFPRD1", + [15][0] = "ISPYUVPRD", + [16][0] = "ISPMCSCRD", + [17][0] = "ISPGDCRD", + [18][0] = "ISPLMERD", + [22][0] = "ISPXT1", + [23][0] = "ISPIXT1", [24][0] = "HDP", [25][0] = "LSDMA", [26][0] = "JPEG", [27][0] = "VPE", + [28][0] = "VSCH", [29][0] = "VCNU", [30][0] = "VCN", + [1][1] = "ISPXT", + [2][1] = "ISPIXT", [3][1] = "DCEDWB", [4][1] = "DCEDMC", + [5][1] = "ISPCSISWR", [6][1] = "MP0", [7][1] = "MP1", [8][1] = "MPM", + [9][1] = "ISPPDPWR", + [10][1] = "ISPCSTATWR", + [11][1] = "ISPBYRPWR", + [12][1] = "ISPRGBPWR", + [13][1] = "ISPMCFPWR", + [14][1] = "ISPMWR0", + [15][1] = "ISPYUVPWR", + [16][1] = "ISPMCSCWR", + [17][1] = "ISPGDCWR", + [18][1] = "ISPLMEWR", + [20][1] = "ISPMWR2", [21][1] = "OSSSYS", + [22][1] = "ISPXT1", + [23][1] = "ISPIXT1", [24][1] = "HDP", [25][1] = "LSDMA", [26][1] = "JPEG", [27][1] = "VPE", + [28][1] = "VSCH", [29][1] = "VCNU", [30][1] = "VCN", }; +static const char *mmhub_client_ids_v3_3_1[][2] = { + [0][0] = "VMC", + [4][0] = "DCEDMC", + [6][0] = "MP0", + [7][0] = "MP1", + [8][0] = "MPM", + [24][0] = "HDP", + [25][0] = "LSDMA", + [26][0] = "JPEG0", + [27][0] = "VPE0", + [28][0] = "VSCH", + [29][0] = "VCNU0", + [30][0] = "VCN0", + [32+1][0] = "ISPXT", + [32+2][0] = "ISPIXT", + [32+9][0] = "ISPPDPRD", + [32+10][0] = "ISPCSTATRD", + [32+11][0] = "ISPBYRPRD", + [32+12][0] = "ISPRGBPRD", + [32+13][0] = "ISPMCFPRD", + [32+14][0] = "ISPMCFPRD1", + [32+15][0] = "ISPYUVPRD", + [32+16][0] = "ISPMCSCRD", + [32+17][0] = "ISPGDCRD", + [32+18][0] = "ISPLMERD", + [32+22][0] = "ISPXT1", + [32+23][0] = "ISPIXT1", + [32+26][0] = "JPEG1", + [32+27][0] = "VPE1", + [32+29][0] = "VCNU1", + [32+30][0] = "VCN1", + [3][1] = "DCEDWB", + [4][1] = "DCEDMC", + [6][1] = "MP0", + [7][1] = "MP1", + [8][1] = "MPM", + [21][1] = "OSSSYS", + [24][1] = "HDP", + [25][1] = "LSDMA", + [26][1] = "JPEG0", + [27][1] = "VPE0", + [28][1] = "VSCH", + [29][1] = 
"VCNU0", + [30][1] = "VCN0", + [32+1][1] = "ISPXT", + [32+2][1] = "ISPIXT", + [32+5][1] = "ISPCSISWR", + [32+9][1] = "ISPPDPWR", + [32+10][1] = "ISPCSTATWR", + [32+11][1] = "ISPBYRPWR", + [32+12][1] = "ISPRGBPWR", + [32+13][1] = "ISPMCFPWR", + [32+14][1] = "ISPMWR0", + [32+15][1] = "ISPYUVPWR", + [32+16][1] = "ISPMCSCWR", + [32+17][1] = "ISPGDCWR", + [32+18][1] = "ISPLMEWR", + [32+19][1] = "ISPMWR1", + [32+20][1] = "ISPMWR2", + [32+22][1] = "ISPXT1", + [32+23][1] = "ISPIXT1", + [32+26][1] = "JPEG1", + [32+27][1] = "VPE1", + [32+29][1] = "VCNU1", + [32+30][1] = "VCN1", +}; + static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid, uint32_t flush_type) { @@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev, switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 3, 0): - case IP_VERSION(3, 3, 1): case IP_VERSION(3, 3, 2): mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ? mmhub_client_ids_v3_3[cid][rw] : cid == 0x140 ? "UMSCH" : NULL; break; + case IP_VERSION(3, 3, 1): + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ? + mmhub_client_ids_v3_3_1[cid][rw] : + cid == 0x140 ? "UMSCH" : NULL; + break; default: mmhub_cid = NULL; break; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index b8b06d4c5882..326ecc8d37d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -1353,7 +1353,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { case IP_VERSION(7, 0, 0): case IP_VERSION(7, 0, 1): - if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq) + if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c457be3a3c56..9e74c9822e62 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1218,6 +1218,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block) AMD_PG_SUPPORT_JPEG; /*TODO: need a new external_rev_id for GC 9.4.4? 
*/ adev->external_rev_id = adev->rev_id + 0x46; + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) + adev->external_rev_id = adev->rev_id + 0x50; break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2d91027e2a74..6c5c7c1bf5ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2725,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm, dqm_lock(dqm); mqd_mgr = dqm->mqd_mgrs[mqd_type]; - *mqd_size = mqd_mgr->mqd_size; + *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask); *ctl_stack_size = 0; if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index aee2212e52f6..33aa23450b3f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -78,8 +78,8 @@ err_ioctl: static void kfd_exit(void) { kfd_cleanup_processes(); - kfd_debugfs_fini(); kfd_process_destroy_wq(); + kfd_debugfs_fini(); kfd_procfs_shutdown(); kfd_topology_shutdown(); kfd_chardev_exit(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 97933d2a3803..f2dee320fada 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac { struct v9_mqd *m = get_mqd(mqd); - *ctl_stack_size = m->cp_hqd_cntl_stack_size; + *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask); } static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst) @@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size); } +static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm, + void *mqd, + void *mqd_dst, + void *ctl_stack_dst) +{ + struct v9_mqd *m; + int xcc; + uint64_t size = get_mqd(mqd)->cp_mqd_stride_size; + + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { + m = get_mqd(mqd + size * xcc); + + checkpoint_mqd(mm, m, + (uint8_t *)mqd_dst + sizeof(*m) * xcc, + (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc); + } +} + static void restore_mqd(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, @@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, const void *mqd_src, const void *ctl_stack_src, u32 ctl_stack_size) { - restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size); - if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) { - struct v9_mqd *m; + struct kfd_mem_obj xcc_mqd_mem_obj; + u32 mqd_ctl_stack_size; + struct v9_mqd *m; + u32 num_xcc; + int xcc; - m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr; - m->cp_hqd_pq_doorbell_control |= 1 << - CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT; + uint64_t offset = mm->mqd_stride(mm, qp); + + mm->dev->dqm->current_logical_xcc_start++; + + num_xcc = NUM_XCC(mm->dev->xcc_mask); + mqd_ctl_stack_size = ctl_stack_size / num_xcc; + + memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); + + /* Set the MQD pointer and gart address to XCC0 MQD */ + *mqd = mqd_mem_obj->cpu_ptr; + if 
(gart_addr) + *gart_addr = mqd_mem_obj->gpu_addr; + + for (xcc = 0; xcc < num_xcc; xcc++) { + get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc); + restore_mqd(mm, (void **)&m, + &xcc_mqd_mem_obj, + NULL, + qp, + (uint8_t *)mqd_src + xcc * sizeof(*m), + (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size, + mqd_ctl_stack_size); } } static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, @@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->free_mqd = kfd_free_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->get_checkpoint_info = get_checkpoint_info; - mqd->checkpoint_mqd = checkpoint_mqd; mqd->mqd_size = sizeof(struct v9_mqd); mqd->mqd_stride = mqd_stride_v9; #if defined(CONFIG_DEBUG_FS) @@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_v9_4_3; mqd->load_mqd = load_mqd_v9_4_3; mqd->update_mqd = update_mqd_v9_4_3; - mqd->restore_mqd = restore_mqd_v9_4_3; mqd->destroy_mqd = destroy_mqd_v9_4_3; mqd->get_wave_state = get_wave_state_v9_4_3; + mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3; + mqd->restore_mqd = restore_mqd_v9_4_3; } else { mqd->init_mqd = init_mqd; mqd->load_mqd = load_mqd; mqd->update_mqd = update_mqd; - mqd->restore_mqd = restore_mqd; mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->get_wave_state = get_wave_state; + mqd->checkpoint_mqd = checkpoint_mqd; + mqd->restore_mqd = restore_mqd; } break; case KFD_MQD_TYPE_HIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index c643e0ccec52..7fbb5c274ccc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd, q_data = (struct kfd_criu_queue_priv_data *)q_private_data; - /* data stored in this order: priv_data, mqd, ctl_stack */ + /* + * data stored in this order: + * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]... + */ q_data->mqd_size = mqd_size; q_data->ctl_stack_size = ctl_stack_size; @@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p, } static void set_queue_properties_from_criu(struct queue_properties *qp, - struct kfd_criu_queue_priv_data *q_data) + struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc) { qp->is_interop = false; qp->queue_percent = q_data->q_percent; @@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp, qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size; qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address; qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size; - qp->ctl_stack_size = q_data->ctl_stack_size; + if (q_data->type == KFD_QUEUE_TYPE_COMPUTE) + qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc; + else + qp->ctl_stack_size = q_data->ctl_stack_size; + qp->type = q_data->type; qp->format = q_data->format; } @@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p, goto exit; } - /* data stored in this order: mqd, ctl_stack */ + /* + * data stored in this order: + * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]... 
+ */ mqd = q_extra_data; ctl_stack = mqd + q_data->mqd_size; memset(&qp, 0, sizeof(qp)); - set_queue_properties_from_criu(&qp, q_data); + set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask)); print_queue_properties(&qp); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2a175fc0399c..cd0e2976e268 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4756,16 +4756,16 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, return 1; } -/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */ +/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */ static inline u32 scale_input_to_fw(int min, int max, u64 input) { - return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min); + return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min); } -/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */ +/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */ static inline u32 scale_fw_to_input(int min, int max, u64 input) { - return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL); + return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL); } static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 2551823382f8..010172f930ae 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -661,6 +661,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) { + struct drm_plane_state *primary_state; + + /* Pull in primary plane for correct VRR handling */ + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + if (IS_ERR(primary_state)) + return PTR_ERR(primary_state); + } + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 33b9d36619ff..4071851f9e86 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p return NULL; } dce60_clk_mgr_construct(ctx, clk_mgr); - dce_clk_mgr_construct(ctx, clk_mgr); return &clk_mgr->base; } #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26feefbb8990..f5ad0a177038 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -245,6 +245,11 @@ int dce_set_clock( pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; + /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */ + if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 || + clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4) + pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0; + if (clk_mgr_dce->dfs_bypass_active) pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c 
index 28aca7017f0f..9ab0ee20ca6f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -938,17 +938,18 @@ static void dc_destruct(struct dc *dc) if (dc->link_srv) link_destroy_link_service(&dc->link_srv); - if (dc->ctx->gpio_service) - dal_gpio_service_destroy(&dc->ctx->gpio_service); + if (dc->ctx) { + if (dc->ctx->gpio_service) + dal_gpio_service_destroy(&dc->ctx->gpio_service); - if (dc->ctx->created_bios) - dal_bios_parser_destroy(&dc->ctx->dc_bios); + if (dc->ctx->created_bios) + dal_bios_parser_destroy(&dc->ctx->dc_bios); + kfree(dc->ctx->logger); + dc_perf_trace_destroy(&dc->ctx->perf_trace); - kfree(dc->ctx->logger); - dc_perf_trace_destroy(&dc->ctx->perf_trace); - - kfree(dc->ctx); - dc->ctx = NULL; + kfree(dc->ctx); + dc->ctx = NULL; + } kfree(dc->bw_vbios); dc->bw_vbios = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index 58b59d52dc9d..53b60044653f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -373,7 +373,7 @@ static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, - .num_pll = 2, + .num_pll = 3, .num_ddc = 6, }; @@ -389,7 +389,7 @@ static const struct resource_caps res_cap_64 = { .num_timing_generator = 2, .num_audio = 2, .num_stream_encoder = 2, - .num_pll = 2, + .num_pll = 3, .num_ddc = 2, }; @@ -973,21 +973,24 @@ static bool dce60_construct( if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */ pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clk_src_count = 1; + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { @@ -1365,21 +1368,24 @@ static bool dce64_construct( if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. 
*/ pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); - pool->base.clk_src_count = 1; + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 756afe78a6e5..b47cb4a5f488 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -77,6 +77,9 @@ static void smu_power_profile_mode_get(struct smu_context *smu, static void smu_power_profile_mode_put(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type); +static int smu_od_edit_dpm_table(void *handle, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -2195,6 +2198,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) int ret; struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (amdgpu_sriov_multi_vf_mode(adev)) return 0; @@ -2226,6 +2230,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) adev->pm.dpm_enabled = true; + if (smu->current_power_limit) { + ret = smu_set_power_limit(smu, smu->current_power_limit); + if (ret && ret != -EOPNOTSUPP) + return ret; + } + + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); + if (ret) + return ret; + } + dev_info(adev->dev, "SMU is resumed successfully!\n"); return 0; diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index ed8e640b96b0..801235a5bc0a 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -3239,14 +3239,22 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); - u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); - bool enable = intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), - crtc_state); + intel_wakeref_t wakeref; int i; + u8 owned_lane_mask; - if (DISPLAY_VER(display) < 20) + if (DISPLAY_VER(display) < 20 || + !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state)) return; + owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); + + wakeref = intel_cx0_phy_transaction_begin(encoder); + + if (intel_encoder_is_c10phy(encoder)) + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0, + 
C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); + for (i = 0; i < 4; i++) { int tx = i % 2 + 1; u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1; @@ -3256,9 +3264,10 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder, intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0), CONTROL0_MAC_TRANSMIT_LFPS, - enable ? CONTROL0_MAC_TRANSMIT_LFPS : 0, - MB_WRITE_COMMITTED); + CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED); } + + intel_cx0_phy_transaction_end(encoder, wakeref); } static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 2bb2bc052120..714d5702dfd7 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -5,6 +5,7 @@ config DRM_XE depends on KUNIT || !KUNIT depends on INTEL_VSEC || !INTEL_VSEC depends on X86_PLATFORM_DEVICES || !(X86 && ACPI) + depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular # the shmem_readpage() which depends upon tmpfs diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 5bd2f7d7b4ea..6ece4defa9df 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -802,10 +802,6 @@ int xe_device_probe(struct xe_device *xe) return err; } - err = xe_devcoredump_init(xe); - if (err) - return err; - /* * From here on, if a step fails, make sure a Driver-FLR is triggereed */ @@ -870,6 +866,10 @@ int xe_device_probe(struct xe_device *xe) XE_WA(xe->tiles->media_gt, 15015404425_disable)) XE_DEVICE_WA_DISABLE(xe, 15015404425); + err = xe_devcoredump_init(xe); + if (err) + return err; + xe_nvm_init(xe); err = xe_heci_gsc_init(xe); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index 35489fa81825..bdbd15f3afe3 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -16,6 +16,7 @@ #include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_service.h" #include "xe_gt_sriov_printk.h" +#include "xe_guc_submit.h" #include "xe_mmio.h" #include "xe_pm.h" @@ -47,9 +48,16 @@ static int pf_alloc_metadata(struct xe_gt *gt) static void pf_init_workers(struct xe_gt *gt) { + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); INIT_WORK(>->sriov.pf.workers.restart, pf_worker_restart_func); } +static void pf_fini_workers(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + disable_work_sync(>->sriov.pf.workers.restart); +} + /** * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF. * @gt: the &xe_gt to initialize @@ -79,6 +87,21 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt) return 0; } +static void pf_fini_action(void *arg) +{ + struct xe_gt *gt = arg; + + pf_fini_workers(gt); +} + +static int pf_init_late(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + xe_gt_assert(gt, IS_SRIOV_PF(xe)); + return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt); +} + /** * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF. 
* @gt: the &xe_gt to initialize @@ -95,7 +118,15 @@ int xe_gt_sriov_pf_init(struct xe_gt *gt) if (err) return err; - return xe_gt_sriov_pf_migration_init(gt); + err = xe_gt_sriov_pf_migration_init(gt); + if (err) + return err; + + err = pf_init_late(gt); + if (err) + return err; + + return 0; } static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe) @@ -230,3 +261,27 @@ void xe_gt_sriov_pf_restart(struct xe_gt *gt) { pf_queue_restart(gt); } + +static void pf_flush_restart(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + flush_work(>->sriov.pf.workers.restart); +} + +/** + * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready. + * @gt: the &xe_gt + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt) +{ + /* don't wait if there is another ongoing reset */ + if (xe_guc_read_stopped(>->uc.guc)) + return -EBUSY; + + pf_flush_restart(gt); + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index e2b2ff8132dc..e7fde3f9937a 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -11,6 +11,7 @@ struct xe_gt; #ifdef CONFIG_PCI_IOV int xe_gt_sriov_pf_init_early(struct xe_gt *gt); int xe_gt_sriov_pf_init(struct xe_gt *gt); +int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt); void xe_gt_sriov_pf_init_hw(struct xe_gt *gt); void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid); void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index bf679b21f485..3ed245e04d0c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -22,6 +22,7 @@ #include "xe_gt_sriov_pf_policy.h" #include "xe_gt_sriov_pf_service.h" #include "xe_pm.h" +#include "xe_sriov_pf.h" /* * /sys/kernel/debug/dri/0/ @@ -205,7 +206,8 @@ static int CONFIG##_set(void *data, u64 val) \ return -EOVERFLOW; \ \ xe_pm_runtime_get(xe); \ - err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \ + err = xe_sriov_pf_wait_ready(xe) ?: \ + xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \ xe_pm_runtime_put(xe); \ \ return err; \ diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c index 859a3ba91be5..243dad3e2418 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture.c +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -1817,6 +1817,12 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm str_yes_no(snapshot->kernel_reserved)); for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) { + /* + * FIXME: During devcoredump print we should avoid accessing the + * driver pointers for gt or engine. Printing should be done only + * using the snapshot captured. Here we are accessing the gt + * pointer. It should be fixed. 
+ */ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type, capture_class, false); snapshot_print_by_list_order(snapshot, p, type, list); diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index 107ffe87808c..d9391bd08194 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -27,6 +27,8 @@ #define DEFAULT_PROBE_DISPLAY true #define DEFAULT_VRAM_BAR_SIZE 0 #define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE +#define DEFAULT_MAX_VFS ~0 +#define DEFAULT_MAX_VFS_STR "unlimited" #define DEFAULT_WEDGED_MODE 1 #define DEFAULT_SVM_NOTIFIER_SIZE 512 @@ -34,6 +36,9 @@ struct xe_modparam xe_modparam = { .probe_display = DEFAULT_PROBE_DISPLAY, .guc_log_level = DEFAULT_GUC_LOG_LEVEL, .force_probe = DEFAULT_FORCE_PROBE, +#ifdef CONFIG_PCI_IOV + .max_vfs = DEFAULT_MAX_VFS, +#endif .wedged_mode = DEFAULT_WEDGED_MODE, .svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE, /* the rest are 0 by default */ @@ -79,7 +84,8 @@ MODULE_PARM_DESC(force_probe, module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400); MODULE_PARM_DESC(max_vfs, "Limit number of Virtual Functions (VFs) that could be managed. " - "(0 = no VFs [default]; N = allow up to N VFs)"); + "(0=no VFs; N=allow up to N VFs " + "[default=" DEFAULT_MAX_VFS_STR "])"); #endif module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600); diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index 8813efdcafbb..447a7867eecb 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -12,6 +12,7 @@ #include "xe_pci_sriov.h" #include "xe_pm.h" #include "xe_sriov.h" +#include "xe_sriov_pf.h" #include "xe_sriov_pf_helpers.h" #include "xe_sriov_printk.h" @@ -138,6 +139,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) xe_assert(xe, num_vfs <= total_vfs); xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs)); + err = xe_sriov_pf_wait_ready(xe); + if (err) + goto out; + /* * We must hold additional reference to the runtime PM to keep PF in D0 * during VFs lifetime, as our VFs do not implement the PM capability. @@ -169,7 +174,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) failed: pf_unprovision_vfs(xe, num_vfs); xe_pm_runtime_put(xe); - +out: xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n", num_vfs, str_plural(num_vfs), ERR_PTR(err)); return err; diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c index afbdd894bd6e..27ddf3cc80e9 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf.c @@ -9,6 +9,7 @@ #include "xe_assert.h" #include "xe_device.h" +#include "xe_gt_sriov_pf.h" #include "xe_module.h" #include "xe_sriov.h" #include "xe_sriov_pf.h" @@ -103,6 +104,32 @@ int xe_sriov_pf_init_early(struct xe_device *xe) } /** + * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate. + * @xe: the &xe_device to test + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_wait_ready(struct xe_device *xe) +{ + struct xe_gt *gt; + unsigned int id; + int err; + + if (xe_device_wedged(xe)) + return -ECANCELED; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_wait_ready(gt); + if (err) + return err; + } + + return 0; +} + +/** * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information. 
* @xe: the &xe_device to print info from * @p: the &drm_printer diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h index c392c3fcf085..e3b34f8f5e04 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf.h @@ -15,6 +15,7 @@ struct xe_device; #ifdef CONFIG_PCI_IOV bool xe_sriov_pf_readiness(struct xe_device *xe); int xe_sriov_pf_init_early(struct xe_device *xe); +int xe_sriov_pf_wait_ready(struct xe_device *xe); void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root); void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p); #else diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 4fef4797b110..02432d4a5ccd 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -36,6 +36,15 @@ config ARM_MHU_V3 that provides different means of transports: supported extensions will be discovered and possibly managed at probe-time. +config AST2700_MBOX + tristate "ASPEED AST2700 IPC driver" + depends on ARCH_ASPEED || COMPILE_TEST + help + Mailbox driver implementation for ASPEED AST27XX SoCs. This driver + can be used to send message between different processors in SoC. + The driver provides mailbox support for sending interrupts to the + clients. Say Y here if you want to build this driver. + config CV1800_MBOX tristate "cv1800 mailbox" depends on ARCH_SOPHGO || COMPILE_TEST @@ -350,4 +359,14 @@ config CIX_MBOX is unidirectional. Say Y here if you want to use the CIX Mailbox support. +config BCM74110_MAILBOX + tristate "Brcmstb BCM74110 Mailbox" + depends on ARCH_BRCMSTB || COMPILE_TEST + default ARCH_BRCMSTB + help + Broadcom STB mailbox driver present starting with brcmstb bcm74110 + SoCs. The mailbox is a communication channel between the host + processor and coprocessor that handles various power management task + and more. + endif diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 786a46587ba1..98a68f838486 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o +obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o + obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o @@ -74,3 +76,5 @@ obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o + +obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c new file mode 100644 index 000000000000..83c6afe5411f --- /dev/null +++ b/drivers/mailbox/ast2700-mailbox.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright Aspeed Technology Inc. (C) 2025. 
All rights reserved + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Each bit in the register represents an IPC ID */ +#define IPCR_TX_TRIG 0x00 +#define IPCR_ENABLE 0x04 +#define IPCR_STATUS 0x08 +#define RX_IRQ(n) BIT(n) +#define RX_IRQ_MASK 0xf +#define IPCR_DATA 0x10 + +struct ast2700_mbox_data { + u8 num_chans; + u8 msg_size; +}; + +struct ast2700_mbox { + struct mbox_controller mbox; + u8 msg_size; + void __iomem *tx_regs; + void __iomem *rx_regs; + spinlock_t lock; +}; + +static inline int ch_num(struct mbox_chan *chan) +{ + return chan - chan->mbox->chans; +} + +static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx) +{ + return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx)); +} + +static irqreturn_t ast2700_mbox_irq(int irq, void *p) +{ + struct ast2700_mbox *mb = p; + void __iomem *data_reg; + int num_words = mb->msg_size / sizeof(u32); + u32 *word_data; + u32 status; + int n, i; + + /* Only examine channels that are currently enabled. */ + status = readl(mb->rx_regs + IPCR_ENABLE) & + readl(mb->rx_regs + IPCR_STATUS); + + if (!(status & RX_IRQ_MASK)) + return IRQ_NONE; + + for (n = 0; n < mb->mbox.num_chans; ++n) { + struct mbox_chan *chan = &mb->mbox.chans[n]; + + if (!(status & RX_IRQ(n))) + continue; + + data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n; + word_data = chan->con_priv; + /* Read the message data */ + for (i = 0; i < num_words; i++) + word_data[i] = readl(data_reg + i * sizeof(u32)); + + mbox_chan_received_data(chan, chan->con_priv); + + /* The IRQ can be cleared only once the FIFO is empty. 
*/ + writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS); + } + + return IRQ_HANDLED; +} + +static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx; + u32 *word_data = data; + int num_words = mb->msg_size / sizeof(u32); + int i; + + if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) { + dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx); + return -ENODEV; + } + + if (!(ast2700_mbox_tx_done(mb, idx))) { + dev_warn(mb->mbox.dev, "%s: Ch-%d last data has not finished\n", __func__, idx); + return -EBUSY; + } + + /* Write the message data */ + for (i = 0 ; i < num_words; i++) + writel(word_data[i], data_reg + i * sizeof(u32)); + + writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG); + dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx); + + return 0; +} + +static int ast2700_mbox_startup(struct mbox_chan *chan) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + void __iomem *reg = mb->rx_regs + IPCR_ENABLE; + unsigned long flags; + + spin_lock_irqsave(&mb->lock, flags); + writel(readl(reg) | BIT(idx), reg); + spin_unlock_irqrestore(&mb->lock, flags); + + return 0; +} + +static void ast2700_mbox_shutdown(struct mbox_chan *chan) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + void __iomem *reg = mb->rx_regs + IPCR_ENABLE; + unsigned long flags; + + spin_lock_irqsave(&mb->lock, flags); + writel(readl(reg) & ~BIT(idx), reg); + spin_unlock_irqrestore(&mb->lock, flags); +} + +static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan) +{ + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); + int idx = ch_num(chan); + + return ast2700_mbox_tx_done(mb, idx); +} + +static const struct mbox_chan_ops ast2700_mbox_chan_ops = { + .send_data = ast2700_mbox_send_data, + .startup = ast2700_mbox_startup, + .shutdown = ast2700_mbox_shutdown, + .last_tx_done = ast2700_mbox_last_tx_done, +}; + +static int ast2700_mbox_probe(struct platform_device *pdev) +{ + struct ast2700_mbox *mb; + const struct ast2700_mbox_data *dev_data; + struct device *dev = &pdev->dev; + int irq, ret; + + if (!pdev->dev.of_node) + return -ENODEV; + + dev_data = device_get_match_data(&pdev->dev); + + mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL); + if (!mb) + return -ENOMEM; + + mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans, + sizeof(*mb->mbox.chans), GFP_KERNEL); + if (!mb->mbox.chans) + return -ENOMEM; + + /* con_priv of each channel is used to store the message received */ + for (int i = 0; i < dev_data->num_chans; i++) { + mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size, + sizeof(u8), GFP_KERNEL); + if (!mb->mbox.chans[i].con_priv) + return -ENOMEM; + } + + platform_set_drvdata(pdev, mb); + + mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx"); + if (IS_ERR(mb->tx_regs)) + return PTR_ERR(mb->tx_regs); + + mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx"); + if (IS_ERR(mb->rx_regs)) + return PTR_ERR(mb->rx_regs); + + mb->msg_size = dev_data->msg_size; + mb->mbox.dev = dev; + mb->mbox.num_chans = dev_data->num_chans; + mb->mbox.ops = &ast2700_mbox_chan_ops; + mb->mbox.txdone_irq = false; + mb->mbox.txdone_poll = true; + mb->mbox.txpoll_period = 5; + spin_lock_init(&mb->lock); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, 
dev_name(dev), mb); + if (ret) + return ret; + + return devm_mbox_controller_register(dev, &mb->mbox); +} + +static const struct ast2700_mbox_data ast2700_dev_data = { + .num_chans = 4, + .msg_size = 0x20, +}; + +static const struct of_device_id ast2700_mbox_of_match[] = { + { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data }, + {} +}; +MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match); + +static struct platform_driver ast2700_mbox_driver = { + .driver = { + .name = "ast2700-mailbox", + .of_match_table = ast2700_mbox_of_match, + }, + .probe = ast2700_mbox_probe, +}; +module_platform_driver(ast2700_mbox_driver); + +MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>"); +MODULE_DESCRIPTION("ASPEED AST2700 IPC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c new file mode 100644 index 000000000000..2e7e86f3e6a4 --- /dev/null +++ b/drivers/mailbox/bcm74110-mailbox.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Broadcom BCM74110 Mailbox Driver + * + * Copyright (c) 2025 Broadcom + */ +#include <linux/list.h> +#include <linux/types.h> +#include <linux/workqueue.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/delay.h> +#include <linux/mailbox_controller.h> +#include <linux/bitfield.h> +#include <linux/slab.h> + +#define BCM_MBOX_BASE(sel) ((sel) * 0x40) +#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800) + +#define BCM_MBOX_CFGA 0x0 +#define BCM_MBOX_CFGB 0x4 +#define BCM_MBOX_CFGC 0x8 +#define BCM_MBOX_CFGD 0xc +#define BCM_MBOX_CTRL 0x10 +#define BCM_MBOX_CTRL_EN BIT(0) +#define BCM_MBOX_CTRL_CLR BIT(1) +#define BCM_MBOX_STATUS0 0x14 +#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28) +#define BCM_MBOX_STATUS0_FULL BIT(29) +#define BCM_MBOX_STATUS1 0x18 +#define BCM_MBOX_STATUS2 0x1c +#define BCM_MBOX_WDATA 0x20 +#define BCM_MBOX_RDATA 0x28 + +#define BCM_MBOX_IRQ_STATUS 0x0 +#define BCM_MBOX_IRQ_SET 0x4 +#define BCM_MBOX_IRQ_CLEAR 0x8 +#define BCM_MBOX_IRQ_MASK_STATUS 0xc +#define BCM_MBOX_IRQ_MASK_SET 0x10 +#define BCM_MBOX_IRQ_MASK_CLEAR 0x14 +#define BCM_MBOX_IRQ_TIMEOUT BIT(0) +#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1) +#define BCM_MBOX_IRQ_FULL BIT(2) +#define BCM_MBOX_IRQ_LOW_WM BIT(3) +#define BCM_MBOX_IRQ_HIGH_WM BIT(4) + +#define BCM_LINK_CODE0 0xbe0 +#define BCM_LINK_CODE1 0xbe1 +#define BCM_LINK_CODE2 0xbe2 + +enum { + BCM_MSG_FUNC_LINK_START = 0, + BCM_MSG_FUNC_LINK_STOP, + BCM_MSG_FUNC_SHMEM_TX, + BCM_MSG_FUNC_SHMEM_RX, + BCM_MSG_FUNC_SHMEM_STOP, + BCM_MSG_FUNC_MAX, +}; + +enum { + BCM_MSG_SVC_INIT = 0, + BCM_MSG_SVC_PMC, + BCM_MSG_SVC_SCMI, + BCM_MSG_SVC_DPFE, + BCM_MSG_SVC_MAX, +}; + +struct bcm74110_mbox_msg { + struct list_head list_entry; +#define BCM_MSG_VERSION_MASK GENMASK(31, 29) +#define BCM_MSG_VERSION 0x1 +#define BCM_MSG_REQ_MASK BIT(28) +#define BCM_MSG_RPLY_MASK BIT(27) +#define BCM_MSG_SVC_MASK GENMASK(26, 24) +#define BCM_MSG_FUNC_MASK GENMASK(23, 16) +#define BCM_MSG_LENGTH_MASK GENMASK(15, 4) +#define BCM_MSG_SLOT_MASK GENMASK(3, 0) + +#define BCM_MSG_SET_FIELD(hdr, field, val) \ + do { \ + hdr &= ~BCM_MSG_##field##_MASK; \ + hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \ + } while (0) + +#define BCM_MSG_GET_FIELD(hdr, field) \ + FIELD_GET(BCM_MSG_##field##_MASK, hdr) + u32 msg; +}; + +struct bcm74110_mbox_chan { + struct bcm74110_mbox *mbox; + bool en; + int slot; + int type; +}; + +struct bcm74110_mbox { + struct platform_device 
*pdev; + void __iomem *base; + + int tx_chan; + int rx_chan; + int rx_irq; + struct list_head rx_svc_init_list; + spinlock_t rx_svc_list_lock; + + struct mbox_controller controller; + struct bcm74110_mbox_chan *mbox_chan; +}; + +#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \ +static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\ + u32 val, u32 off) \ +{ \ + writel_relaxed(val, mbox->base + offset_base + off); \ +} +BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan)); +BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan)); + +#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \ +static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \ + u32 off) \ +{ \ + return readl_relaxed(mbox->base + offset_base + off); \ +} +BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan)); +BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan)); +BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan)); + +static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl( + struct mbox_controller *cntrl) +{ + return container_of(cntrl, struct bcm74110_mbox, controller); +} + +static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val) +{ + struct bcm74110_mbox_msg *msg; + + msg = kzalloc(sizeof(*msg), GFP_ATOMIC); + if (!msg) + return; + + INIT_LIST_HEAD(&msg->list_entry); + msg->msg = val; + + spin_lock(&mbox->rx_svc_list_lock); + list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list); + spin_unlock(&mbox->rx_svc_list_lock); +} + +static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox) +{ + struct device *dev = &mbox->pdev->dev; + struct bcm74110_mbox_chan *chan_priv; + struct mbox_chan *chan; + u32 msg, status; + int type; + + do { + msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA); + status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0); + + dev_dbg(dev, "rx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n", + BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY), + BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC), + BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT)); + + type = BCM_MSG_GET_FIELD(msg, SVC); + switch (type) { + case BCM_MSG_SVC_INIT: + bcm74110_rx_push_init_msg(mbox, msg); + break; + case BCM_MSG_SVC_PMC: + case BCM_MSG_SVC_SCMI: + case BCM_MSG_SVC_DPFE: + chan = &mbox->controller.chans[type]; + chan_priv = chan->con_priv; + if (chan_priv->en) + mbox_chan_received_data(chan, NULL); + else + dev_warn(dev, "Channel not enabled\n"); + break; + default: + dev_warn(dev, "Unsupported msg received\n"); + } + } while (status & BCM_MBOX_STATUS0_NOT_EMPTY); +} + +static irqreturn_t bcm74110_mbox_isr(int irq, void *data) +{ + struct bcm74110_mbox *mbox = data; + u32 status; + + status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS); + + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR); + + if (status & BCM_MBOX_IRQ_NOT_EMPTY) + bcm74110_rx_process_msg(mbox); + else + dev_warn(&mbox->pdev->dev, "Spurious interrupt\n"); + + return IRQ_HANDLED; +} + +static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox) +{ + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET); + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR); +} + +static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type, + u32 *val) +{ + struct bcm74110_mbox_msg *msg, *msg_tmp; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&mbox->rx_svc_list_lock, flags); + list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list, + 
list_entry) { + if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) { + list_del(&msg->list_entry); + found = true; + break; + } + } + spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags); + + if (!found) + return -EINVAL; + + *val = msg->msg; + kfree(msg); + + return 0; +} + +static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox) +{ + struct bcm74110_mbox_msg *msg, *msg_tmp; + LIST_HEAD(list_temp); + unsigned long flags; + + spin_lock_irqsave(&mbox->rx_svc_list_lock, flags); + list_splice_init(&mbox->rx_svc_init_list, &list_temp); + spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags); + + list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) { + list_del(&msg->list_entry); + kfree(msg); + } +} + +#define BCM_DEQUEUE_TIMEOUT_MS 30 +static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type, + u32 *val) +{ + int ret, timeout = 0; + + do { + ret = bcm74110_rx_pop_init_msg(mbox, func_type, val); + + if (!ret) + return 0; + + /* TODO: Figure out what is a good sleep here. */ + usleep_range(1000, 2000); + timeout++; + } while (timeout < BCM_DEQUEUE_TIMEOUT_MS); + + dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n"); + return -ETIMEDOUT; +} + +static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func, + int length, int slot) +{ + u32 msg = 0; + + BCM_MSG_SET_FIELD(msg, REQ, req); + BCM_MSG_SET_FIELD(msg, RPLY, rply); + BCM_MSG_SET_FIELD(msg, SVC, svc); + BCM_MSG_SET_FIELD(msg, FUNC, func); + BCM_MSG_SET_FIELD(msg, LENGTH, length); + BCM_MSG_SET_FIELD(msg, SLOT, slot); + + return msg; +} + +static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg) +{ + int val; + + /* We can potentially poll with timeout here instead */ + val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0); + if (val & BCM_MBOX_STATUS0_FULL) { + dev_err(&mbox->pdev->dev, "Mailbox full\n"); + return -EINVAL; + } + + dev_dbg(&mbox->pdev->dev, "tx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n", + BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY), + BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC), + BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT)); + + bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA); + + return 0; +} + +#define BCM_MBOX_LINK_TRAINING_RETRIES 5 +static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox) +{ + int ret, retries = 0; + u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0; + + do { + switch (len) { + case 0: + retries++; + dev_warn(&mbox->pdev->dev, + "Link train failed, trying again... 
%d\n", + retries); + if (retries > BCM_MBOX_LINK_TRAINING_RETRIES) + return -EINVAL; + len = BCM_LINK_CODE0; + fallthrough; + case BCM_LINK_CODE0: + case BCM_LINK_CODE1: + case BCM_LINK_CODE2: + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_LINK_START, + len, BCM_MSG_SVC_INIT); + break; + default: + break; + } + + bcm74110_mbox_tx_msg(mbox, msg); + + /* No response expected for LINK_CODE2 */ + if (len == BCM_LINK_CODE2) + return 0; + + orig_len = len; + + ret = bcm74110_rx_pop_init_msg_block(mbox, + BCM_MSG_GET_FIELD(msg, FUNC), + &msg); + if (ret) { + len = 0; + continue; + } + + if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) || + (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) || + (BCM_MSG_GET_FIELD(msg, SLOT) != 0) || + (BCM_MSG_GET_FIELD(msg, RPLY) != 1) || + (BCM_MSG_GET_FIELD(msg, REQ) != 0)) { + len = 0; + continue; + } + + len = BCM_MSG_GET_FIELD(msg, LENGTH); + + /* Make sure sequence is good */ + if (len != (orig_len + 1)) { + len = 0; + continue; + } + } while (1); + + return -EINVAL; +} + +static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg) +{ + int ret; + u32 recv_msg; + + ret = bcm74110_mbox_tx_msg(mbox, msg); + if (ret) + return ret; + + ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC), + &recv_msg); + if (ret) + return ret; + + /* + * Modify tx message to verify rx ack. + * Flip RPLY/REQ for synchronous messages + */ + if (BCM_MSG_GET_FIELD(msg, REQ) == 1) { + BCM_MSG_SET_FIELD(msg, RPLY, 1); + BCM_MSG_SET_FIELD(msg, REQ, 0); + } + + if (msg != recv_msg) { + dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n"); + return -EINVAL; + } + + return 0; +} + +/* Each index points to 0x100 of HAB MEM. IDX size counts from 0 */ +#define BCM_MBOX_HAB_MEM_IDX_START 0x30 +#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0 +static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox) +{ + u32 msg = 0; + int ret; + + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_SHMEM_STOP, + 0, BCM_MSG_SVC_INIT); + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); + if (ret) + return -EINVAL; + + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_SHMEM_TX, + BCM_MBOX_HAB_MEM_IDX_START, + BCM_MBOX_HAB_MEM_IDX_SIZE); + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); + if (ret) + return -EINVAL; + + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_SHMEM_RX, + BCM_MBOX_HAB_MEM_IDX_START, + BCM_MBOX_HAB_MEM_IDX_SIZE); + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); + if (ret) + return -EINVAL; + + return 0; +} + +static int bcm74110_mbox_init(struct bcm74110_mbox *mbox) +{ + int ret = 0; + + /* Disable queues tx/rx */ + bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL); + + /* Clear status & restart tx/rx*/ + bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR, + BCM_MBOX_CTRL); + + /* Unmask irq */ + bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR); + + ret = bcm74110_mbox_link_training(mbox); + if (ret) { + dev_err(&mbox->pdev->dev, "Training failed\n"); + return ret; + } + + return bcm74110_mbox_shmem_init(mbox); +} + +static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct bcm74110_mbox_chan *chan_priv = chan->con_priv; + u32 msg; + + switch (chan_priv->type) { + case BCM_MSG_SVC_PMC: + case BCM_MSG_SVC_SCMI: + case BCM_MSG_SVC_DPFE: + msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0, + 128 + 28, chan_priv->slot); + break; + default: + return -EINVAL; + } + + return 
bcm74110_mbox_tx_msg(chan_priv->mbox, msg); +} + +static int bcm74110_mbox_chan_startup(struct mbox_chan *chan) +{ + struct bcm74110_mbox_chan *chan_priv = chan->con_priv; + + chan_priv->en = true; + + return 0; +} + +static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan) +{ + struct bcm74110_mbox_chan *chan_priv = chan->con_priv; + + chan_priv->en = false; +} + +static const struct mbox_chan_ops bcm74110_mbox_chan_ops = { + .send_data = bcm74110_mbox_send_data, + .startup = bcm74110_mbox_chan_startup, + .shutdown = bcm74110_mbox_chan_shutdown, +}; + +static void bcm74110_mbox_shutdown(struct platform_device *pdev) +{ + struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev); + u32 msg; + + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, + BCM_MSG_FUNC_LINK_STOP, + 0, 0); + + bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); + + /* Even if we don't receive ACK, lets shut it down */ + + bcm74110_mbox_mask_and_clear(mbox); + + /* Disable queues tx/rx */ + bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL); + + /* Flush queues */ + bcm74110_rx_flush_msg(mbox); +} + +static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl, + const struct of_phandle_args *p) +{ + struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl); + struct device *dev = &mbox->pdev->dev; + struct bcm74110_mbox_chan *chan_priv; + int slot, type; + + if (p->args_count != 2) { + dev_err(dev, "Invalid arguments\n"); + return ERR_PTR(-EINVAL); + } + + type = p->args[0]; + slot = p->args[1]; + + switch (type) { + case BCM_MSG_SVC_PMC: + case BCM_MSG_SVC_SCMI: + case BCM_MSG_SVC_DPFE: + if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) { + dev_err(dev, "Not enough shared memory\n"); + return ERR_PTR(-EINVAL); + } + chan_priv = cntrl->chans[type].con_priv; + chan_priv->slot = slot; + chan_priv->type = type; + break; + default: + dev_err(dev, "Invalid channel type: %d\n", type); + return ERR_PTR(-EINVAL); + } + + return &cntrl->chans[type]; +} + +static int bcm74110_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bcm74110_mbox *mbox; + int i, ret; + + mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + mbox->pdev = pdev; + platform_set_drvdata(pdev, mbox); + + mbox->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mbox->base)) + return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n"); + + ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan); + if (ret) + return dev_err_probe(dev, ret, "Failed to find tx channel\n"); + + ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan); + if (ret) + return dev_err_probe(dev, ret, "Failed to find rx channel\n"); + + mbox->rx_irq = platform_get_irq(pdev, 0); + if (mbox->rx_irq < 0) + return mbox->rx_irq; + + INIT_LIST_HEAD(&mbox->rx_svc_init_list); + spin_lock_init(&mbox->rx_svc_list_lock); + bcm74110_mbox_mask_and_clear(mbox); + + ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr, + IRQF_NO_SUSPEND, pdev->name, mbox); + if (ret) + return dev_err_probe(dev, ret, "Failed to request irq\n"); + + mbox->controller.ops = &bcm74110_mbox_chan_ops; + mbox->controller.dev = dev; + mbox->controller.num_chans = BCM_MSG_SVC_MAX; + mbox->controller.of_xlate = &bcm74110_mbox_of_xlate; + mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX, + sizeof(*mbox->controller.chans), + GFP_KERNEL); + if (!mbox->controller.chans) + return -ENOMEM; + + mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX, + sizeof(*mbox->mbox_chan), + GFP_KERNEL); + if 
(!mbox->mbox_chan) + return -ENOMEM; + + for (i = 0; i < BCM_MSG_SVC_MAX; i++) { + mbox->mbox_chan[i].mbox = mbox; + mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i]; + } + + ret = devm_mbox_controller_register(dev, &mbox->controller); + if (ret) + return ret; + + ret = bcm74110_mbox_init(mbox); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id bcm74110_mbox_of_match[] = { + { .compatible = "brcm,bcm74110-mbox", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match); + +static struct platform_driver bcm74110_mbox_driver = { + .driver = { + .name = "bcm74110-mbox", + .of_match_table = bcm74110_mbox_of_match, + }, + .probe = bcm74110_mbox_probe, + .shutdown = bcm74110_mbox_shutdown, +}; +module_platform_driver(bcm74110_mbox_driver); + +MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>"); +MODULE_DESCRIPTION("BCM74110 mailbox driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index ab4e8d1954a1..532929916e99 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -390,7 +390,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) task = kzalloc(sizeof(*task), GFP_ATOMIC); if (!task) { - __pm_runtime_put_autosuspend(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); return -ENOMEM; } @@ -440,7 +440,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) list_move_tail(&task->list_entry, &thread->task_busy_list); pm_runtime_mark_last_busy(cmdq->mbox.dev); - __pm_runtime_put_autosuspend(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); return 0; } @@ -488,7 +488,7 @@ done: spin_unlock_irqrestore(&thread->chan->lock, flags); pm_runtime_mark_last_busy(cmdq->mbox.dev); - __pm_runtime_put_autosuspend(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); } static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) @@ -528,7 +528,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) out: spin_unlock_irqrestore(&thread->chan->lock, flags); pm_runtime_mark_last_busy(cmdq->mbox.dev); - __pm_runtime_put_autosuspend(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); return 0; @@ -543,7 +543,7 @@ wait: return -EFAULT; } pm_runtime_mark_last_busy(cmdq->mbox.dev); - __pm_runtime_put_autosuspend(cmdq->mbox.dev); + pm_runtime_put_autosuspend(cmdq->mbox.dev); return 0; } diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index f6714c233f5a..0a00719b2482 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -306,6 +306,22 @@ static void pcc_chan_acknowledge(struct pcc_chan_info *pchan) pcc_chan_reg_read_modify_write(&pchan->db); } +static void *write_response(struct pcc_chan_info *pchan) +{ + struct pcc_header pcc_header; + void *buffer; + int data_len; + + memcpy_fromio(&pcc_header, pchan->chan.shmem, + sizeof(pcc_header)); + data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header); + + buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len); + if (buffer != NULL) + memcpy_fromio(buffer, pchan->chan.shmem, data_len); + return buffer; +} + /** * pcc_mbox_irq - PCC mailbox interrupt handler * @irq: interrupt number @@ -317,6 +333,8 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p) { struct pcc_chan_info *pchan; struct mbox_chan *chan = p; + struct pcc_header *pcc_header = chan->active_req; + void *handle = NULL; pchan = chan->con_priv; @@ -340,7 +358,17 @@ static irqreturn_t pcc_mbox_irq(int 
irq, void *p) * required to avoid any possible race in updatation of this flag. */ pchan->chan_in_use = false; - mbox_chan_received_data(chan, NULL); + + if (pchan->chan.rx_alloc) + handle = write_response(pchan); + + if (chan->active_req) { + pcc_header = chan->active_req; + if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY) + mbox_chan_txdone(chan, 0); + } + + mbox_chan_received_data(chan, handle); pcc_chan_acknowledge(pchan); @@ -384,9 +412,24 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id) pcc_mchan = &pchan->chan; pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr, pcc_mchan->shmem_size); - if (pcc_mchan->shmem) - return pcc_mchan; + if (!pcc_mchan->shmem) + goto err; + + pcc_mchan->manage_writes = false; + + /* This indicates that the channel is ready to accept messages. + * This needs to happen after the channel has registered + * its callback. There is no access point to do that in + * the mailbox API. That implies that the mailbox client must + * have set the allocate callback function prior to + * sending any messages. + */ + if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) + pcc_chan_reg_read_modify_write(&pchan->cmd_update); + + return pcc_mchan; +err: mbox_free_channel(chan); return ERR_PTR(-ENXIO); } @@ -417,8 +460,38 @@ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan) } EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); +static int pcc_write_to_buffer(struct mbox_chan *chan, void *data) +{ + struct pcc_chan_info *pchan = chan->con_priv; + struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan; + struct pcc_header *pcc_header = data; + + if (!pchan->chan.manage_writes) + return 0; + + /* The PCC header length includes the command field + * but not the other values from the header. + */ + int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header); + u64 val; + + pcc_chan_reg_read(&pchan->cmd_complete, &val); + if (!val) { + pr_info("%s pchan->cmd_complete not set", __func__); + return -1; + } + memcpy_toio(pcc_mbox_chan->shmem, data, len); + return 0; +} + + /** - * pcc_send_data - Called from Mailbox Controller code. Used + * pcc_send_data - Called from Mailbox Controller code. If + * pchan->chan.rx_alloc is set, then the command complete + * flag is checked and the data is written to the shared + * buffer io memory. + * + * If pchan->chan.rx_alloc is not set, then it is used * here only to ring the channel doorbell. The PCC client * specific read/write is done in the client driver in * order to maintain atomicity over PCC channel once @@ -434,17 +507,37 @@ static int pcc_send_data(struct mbox_chan *chan, void *data) int ret; struct pcc_chan_info *pchan = chan->con_priv; + ret = pcc_write_to_buffer(chan, data); + if (ret) + return ret; + ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update); if (ret) return ret; ret = pcc_chan_reg_read_modify_write(&pchan->db); + if (!ret && pchan->plat_irq > 0) pchan->chan_in_use = true; return ret; } + +static bool pcc_last_tx_done(struct mbox_chan *chan) +{ + struct pcc_chan_info *pchan = chan->con_priv; + u64 val; + + pcc_chan_reg_read(&pchan->cmd_complete, &val); + if (!val) + return false; + else + return true; +} + + + /** * pcc_startup - Called from Mailbox Controller code. Used here * to request the interrupt. 
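Taken together, the pcc.c hunks above let a PCC mailbox client delegate shared-memory handling to the driver: the client supplies an rx_alloc() buffer allocator before its first transfer, may set manage_writes so pcc_send_data() copies an outgoing message (beginning with a struct pcc_header) into shmem once the command-complete bit allows it, and then receives the reply as an allocated buffer through its ordinary rx_callback. The sketch below is illustrative only and not part of the patch; the my_* names and the client's message layout are hypothetical, error handling is trimmed, and it assumes struct pcc_mbox_chan gains the rx_alloc and manage_writes members shown above.

	#include <linux/err.h>
	#include <linux/mailbox_client.h>
	#include <linux/slab.h>
	#include <acpi/pcc.h>

	/* Hypothetical client; only the pcc.c interfaces above are real. */
	static void *my_rx_alloc(struct mbox_client *cl, int size)
	{
		/* Called from pcc_mbox_irq(), so use an atomic allocation. */
		return kmalloc(size, GFP_ATOMIC);
	}

	static void my_rx_callback(struct mbox_client *cl, void *msg)
	{
		/*
		 * 'msg' is the buffer my_rx_alloc() returned, already filled
		 * from the channel's shared memory by write_response().
		 */
		kfree(msg);
	}

	static int my_pcc_client_init(struct device *dev, int subspace_id)
	{
		static struct mbox_client cl;	/* must outlive the channel */
		struct pcc_mbox_chan *pchan;

		cl.dev = dev;
		cl.rx_callback = my_rx_callback;

		pchan = pcc_mbox_request_channel(&cl, subspace_id);
		if (IS_ERR(pchan))
			return PTR_ERR(pchan);

		/*
		 * Per the comment in pcc_mbox_request_channel(), the allocator
		 * must be in place before any message is sent.
		 */
		pchan->rx_alloc = my_rx_alloc;
		pchan->manage_writes = true;

		return 0;
	}

With manage_writes set, pcc_write_to_buffer() refuses to copy a new command into shmem until the channel's command-complete register reads non-zero, and the new pcc_last_tx_done() exposes that same bit to the mailbox core for tx-done polling.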
@@ -490,6 +583,7 @@ static const struct mbox_chan_ops pcc_chan_ops = { .send_data = pcc_send_data, .startup = pcc_startup, .shutdown = pcc_shutdown, + .last_tx_done = pcc_last_tx_done, }; /** diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c index ea44ffb5ce1a..d957d989c0ce 100644 --- a/drivers/mailbox/qcom-ipcc.c +++ b/drivers/mailbox/qcom-ipcc.c @@ -312,8 +312,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev) if (!name) return -ENOMEM; - ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node), - &qcom_ipcc_irq_ops, ipcc); + ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc); if (!ipcc->irq_domain) return -ENOMEM; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 15c538ee9537..79ea85d18e24 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -438,7 +438,7 @@ static bool rs_is_reshapable(struct raid_set *rs) /* Return true, if raid set in @rs is recovering */ static bool rs_is_recovering(struct raid_set *rs) { - return rs->md.recovery_cp < rs->md.dev_sectors; + return rs->md.resync_offset < rs->md.dev_sectors; } /* Return true, if raid set in @rs is reshaping */ @@ -768,7 +768,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r rs->md.layout = raid_type->algorithm; rs->md.new_layout = rs->md.layout; rs->md.delta_disks = 0; - rs->md.recovery_cp = MaxSector; + rs->md.resync_offset = MaxSector; for (i = 0; i < raid_devs; i++) md_rdev_init(&rs->dev[i].rdev); @@ -912,7 +912,7 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as) rs->md.external = 0; rs->md.persistent = 1; rs->md.major_version = 2; - } else if (rebuild && !rs->md.recovery_cp) { + } else if (rebuild && !rs->md.resync_offset) { /* * Without metadata, we will not be able to tell if the array * is in-sync or not - we must assume it is not. Therefore, @@ -1695,20 +1695,20 @@ static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) { /* raid0 does not recover */ if (rs_is_raid0(rs)) - rs->md.recovery_cp = MaxSector; + rs->md.resync_offset = MaxSector; /* * A raid6 set has to be recovered either * completely or for the grown part to * ensure proper parity and Q-Syndrome */ else if (rs_is_raid6(rs)) - rs->md.recovery_cp = dev_sectors; + rs->md.resync_offset = dev_sectors; /* * Other raid set types may skip recovery * depending on the 'nosync' flag. */ else - rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) + rs->md.resync_offset = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) ? MaxSector : dev_sectors; } @@ -2143,7 +2143,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev) sb->events = cpu_to_le64(mddev->events); sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); - sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); + sb->array_resync_offset = cpu_to_le64(mddev->resync_offset); sb->level = cpu_to_le32(mddev->level); sb->layout = cpu_to_le32(mddev->layout); @@ -2334,18 +2334,18 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) } if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) - mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); + mddev->resync_offset = le64_to_cpu(sb->array_resync_offset); /* * During load, we set FirstUse if a new superblock was written. * There are two reasons we might not have a superblock: * 1) The raid set is brand new - in which case, all of the * devices must have their In_sync bit set. Also, - * recovery_cp must be 0, unless forced. 
+ * resync_offset must be 0, unless forced. * 2) This is a new device being added to an old raid set * and the new device needs to be rebuilt - in which * case the In_sync bit will /not/ be set and - * recovery_cp must be MaxSector. + * resync_offset must be MaxSector. * 3) This is/are a new device(s) being added to an old * raid set during takeover to a higher raid level * to provide capacity for redundancy or during reshape @@ -2390,8 +2390,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) new_devs > 1 ? "s" : ""); return -EINVAL; } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) { - DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)", - (unsigned long long) mddev->recovery_cp); + DMERR("'rebuild' specified while raid set is not in-sync (resync_offset=%llu)", + (unsigned long long) mddev->resync_offset); return -EINVAL; } else if (rs_is_reshaping(rs)) { DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)", @@ -2700,11 +2700,11 @@ static int rs_adjust_data_offsets(struct raid_set *rs) } out: /* - * Raise recovery_cp in case data_offset != 0 to + * Raise resync_offset in case data_offset != 0 to * avoid false recovery positives in the constructor. */ - if (rs->md.recovery_cp < rs->md.dev_sectors) - rs->md.recovery_cp += rs->dev[0].rdev.data_offset; + if (rs->md.resync_offset < rs->md.dev_sectors) + rs->md.resync_offset += rs->dev[0].rdev.data_offset; /* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */ rdev_for_each(rdev, &rs->md) { @@ -2759,7 +2759,7 @@ static int rs_setup_takeover(struct raid_set *rs) } clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; while (d--) { rdev = &rs->dev[d].rdev; @@ -2767,7 +2767,7 @@ static int rs_setup_takeover(struct raid_set *rs) if (test_bit(d, (void *) rs->rebuild_disks)) { clear_bit(In_sync, &rdev->flags); clear_bit(Faulty, &rdev->flags); - mddev->recovery_cp = rdev->recovery_offset = 0; + mddev->resync_offset = rdev->recovery_offset = 0; /* Bitmap has to be created when we do an "up" takeover */ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); } @@ -3225,7 +3225,7 @@ size_check: if (r) goto bad; - rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors); + rs_setup_recovery(rs, rs->md.resync_offset < rs->md.dev_sectors ? 
rs->md.resync_offset : rs->md.dev_sectors); } else { /* This is no size change or it is shrinking, update size and record in superblocks */ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); @@ -3449,7 +3449,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, } else { if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery)) - r = mddev->recovery_cp; + r = mddev->resync_offset; else r = mddev->curr_resync_completed; @@ -4077,9 +4077,9 @@ static int raid_preresume(struct dm_target *ti) } /* Check for any resize/reshape on @rs and adjust/initiate */ - if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) { + if (mddev->resync_offset && mddev->resync_offset < MaxSector) { set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); - mddev->resync_min = mddev->recovery_cp; + mddev->resync_min = mddev->resync_offset; if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) mddev->resync_max_sectors = mddev->dev_sectors; } diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 7f524a26cebc..334b71404930 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1987,12 +1987,12 @@ static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s, md_bitmap_set_memory_bits(bitmap, sec, 1); md_bitmap_file_set_bit(bitmap, sec); - if (sec < bitmap->mddev->recovery_cp) + if (sec < bitmap->mddev->resync_offset) /* We are asserting that the array is dirty, - * so move the recovery_cp address back so + * so move the resync_offset address back so * that it is obvious that it is dirty */ - bitmap->mddev->recovery_cp = sec; + bitmap->mddev->resync_offset = sec; } } @@ -2258,7 +2258,7 @@ static int bitmap_load(struct mddev *mddev) || bitmap->events_cleared == mddev->events) /* no need to keep dirty bits to optimise a * re-add of a missing device */ - start = mddev->recovery_cp; + start = mddev->resync_offset; mutex_lock(&mddev->bitmap_info.mutex); err = md_bitmap_init_from_disk(bitmap, start); diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 94221d964d4f..5497eaee96e7 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -337,11 +337,11 @@ static void recover_bitmaps(struct md_thread *thread) md_wakeup_thread(mddev->sync_thread); if (hi > 0) { - if (lo < mddev->recovery_cp) - mddev->recovery_cp = lo; + if (lo < mddev->resync_offset) + mddev->resync_offset = lo; /* wake up thread to continue resync in case resync * is not finished */ - if (mddev->recovery_cp != MaxSector) { + if (mddev->resync_offset != MaxSector) { /* * clear the REMOTE flag since we will launch * resync thread in current node. @@ -863,9 +863,9 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots) lockres_free(bm_lockres); continue; } - if ((hi > 0) && (lo < mddev->recovery_cp)) { + if ((hi > 0) && (lo < mddev->resync_offset)) { set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - mddev->recovery_cp = lo; + mddev->resync_offset = lo; md_check_recovery(mddev); } @@ -1027,7 +1027,7 @@ static int leave(struct mddev *mddev) * Also, we should send BITMAP_NEEDS_SYNC message in * case reshaping is interrupted. 
*/ - if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) || + if ((cinfo->slot_number > 0 && mddev->resync_offset != MaxSector) || (mddev->reshape_position != MaxSector && test_bit(MD_CLOSING, &mddev->flags))) resync_bitmap(mddev); @@ -1605,8 +1605,8 @@ static int gather_bitmaps(struct md_rdev *rdev) pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); goto out; } - if ((hi > 0) && (lo < mddev->recovery_cp)) - mddev->recovery_cp = lo; + if ((hi > 0) && (lo < mddev->resync_offset)) + mddev->resync_offset = lo; } out: return err; diff --git a/drivers/md/md.c b/drivers/md/md.c index 046fe85c76fe..ac85ec73a409 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -637,6 +637,12 @@ static void __mddev_put(struct mddev *mddev) return; /* + * If array is freed by stopping array, MD_DELETED is set by + * do_md_stop(), MD_DELETED is still set here in case mddev is freed + * directly by closing a mddev that is created by create_on_open. + */ + set_bit(MD_DELETED, &mddev->flags); + /* * Call queue_work inside the spinlock so that flush_workqueue() after * mddev_find will succeed in waiting for the work to be done. */ @@ -1409,13 +1415,13 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru mddev->layout = -1; if (sb->state & (1<<MD_SB_CLEAN)) - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; else { if (sb->events_hi == sb->cp_events_hi && sb->events_lo == sb->cp_events_lo) { - mddev->recovery_cp = sb->recovery_cp; + mddev->resync_offset = sb->resync_offset; } else - mddev->recovery_cp = 0; + mddev->resync_offset = 0; } memcpy(mddev->uuid+0, &sb->set_uuid0, 4); @@ -1541,13 +1547,13 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) mddev->minor_version = sb->minor_version; if (mddev->in_sync) { - sb->recovery_cp = mddev->recovery_cp; + sb->resync_offset = mddev->resync_offset; sb->cp_events_hi = (mddev->events>>32); sb->cp_events_lo = (u32)mddev->events; - if (mddev->recovery_cp == MaxSector) + if (mddev->resync_offset == MaxSector) sb->state = (1<< MD_SB_CLEAN); } else - sb->recovery_cp = 0; + sb->resync_offset = 0; sb->layout = mddev->layout; sb->chunk_size = mddev->chunk_sectors << 9; @@ -1895,7 +1901,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc mddev->bitmap_info.default_space = (4096-1024) >> 9; mddev->reshape_backwards = 0; - mddev->recovery_cp = le64_to_cpu(sb->resync_offset); + mddev->resync_offset = le64_to_cpu(sb->resync_offset); memcpy(mddev->uuid, sb->set_uuid, 16); mddev->max_disks = (4096-256)/2; @@ -2081,7 +2087,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->utime = cpu_to_le64((__u64)mddev->utime); sb->events = cpu_to_le64(mddev->events); if (mddev->in_sync) - sb->resync_offset = cpu_to_le64(mddev->recovery_cp); + sb->resync_offset = cpu_to_le64(mddev->resync_offset); else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) sb->resync_offset = cpu_to_le64(MaxSector); else @@ -2761,7 +2767,7 @@ repeat: /* If this is just a dirty<->clean transition, and the array is clean * and 'events' is odd, we can roll back to the previous clean state */ if (nospares - && (mddev->in_sync && mddev->recovery_cp == MaxSector) + && (mddev->in_sync && mddev->resync_offset == MaxSector) && mddev->can_decrease_events && mddev->events != 1) { mddev->events--; @@ -4297,9 +4303,9 @@ __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); static ssize_t resync_start_show(struct mddev *mddev, char *page) { - if (mddev->recovery_cp == 
MaxSector) + if (mddev->resync_offset == MaxSector) return sprintf(page, "none\n"); - return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); + return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset); } static ssize_t @@ -4325,7 +4331,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len) err = -EBUSY; if (!err) { - mddev->recovery_cp = n; + mddev->resync_offset = n; if (mddev->pers) set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } @@ -6417,7 +6423,7 @@ static void md_clean(struct mddev *mddev) mddev->external_size = 0; mddev->dev_sectors = 0; mddev->raid_disks = 0; - mddev->recovery_cp = 0; + mddev->resync_offset = 0; mddev->resync_min = 0; mddev->resync_max = MaxSector; mddev->reshape_position = MaxSector; @@ -7362,9 +7368,9 @@ int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) * openned */ if (info->state & (1<<MD_SB_CLEAN)) - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; else - mddev->recovery_cp = 0; + mddev->resync_offset = 0; mddev->persistent = ! info->not_persistent; mddev->external = 0; @@ -8303,7 +8309,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev) seq_printf(seq, "\tresync=REMOTE"); return 1; } - if (mddev->recovery_cp < MaxSector) { + if (mddev->resync_offset < MaxSector) { seq_printf(seq, "\tresync=PENDING"); return 1; } @@ -8946,7 +8952,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action) return mddev->resync_min; case ACTION_RESYNC: if (!mddev->bitmap) - return mddev->recovery_cp; + return mddev->resync_offset; return 0; case ACTION_RESHAPE: /* @@ -9184,8 +9190,8 @@ void md_do_sync(struct md_thread *thread) atomic_read(&mddev->recovery_active) == 0); mddev->curr_resync_completed = j; if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && - j > mddev->recovery_cp) - mddev->recovery_cp = j; + j > mddev->resync_offset) + mddev->resync_offset = j; update_time = jiffies; set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); sysfs_notify_dirent_safe(mddev->sysfs_completed); @@ -9305,19 +9311,19 @@ void md_do_sync(struct md_thread *thread) mddev->curr_resync > MD_RESYNC_ACTIVE) { if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { - if (mddev->curr_resync >= mddev->recovery_cp) { + if (mddev->curr_resync >= mddev->resync_offset) { pr_debug("md: checkpointing %s of %s.\n", desc, mdname(mddev)); if (test_bit(MD_RECOVERY_ERROR, &mddev->recovery)) - mddev->recovery_cp = + mddev->resync_offset = mddev->curr_resync_completed; else - mddev->recovery_cp = + mddev->resync_offset = mddev->curr_resync; } } else - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; } else { if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev->curr_resync = MaxSector; @@ -9421,6 +9427,12 @@ static bool rdev_is_spare(struct md_rdev *rdev) static bool rdev_addable(struct md_rdev *rdev) { + struct mddev *mddev; + + mddev = READ_ONCE(rdev->mddev); + if (!mddev) + return false; + /* rdev is already used, don't add it again. */ if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 || test_bit(Faulty, &rdev->flags)) @@ -9431,7 +9443,7 @@ static bool rdev_addable(struct md_rdev *rdev) return true; /* Allow to add if array is read-write. */ - if (md_is_rdwr(rdev->mddev)) + if (md_is_rdwr(mddev)) return true; /* @@ -9533,7 +9545,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares) } /* Check if resync is in progress. 
*/ - if (mddev->recovery_cp < MaxSector) { + if (mddev->resync_offset < MaxSector) { remove_spares(mddev, NULL); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); @@ -9714,7 +9726,7 @@ void md_check_recovery(struct mddev *mddev) test_bit(MD_RECOVERY_DONE, &mddev->recovery) || (mddev->external == 0 && mddev->safemode == 1) || (mddev->safemode == 2 - && !mddev->in_sync && mddev->recovery_cp == MaxSector) + && !mddev->in_sync && mddev->resync_offset == MaxSector) )) return; @@ -9771,8 +9783,8 @@ void md_check_recovery(struct mddev *mddev) * remove disk. */ rdev_for_each_safe(rdev, tmp, mddev) { - if (test_and_clear_bit(ClusterRemove, &rdev->flags) && - rdev->raid_disk < 0) + if (rdev->raid_disk < 0 && + test_and_clear_bit(ClusterRemove, &rdev->flags)) md_kick_rdev_from_array(rdev); } } @@ -10078,8 +10090,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) /* Check for change of roles in the active devices */ rdev_for_each_safe(rdev2, tmp, mddev) { - if (test_bit(Faulty, &rdev2->flags)) + if (test_bit(Faulty, &rdev2->flags)) { + if (test_bit(ClusterRemove, &rdev2->flags)) + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); continue; + } /* Check if the roles changed */ role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); diff --git a/drivers/md/md.h b/drivers/md/md.h index 67b365621507..51af29a03079 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -523,7 +523,7 @@ struct mddev { unsigned long normal_io_events; /* IO event timestamp */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; - sector_t recovery_cp; + sector_t resync_offset; sector_t resync_min; /* user requested sync * starts here */ sector_t resync_max; /* resync should pause diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index cbe2a9054cb9..f1d8811a542a 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -674,7 +674,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev) mddev->raid_disks--; mddev->delta_disks = -1; /* make sure it will be not marked as dirty */ - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); @@ -717,7 +717,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev) mddev->raid_disks += mddev->delta_disks; mddev->degraded = 0; /* make sure it will be not marked as dirty */ - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); @@ -760,7 +760,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev) mddev->delta_disks = 1 - mddev->raid_disks; mddev->raid_disks = 1; /* make sure it will be not marked as dirty */ - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index b8b3a9069701..52881e6032da 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -283,7 +283,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev, static inline bool raid1_should_read_first(struct mddev *mddev, sector_t this_sector, int len) { - if ((mddev->recovery_cp < this_sector + len)) + if ((mddev->resync_offset < this_sector + len)) return true; if (mddev_is_clustered(mddev) && diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 64b8176907a9..408c26398321 100644 
--- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -127,10 +127,9 @@ static inline struct r1bio *get_resync_r1bio(struct bio *bio) return get_resync_pages(bio)->raid_bio; } -static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) +static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf) { - struct pool_info *pi = data; - int size = offsetof(struct r1bio, bios[pi->raid_disks]); + int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]); /* allocate a r1bio with room for raid_disks entries in the bios array */ return kzalloc(size, gfp_flags); @@ -145,18 +144,18 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) { - struct pool_info *pi = data; + struct r1conf *conf = data; struct r1bio *r1_bio; struct bio *bio; int need_pages; int j; struct resync_pages *rps; - r1_bio = r1bio_pool_alloc(gfp_flags, pi); + r1_bio = r1bio_pool_alloc(gfp_flags, conf); if (!r1_bio) return NULL; - rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), + rps = kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages), gfp_flags); if (!rps) goto out_free_r1bio; @@ -164,7 +163,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) /* * Allocate bios : 1 for reading, n-1 for writing */ - for (j = pi->raid_disks ; j-- ; ) { + for (j = conf->raid_disks * 2; j-- ; ) { bio = bio_kmalloc(RESYNC_PAGES, gfp_flags); if (!bio) goto out_free_bio; @@ -177,11 +176,11 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) * If this is a user-requested check/repair, allocate * RESYNC_PAGES for each bio. */ - if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) - need_pages = pi->raid_disks; + if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery)) + need_pages = conf->raid_disks * 2; else need_pages = 1; - for (j = 0; j < pi->raid_disks; j++) { + for (j = 0; j < conf->raid_disks * 2; j++) { struct resync_pages *rp = &rps[j]; bio = r1_bio->bios[j]; @@ -207,7 +206,7 @@ out_free_pages: resync_free_pages(&rps[j]); out_free_bio: - while (++j < pi->raid_disks) { + while (++j < conf->raid_disks * 2) { bio_uninit(r1_bio->bios[j]); kfree(r1_bio->bios[j]); } @@ -220,12 +219,12 @@ out_free_r1bio: static void r1buf_pool_free(void *__r1_bio, void *data) { - struct pool_info *pi = data; + struct r1conf *conf = data; int i; struct r1bio *r1bio = __r1_bio; struct resync_pages *rp = NULL; - for (i = pi->raid_disks; i--; ) { + for (i = conf->raid_disks * 2; i--; ) { rp = get_resync_pages(r1bio->bios[i]); resync_free_pages(rp); bio_uninit(r1bio->bios[i]); @@ -255,7 +254,7 @@ static void free_r1bio(struct r1bio *r1_bio) struct r1conf *conf = r1_bio->mddev->private; put_all_bios(conf, r1_bio); - mempool_free(r1_bio, &conf->r1bio_pool); + mempool_free(r1_bio, conf->r1bio_pool); } static void put_buf(struct r1bio *r1_bio) @@ -1305,9 +1304,8 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio) struct r1conf *conf = mddev->private; struct r1bio *r1_bio; - r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); - /* Ensure no bio records IO_BLOCKED */ - memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2])); init_r1bio(r1_bio, mddev, bio); return r1_bio; } @@ -2747,7 +2745,7 @@ static int init_resync(struct r1conf *conf) BUG_ON(mempool_initialized(&conf->r1buf_pool)); return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, - r1buf_pool_free, conf->poolinfo); + r1buf_pool_free, 
conf); } static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) @@ -2757,7 +2755,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) struct bio *bio; int i; - for (i = conf->poolinfo->raid_disks; i--; ) { + for (i = conf->raid_disks * 2; i--; ) { bio = r1bio->bios[i]; rps = bio->bi_private; bio_reset(bio, NULL, 0); @@ -2822,7 +2820,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, } if (mddev->bitmap == NULL && - mddev->recovery_cp == MaxSector && + mddev->resync_offset == MaxSector && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && conf->fullsync == 0) { *skipped = 1; @@ -3085,6 +3083,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) int i; struct raid1_info *disk; struct md_rdev *rdev; + size_t r1bio_size; int err = -ENOMEM; conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); @@ -3121,21 +3120,15 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (!conf->tmppage) goto abort; - conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); - if (!conf->poolinfo) - goto abort; - conf->poolinfo->raid_disks = mddev->raid_disks * 2; - err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, - rbio_pool_free, conf->poolinfo); - if (err) + r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]); + conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size); + if (!conf->r1bio_pool) goto abort; err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); if (err) goto abort; - conf->poolinfo->mddev = mddev; - err = -EINVAL; spin_lock_init(&conf->device_lock); conf->raid_disks = mddev->raid_disks; @@ -3198,10 +3191,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) abort: if (conf) { - mempool_exit(&conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); - kfree(conf->poolinfo); kfree(conf->nr_pending); kfree(conf->nr_waiting); kfree(conf->nr_queued); @@ -3282,9 +3274,9 @@ static int raid1_run(struct mddev *mddev) } if (conf->raid_disks - mddev->degraded == 1) - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; - if (mddev->recovery_cp != MaxSector) + if (mddev->resync_offset != MaxSector) pr_info("md/raid1:%s: not clean -- starting background reconstruction\n", mdname(mddev)); pr_info("md/raid1:%s: active with %d out of %d mirrors\n", @@ -3311,10 +3303,9 @@ static void raid1_free(struct mddev *mddev, void *priv) { struct r1conf *conf = priv; - mempool_exit(&conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); - kfree(conf->poolinfo); kfree(conf->nr_pending); kfree(conf->nr_waiting); kfree(conf->nr_queued); @@ -3345,8 +3336,8 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors) md_set_array_sectors(mddev, newsize); if (sectors > mddev->dev_sectors && - mddev->recovery_cp > mddev->dev_sectors) { - mddev->recovery_cp = mddev->dev_sectors; + mddev->resync_offset > mddev->dev_sectors) { + mddev->resync_offset = mddev->dev_sectors; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } mddev->dev_sectors = sectors; @@ -3367,17 +3358,13 @@ static int raid1_reshape(struct mddev *mddev) * At the same time, we "pack" the devices so that all the missing * devices have the higher raid_disk numbers. 
*/ - mempool_t newpool, oldpool; - struct pool_info *newpoolinfo; + mempool_t *newpool, *oldpool; + size_t new_r1bio_size; struct raid1_info *newmirrors; struct r1conf *conf = mddev->private; int cnt, raid_disks; unsigned long flags; int d, d2; - int ret; - - memset(&newpool, 0, sizeof(newpool)); - memset(&oldpool, 0, sizeof(oldpool)); /* Cannot change chunk_size, layout, or level */ if (mddev->chunk_sectors != mddev->new_chunk_sectors || @@ -3403,24 +3390,16 @@ static int raid1_reshape(struct mddev *mddev) return -EBUSY; } - newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); - if (!newpoolinfo) + new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]); + newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size); + if (!newpool) { return -ENOMEM; - newpoolinfo->mddev = mddev; - newpoolinfo->raid_disks = raid_disks * 2; - - ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc, - rbio_pool_free, newpoolinfo); - if (ret) { - kfree(newpoolinfo); - return ret; } newmirrors = kzalloc(array3_size(sizeof(struct raid1_info), raid_disks, 2), GFP_KERNEL); if (!newmirrors) { - kfree(newpoolinfo); - mempool_exit(&newpool); + mempool_destroy(newpool); return -ENOMEM; } @@ -3429,7 +3408,6 @@ static int raid1_reshape(struct mddev *mddev) /* ok, everything is stopped */ oldpool = conf->r1bio_pool; conf->r1bio_pool = newpool; - init_waitqueue_head(&conf->r1bio_pool.wait); for (d = d2 = 0; d < conf->raid_disks; d++) { struct md_rdev *rdev = conf->mirrors[d].rdev; @@ -3446,8 +3424,6 @@ static int raid1_reshape(struct mddev *mddev) } kfree(conf->mirrors); conf->mirrors = newmirrors; - kfree(conf->poolinfo); - conf->poolinfo = newpoolinfo; spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded += (raid_disks - conf->raid_disks); @@ -3461,7 +3437,7 @@ static int raid1_reshape(struct mddev *mddev) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); - mempool_exit(&oldpool); + mempool_destroy(oldpool); return 0; } diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 33f318fcc268..d236ef179cfb 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -49,22 +49,6 @@ struct raid1_info { sector_t seq_start; }; -/* - * memory pools need a pointer to the mddev, so they can force an unplug - * when memory is tight, and a count of the number of drives that the - * pool was allocated for, so they know how much to allocate and free. - * mddev->raid_disks cannot be used, as it can change while a pool is active - * These two datums are stored in a kmalloced struct. - * The 'raid_disks' here is twice the raid_disks in r1conf. - * This allows space for each 'real' device can have a replacement in the - * second half of the array. 
- */ - -struct pool_info { - struct mddev *mddev; - int raid_disks; -}; - struct r1conf { struct mddev *mddev; struct raid1_info *mirrors; /* twice 'raid_disks' to @@ -114,11 +98,7 @@ struct r1conf { */ int recovery_disabled; - /* poolinfo contains information about the content of the - * mempools - it changes when the array grows or shrinks - */ - struct pool_info *poolinfo; - mempool_t r1bio_pool; + mempool_t *r1bio_pool; mempool_t r1buf_pool; struct bio_set bio_split; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 95dc354a86a0..b60c30bfb6c7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2117,7 +2117,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) int last = conf->geo.raid_disks - 1; struct raid10_info *p; - if (mddev->recovery_cp < MaxSector) + if (mddev->resync_offset < MaxSector) /* only hot-add to in-sync arrays, as recovery is * very different from resync */ @@ -3185,7 +3185,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, * of a clean array, like RAID1 does. */ if (mddev->bitmap == NULL && - mddev->recovery_cp == MaxSector && + mddev->resync_offset == MaxSector && mddev->reshape_position == MaxSector && !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && @@ -4145,7 +4145,7 @@ static int raid10_run(struct mddev *mddev) disk->recovery_disabled = mddev->recovery_disabled - 1; } - if (mddev->recovery_cp != MaxSector) + if (mddev->resync_offset != MaxSector) pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n", mdname(mddev)); pr_info("md/raid10:%s: active with %d out of %d devices\n", @@ -4245,8 +4245,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) md_set_array_sectors(mddev, size); if (sectors > mddev->dev_sectors && - mddev->recovery_cp > oldsize) { - mddev->recovery_cp = oldsize; + mddev->resync_offset > oldsize) { + mddev->resync_offset = oldsize; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } calc_sectors(conf, sectors); @@ -4275,7 +4275,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) mddev->delta_disks = mddev->raid_disks; mddev->raid_disks *= 2; /* make sure it will be not marked as dirty */ - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; mddev->dev_sectors = size; conf = setup_conf(mddev); @@ -5087,8 +5087,8 @@ static void raid10_finish_reshape(struct mddev *mddev) return; if (mddev->delta_disks > 0) { - if (mddev->recovery_cp > mddev->resync_max_sectors) { - mddev->recovery_cp = mddev->resync_max_sectors; + if (mddev->resync_offset > mddev->resync_max_sectors) { + mddev->resync_offset = mddev->resync_max_sectors; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } mddev->resync_max_sectors = mddev->array_sectors; diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index c0fb335311aa..56b234683ee6 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -1163,7 +1163,7 @@ static int ppl_load_distributed(struct ppl_log *log) le64_to_cpu(pplhdr->generation)); /* attempt to recover from log if we are starting a dirty array */ - if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector) + if (pplhdr && !mddev->pers && mddev->resync_offset != MaxSector) ret = ppl_recover(log, pplhdr, pplhdr_offset); /* write empty header if we are starting the array */ @@ -1422,14 +1422,14 @@ int ppl_init_log(struct r5conf *conf) if (ret) { goto err; - } else if (!mddev->pers && mddev->recovery_cp == 0 && + } else if (!mddev->pers && 
mddev->resync_offset == 0 && ppl_conf->recovered_entries > 0 && ppl_conf->mismatch_count == 0) { /* * If we are starting a dirty array and the recovery succeeds * without any issues, set the array as clean. */ - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } else if (mddev->pers && ppl_conf->mismatch_count > 0) { /* no mismatch allowed when enabling PPL for a running array */ diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7ec61ee7b218..023649fe2476 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3740,7 +3740,7 @@ static int want_replace(struct stripe_head *sh, int disk_idx) && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && (rdev->recovery_offset <= sh->sector - || rdev->mddev->recovery_cp <= sh->sector)) + || rdev->mddev->resync_offset <= sh->sector)) rv = 1; return rv; } @@ -3832,7 +3832,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, * is missing/faulty, then we need to read everything we can. */ if (!force_rcw && - sh->sector < sh->raid_conf->mddev->recovery_cp) + sh->sector < sh->raid_conf->mddev->resync_offset) /* reconstruct-write isn't being forced */ return 0; for (i = 0; i < s->failed && i < 2; i++) { @@ -4097,7 +4097,7 @@ static int handle_stripe_dirtying(struct r5conf *conf, int disks) { int rmw = 0, rcw = 0, i; - sector_t recovery_cp = conf->mddev->recovery_cp; + sector_t resync_offset = conf->mddev->resync_offset; /* Check whether resync is now happening or should start. * If yes, then the array is dirty (after unclean shutdown or @@ -4107,14 +4107,14 @@ static int handle_stripe_dirtying(struct r5conf *conf, * generate correct data from the parity. */ if (conf->rmw_level == PARITY_DISABLE_RMW || - (recovery_cp < MaxSector && sh->sector >= recovery_cp && + (resync_offset < MaxSector && sh->sector >= resync_offset && s->failed == 0)) { /* Calculate the real rcw later - for now make it * look like rcw is cheaper */ rcw = 1; rmw = 2; - pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", - conf->rmw_level, (unsigned long long)recovery_cp, + pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n", + conf->rmw_level, (unsigned long long)resync_offset, (unsigned long long)sh->sector); } else for (i = disks; i--; ) { /* would I have to read this buffer for read_modify_write */ @@ -4770,14 +4770,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (test_bit(STRIPE_SYNCING, &sh->state)) { /* If there is a failed device being replaced, * we must be recovering. - * else if we are after recovery_cp, we must be syncing + * else if we are after resync_offset, we must be syncing * else if MD_RECOVERY_REQUESTED is set, we also are syncing. * else we can only be replacing * sync and recovery both need to read all devices, and so * use the same flag. 
*/ if (do_recovery || - sh->sector >= conf->mddev->recovery_cp || + sh->sector >= conf->mddev->resync_offset || test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) s->syncing = 1; else @@ -7780,7 +7780,7 @@ static int raid5_run(struct mddev *mddev) int first = 1; int ret = -EIO; - if (mddev->recovery_cp != MaxSector) + if (mddev->resync_offset != MaxSector) pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", mdname(mddev)); @@ -7921,7 +7921,7 @@ static int raid5_run(struct mddev *mddev) mdname(mddev)); mddev->ro = 1; set_disk_ro(mddev->gendisk, 1); - } else if (mddev->recovery_cp == MaxSector) + } else if (mddev->resync_offset == MaxSector) set_bit(MD_JOURNAL_CLEAN, &mddev->flags); } @@ -7988,7 +7988,7 @@ static int raid5_run(struct mddev *mddev) mddev->resync_max_sectors = mddev->dev_sectors; if (mddev->degraded > dirty_parity_disks && - mddev->recovery_cp != MaxSector) { + mddev->resync_offset != MaxSector) { if (test_bit(MD_HAS_PPL, &mddev->flags)) pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n", mdname(mddev)); @@ -8328,8 +8328,8 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) md_set_array_sectors(mddev, newsize); if (sectors > mddev->dev_sectors && - mddev->recovery_cp > mddev->dev_sectors) { - mddev->recovery_cp = mddev->dev_sectors; + mddev->resync_offset > mddev->dev_sectors) { + mddev->resync_offset = mddev->dev_sectors; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } mddev->dev_sectors = sectors; @@ -8423,7 +8423,7 @@ static int raid5_start_reshape(struct mddev *mddev) return -EINVAL; /* raid5 can't handle concurrent reshape and recovery */ - if (mddev->recovery_cp < MaxSector) + if (mddev->resync_offset < MaxSector) return -EBUSY; for (i = 0; i < conf->raid_disks; i++) if (conf->disks[i].replacement) @@ -8648,7 +8648,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level) mddev->raid_disks += 1; mddev->delta_disks = 1; /* make sure it will be not marked as dirty */ - mddev->recovery_cp = MaxSector; + mddev->resync_offset = MaxSector; return setup_conf(mddev); } diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c index 9ab964c536e1..a802f95df99d 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.c +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -579,6 +579,8 @@ static struct platform_driver airoha_npu_driver = { }; module_platform_driver(airoha_npu_driver); +MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA); +MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); MODULE_DESCRIPTION("Airoha Network Processor Unit driver"); diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c index c354d536bc66..47411d2cbd28 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe.c +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -508,9 +508,11 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq); } -struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, - u32 hash) +static struct airoha_foe_entry * +airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash) { + lockdep_assert_held(&ppe_lock); + if (hash < PPE_SRAM_NUM_ENTRIES) { u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry); struct airoha_eth *eth = ppe->eth; @@ -537,6 +539,18 @@ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, return ppe->foe + hash * sizeof(struct airoha_foe_entry); } +struct 
airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, + u32 hash) +{ + struct airoha_foe_entry *hwe; + + spin_lock_bh(&ppe_lock); + hwe = airoha_ppe_foe_get_entry_locked(ppe, hash); + spin_unlock_bh(&ppe_lock); + + return hwe; +} + static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e, struct airoha_foe_entry *hwe) { @@ -651,7 +665,7 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe, struct airoha_flow_table_entry *f; int type; - hwe_p = airoha_ppe_foe_get_entry(ppe, hash); + hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash); if (!hwe_p) return -EINVAL; @@ -703,7 +717,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, spin_lock_bh(&ppe_lock); - hwe = airoha_ppe_foe_get_entry(ppe, hash); + hwe = airoha_ppe_foe_get_entry_locked(ppe, hash); if (!hwe) goto unlock; @@ -818,7 +832,7 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe, u32 ib1, state; int idle; - hwe = airoha_ppe_foe_get_entry(ppe, iter->hash); + hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash); if (!hwe) continue; @@ -855,7 +869,7 @@ static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe, if (e->hash == 0xffff) goto unlock; - hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash); + hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash); if (!hwe_p) goto unlock; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index d730af4a50c7..bb5d2fa15736 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -3856,8 +3856,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, status = be_mcc_notify_wait(adapter); err: - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); spin_unlock_bh(&adapter->mcc_lock); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 5d0c0906878d..a863f7841210 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1750,16 +1750,17 @@ err_register_mdiobus: static void ftgmac100_phy_disconnect(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; - if (!netdev->phydev) + if (!phydev) return; - phy_disconnect(netdev->phydev); + phy_disconnect(phydev); if (of_phy_is_fixed_link(priv->dev->of_node)) of_phy_deregister_fixed_link(priv->dev->of_node); if (priv->use_ncsi) - fixed_phy_unregister(netdev->phydev); + fixed_phy_unregister(phydev); } static void ftgmac100_destroy_mdio(struct net_device *netdev) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 0c588e03b15e..d09e456f14c0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -371,8 +371,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev, of_node_put(ptp_node); } - if (ptp_dev) + if (ptp_dev) { ptp = platform_get_drvdata(ptp_dev); + put_device(&ptp_dev->dev); + } if (ptp) info->phc_index = ptp->phc_index; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index f63a29e2e031..de0fb272c847 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -829,19 +829,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev) { 
struct platform_device *ierb_pdev; struct device_node *ierb_node; + int ret; ierb_node = of_find_compatible_node(NULL, NULL, "fsl,ls1028a-enetc-ierb"); - if (!ierb_node || !of_device_is_available(ierb_node)) + if (!ierb_node) return -ENODEV; + if (!of_device_is_available(ierb_node)) { + of_node_put(ierb_node); + return -ENODEV; + } + ierb_pdev = of_find_device_by_node(ierb_node); of_node_put(ierb_node); if (!ierb_pdev) return -EPROBE_DEFER; - return enetc_ierb_register_pf(ierb_pdev, pdev); + ret = enetc_ierb_register_pf(ierb_pdev, pdev); + + put_device(&ierb_pdev->dev); + + return ret; } static const struct enetc_si_ops enetc_psi_ops = { diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 28f53cf2a174..5fd1f7327680 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1475,8 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev, if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); of_node_put(ptp_node); - if (ptp_dev) + if (ptp_dev) { ptp = platform_get_drvdata(ptp_dev); + put_device(&ptp_dev->dev); + } } if (ptp) diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 73c26fcfd85e..0a80d8f8cff7 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -2782,7 +2782,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, if (!pdev) goto err_of_node_put; - get_device(&pdev->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) goto err_put_device; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 218b1a09534c..b8c609d91d11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1574,6 +1574,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg); + skb_shinfo(skb)->gso_segs = lro_num_seg; /* Subtract one since we already counted this as one * "regular" packet in mlx5e_complete_rx_cqe() */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index 7bd7812d9c06..e67e99487a27 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -33,7 +33,7 @@ int __fbnic_open(struct fbnic_net *fbn) dev_warn(fbd->dev, "Error %d sending host ownership message to the firmware\n", err); - goto free_resources; + goto err_reset_queues; } err = fbnic_time_start(fbn); @@ -57,6 +57,8 @@ time_stop: fbnic_time_stop(fbn); release_ownership: fbnic_fw_xmit_ownership_msg(fbn->fbd, false); +err_reset_queues: + fbnic_reset_netif_queues(fbn); free_resources: fbnic_free_resources(fbn); free_napi_vectors: @@ -420,15 +422,17 @@ static void fbnic_get_stats64(struct net_device *dev, tx_packets = stats->packets; tx_dropped = stats->dropped; - stats64->tx_bytes = tx_bytes; - stats64->tx_packets = tx_packets; - stats64->tx_dropped = tx_dropped; - /* Record drops from Tx HW Datapath */ + spin_lock(&fbd->hw_stats_lock); tx_dropped += fbd->hw_stats.tmi.drop.frames.value + fbd->hw_stats.tti.cm_drop.frames.value + fbd->hw_stats.tti.frame_drop.frames.value + fbd->hw_stats.tti.tbi_drop.frames.value; + spin_unlock(&fbd->hw_stats_lock); + + stats64->tx_bytes = tx_bytes; + stats64->tx_packets = tx_packets; + 
stats64->tx_dropped = tx_dropped; for (i = 0; i < fbn->num_tx_queues; i++) { struct fbnic_ring *txr = fbn->tx[i]; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index ac11389a764c..f9543d03485f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -661,8 +661,8 @@ static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx, { struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; - page_pool_fragment_page(page, PAGECNT_BIAS_MAX); - rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX; + page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX); + rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX; rx_buf->page = page; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h index 2e361d6f03ff..34693596e5eb 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -91,10 +91,8 @@ struct fbnic_queue_stats { struct u64_stats_sync syncp; }; -/* Pagecnt bias is long max to reserve the last bit to catch overflow - * cases where if we overcharge the bias it will flip over to be negative. - */ -#define PAGECNT_BIAS_MAX LONG_MAX +#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE + struct fbnic_rx_buf { struct page *page; long pagecnt_bias; diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c index 2258f854e5be..e872f926e438 100644 --- a/drivers/net/ethernet/sfc/tc_encap_actions.c +++ b/drivers/net/ethernet/sfc/tc_encap_actions.c @@ -442,7 +442,7 @@ static void efx_tc_update_encap(struct efx_nic *efx, rule = container_of(acts, struct efx_tc_flow_rule, acts); if (rule->fallback) fallback = rule->fallback; - else /* fallback: deliver to PF */ + else /* fallback of the fallback: deliver to PF */ fallback = &efx->tc->facts.pf; rc = efx_mae_update_rule(efx, fallback->fw_id, rule->fw_id); diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 2a1c43316f46..50bfbc2779e4 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -685,11 +685,17 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) struct platform_device *pdev; struct device_node *iep_np; struct icss_iep *iep; + int ret; iep_np = of_parse_phandle(np, "ti,iep", idx); - if (!iep_np || !of_device_is_available(iep_np)) + if (!iep_np) return ERR_PTR(-ENODEV); + if (!of_device_is_available(iep_np)) { + of_node_put(iep_np); + return ERR_PTR(-ENODEV); + } + pdev = of_find_device_by_node(iep_np); of_node_put(iep_np); @@ -698,21 +704,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) return ERR_PTR(-EPROBE_DEFER); iep = platform_get_drvdata(pdev); - if (!iep) - return ERR_PTR(-EPROBE_DEFER); + if (!iep) { + ret = -EPROBE_DEFER; + goto err_put_pdev; + } device_lock(iep->dev); if (iep->client_np) { device_unlock(iep->dev); dev_err(iep->dev, "IEP is already acquired by %s", iep->client_np->name); - return ERR_PTR(-EBUSY); + ret = -EBUSY; + goto err_put_pdev; } iep->client_np = np; device_unlock(iep->dev); - get_device(iep->dev); return iep; + +err_put_pdev: + put_device(&pdev->dev); + + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(icss_iep_get_idx); diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 12f25cec6255..57e5f1c88f50 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ 
b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -706,9 +706,9 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) struct page_pool *pool; struct sk_buff *skb; struct xdp_buff xdp; + int headroom, ret; u32 *psdata; void *pa; - int ret; *xdp_state = 0; pool = rx_chn->pg_pool; @@ -757,22 +757,23 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len); - if (*xdp_state == ICSSG_XDP_PASS) - skb = xdp_build_skb_from_buff(&xdp); - else + if (*xdp_state != ICSSG_XDP_PASS) goto requeue; + headroom = xdp.data - xdp.data_hard_start; + pkt_len = xdp.data_end - xdp.data; } else { - /* prepare skb and send to n/w stack */ - skb = napi_build_skb(pa, PAGE_SIZE); + headroom = PRUETH_HEADROOM; } + /* prepare skb and send to n/w stack */ + skb = napi_build_skb(pa, PAGE_SIZE); if (!skb) { ndev->stats.rx_dropped++; page_pool_recycle_direct(pool, page); goto requeue; } - skb_reserve(skb, PRUETH_HEADROOM); + skb_reserve(skb, headroom); skb_put(skb, pkt_len); skb->dev = ndev; diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig index 6782c2cbf542..01d219d3760c 100644 --- a/drivers/net/ipa/Kconfig +++ b/drivers/net/ipa/Kconfig @@ -5,7 +5,7 @@ config QCOM_IPA depends on INTERCONNECT depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n - select QCOM_MDT_LOADER if ARCH_QCOM + select QCOM_MDT_LOADER select QCOM_SCM select QCOM_QMI_HELPERS help diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c index a59bd215494c..a53e9e6f6cdf 100644 --- a/drivers/net/ipa/ipa_sysfs.c +++ b/drivers/net/ipa/ipa_sysfs.c @@ -37,8 +37,12 @@ static const char *ipa_version_string(struct ipa *ipa) return "4.11"; case IPA_VERSION_5_0: return "5.0"; + case IPA_VERSION_5_1: + return "5.1"; + case IPA_VERSION_5_5: + return "5.5"; default: - return "0.0"; /* Won't happen (checked at probe time) */ + return "0.0"; /* Should not happen */ } } diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index b6e30bdf5325..7baab230008a 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -209,10 +209,9 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv) if (ret) return ret; - if (!priv->clk) + rate = clk_get_rate(priv->clk); + if (!rate) rate = 250000000; - else - rate = clk_get_rate(priv->clk); div = (rate / (2 * priv->clk_freq)) - 1; if (div & ~MDIO_CLK_DIV_MASK) { diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 6b800081eed5..275706de5847 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -900,6 +900,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk, get_unaligned_be32(ptp_multicast)); } else { val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST; + val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST; vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val); vsc85xx_ts_write_csr(phydev, blk, diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h index da3465360e90..ae9ad925bfa8 100644 --- a/drivers/net/phy/mscc/mscc_ptp.h +++ b/drivers/net/phy/mscc/mscc_ptp.h @@ -98,6 +98,7 @@ #define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3) #define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK GENMASK(22, 20) #define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST 0x400000 +#define 
ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST 0x200000 #define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR 0x100000 #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK GENMASK(17, 16) #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST 0x020000 diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index b6489da5cfcd..48487149c225 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -785,6 +785,7 @@ static struct phy_driver smsc_phy_driver[] = { /* PHY_BASIC_FEATURES */ + .flags = PHY_RST_AFTER_CLK_EN, .probe = smsc_phy_probe, /* basic functions */ diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 5feaa70b5f47..90737cb71892 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -159,19 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) int len; unsigned char *data; __u32 seq_recv; - - struct rtable *rt; struct net_device *tdev; struct iphdr *iph; int max_headroom; if (sk_pppox(po)->sk_state & PPPOX_DEAD) - goto tx_error; + goto tx_drop; rt = pptp_route_output(po, &fl4); if (IS_ERR(rt)) - goto tx_error; + goto tx_drop; tdev = rt->dst.dev; @@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); - if (!new_skb) { - ip_rt_put(rt); + + if (!new_skb) goto tx_error; - } + if (skb->sk) skb_set_owner_w(new_skb, skb->sk); consume_skb(skb); skb = new_skb; } + /* Ensure we can safely access protocol field and LCP code */ + if (!pskb_may_pull(skb, 3)) + goto tx_error; + data = skb->data; islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7; @@ -262,6 +264,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) return 1; tx_error: + ip_rt_put(rt); +tx_drop: kfree_skb(skb); return 1; } diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index f6ddbe553289..201fc8809a62 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -742,7 +742,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl, "%s: qid %d failed to generate digest, error %d\n", __func__, chap->qid, ret); goto out_free_psk; - }; + } dev_dbg(ctrl->device, "%s: generated digest %s\n", __func__, digest); ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len, @@ -752,7 +752,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl, "%s: qid %d failed to derive TLS psk, error %d\n", __func__, chap->qid, ret); goto out_free_digest; - }; + } tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring, ctrl->opts->host->nqn, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9d988f4cb87a..812c1565114f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3158,6 +3158,11 @@ static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) return ctrl->opts && ctrl->opts->discovery_nqn; } +static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl) +{ + return ctrl->cntrltype == NVME_CTRL_ADMIN; +} + static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { @@ -3670,6 +3675,17 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) if (ret) return ret; + if (nvme_admin_ctrl(ctrl)) { + /* + * An admin controller has one admin queue, but no I/O queues. + * Override queue_count so it only creates an admin queue. 
+ */ + dev_dbg(ctrl->device, + "Subsystem %s is an administrative controller", + ctrl->subsys->subnqn); + ctrl->queue_count = 1; + } + ret = nvme_configure_apst(ctrl); if (ret < 0) return ret; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 08a5ea3e9383..3e12d4683ac7 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1363,7 +1363,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * - * The behavior of the fc-nvme initiator is such that it's + * The behavior of the fc-nvme initiator is such that its * understanding of the association and connections will implicitly * be torn down. The action is implicit as it may be due to a loss of * connectivity with the fc-nvme target, so you may never get a @@ -2777,7 +2777,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, * as WRITE ZEROES will return a non-zero rq payload_bytes yet * there is no actual payload to be transferred. * To get it right, key data transmission on there being 1 or - * more physical segments in the sg list. If there is no + * more physical segments in the sg list. If there are no * physical segments, there is no payload. */ if (blk_rq_nr_phys_segments(rq)) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 071efec25346..2c6d9506b172 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -935,7 +935,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req, nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped); if (unlikely(iter->status)) - nvme_free_sgls(req); + nvme_unmap_data(req); return iter->status; } diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 9233f088fac8..c0fe8cfb7229 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2179,7 +2179,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) /* * Only start IO queues for which we have allocated the tagset - * and limitted it to the available queues. On reconnects, the + * and limited it to the available queues. On reconnects, the * queue number might have changed. 
*/ nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 884286f90688..0dd7bd99afa3 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1960,24 +1960,24 @@ static int __init nvmet_init(void) if (!nvmet_wq) goto out_free_buffered_work_queue; - error = nvmet_init_discovery(); + error = nvmet_init_debugfs(); if (error) goto out_free_nvmet_work_queue; - error = nvmet_init_debugfs(); + error = nvmet_init_discovery(); if (error) - goto out_exit_discovery; + goto out_exit_debugfs; error = nvmet_init_configfs(); if (error) - goto out_exit_debugfs; + goto out_exit_discovery; return 0; -out_exit_debugfs: - nvmet_exit_debugfs(); out_exit_discovery: nvmet_exit_discovery(); +out_exit_debugfs: + nvmet_exit_debugfs(); out_free_nvmet_work_queue: destroy_workqueue(nvmet_wq); out_free_buffered_work_queue: @@ -1992,8 +1992,8 @@ out_destroy_bvec_cache: static void __exit nvmet_exit(void) { nvmet_exit_configfs(); - nvmet_exit_debugfs(); nvmet_exit_discovery(); + nvmet_exit_debugfs(); ida_destroy(&cntlid_ida); destroy_workqueue(nvmet_wq); destroy_workqueue(buffered_io_wq); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 25598a46bf0d..a9b18c051f5b 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -459,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * - * The behavior of the fc-nvme target is such that it's + * The behavior of the fc-nvme target is such that its * understanding of the association and connections will implicitly * be torn down. The action is implicit as it may be due to a loss of * connectivity with the fc-nvme host, so the target may never get a @@ -2313,7 +2313,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); if (ret) { /* - * should be ok to set w/o lock as its in the thread of + * should be ok to set w/o lock as it's in the thread of * execution (not an async timer routine) and doesn't * contend with any clearing action */ @@ -2629,7 +2629,7 @@ transport_error: * and the api of the FC LLDD which may issue a hw command to send the * response, but the LLDD may not get the hw completion for that command * and upcall the nvmet_fc layer before a new command may be - * asynchronously received - its possible for a command to be received + * asynchronously received - it's possible for a command to be received * before the LLDD and nvmet_fc have recycled the job structure. It gives * the appearance of more commands received than fits in the sq. 
* To alleviate this scenario, a temporary queue is maintained in the diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 3b4b0df8f879..0c361b1e3566 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -533,6 +533,8 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) case NVME_FEAT_HOST_ID: req->execute = nvmet_execute_get_features; return NVME_SC_SUCCESS; + case NVME_FEAT_FDP: + return nvmet_setup_passthru_command(req); default: return nvmet_passthru_get_set_features(req); } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 67f61c67c167..0485e25ab797 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1731,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, * We registered an ib_client to handle device removal for queues, * so we only need to handle the listening port cm_ids. In this case * we nullify the priv to prevent double cm_id destruction and destroying - * the cm_id implicitely by returning a non-zero rc to the callout. + * the cm_id implicitly by returning a non-zero rc to the callout. */ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue) @@ -1742,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, /* * This is a queue cm_id. we have registered * an ib_client to handle queues removal - * so don't interfear and just return. + * so don't interfere and just return. */ return 0; } @@ -1760,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, /* * We need to return 1 so that the core will destroy - * it's own ID. What a great API design.. + * its own ID. What a great API design.. */ return 1; } diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 9bbb0ff4cc15..b679c7f28f51 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -280,10 +280,12 @@ static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq, static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { + struct irq_data *irq_data; struct vmd_irq *vmdirq; for (int i = 0; i < nr_irqs; ++i) { - vmdirq = irq_get_chip_data(virq + i); + irq_data = irq_domain_get_irq_data(domain, virq + i); + vmdirq = irq_data->chip_data; synchronize_srcu(&vmdirq->irq->srcu); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 8ed9b96518cf..554d83c4af0c 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3884,7 +3884,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator, new_delta = ret; /* check that voltage is converging quickly enough */ - if (new_delta - delta > rdev->constraints->max_uV_step) { + if (delta - new_delta < rdev->constraints->max_uV_step) { ret = -EWOULDBLOCK; goto out; } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 88b625ba1978..4b7ffa840563 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -180,7 +180,7 @@ struct ap_card { atomic64_t total_request_count; /* # requests ever for this AP device.*/ }; -#define TAPQ_CARD_HWINFO_MASK 0xFEFF0000FFFF0F0FUL +#define TAPQ_CARD_HWINFO_MASK 0xFFFF0000FFFF0F0FUL #define ASSOC_IDX_INVALID 0x10000 #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index b28a840b3b04..14307dd800b7 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ 
-295,7 +295,7 @@ static struct spi_board_info *cs42l43_create_bridge_amp(struct cs42l43_spi *priv struct spi_board_info *info; if (spkid >= 0) { - props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL); + props = devm_kcalloc(priv->dev, 2, sizeof(*props), GFP_KERNEL); if (!props) return NULL; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 23286e4d7b49..8570fdf2e14a 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -615,6 +615,7 @@ void vhost_dev_init(struct vhost_dev *dev, vq->log = NULL; vq->indirect = NULL; vq->heads = NULL; + vq->nheads = NULL; vq->dev = dev; mutex_init(&vq->mutex); vhost_vq_reset(dev, vq); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index f9cdbf8c53e3..37bd18730fe0 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; - } else if (oldo - delta >= (unsigned long)c->vc_screenbuf) + } else c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index eabbc4bd7cf6..55f5731e94c3 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -837,7 +837,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, fg_vc->vc_rows); } - update_screen(vc_cons[fg_console].d); + if (fg_console != unit) + update_screen(vc_cons[fg_console].d); } /** @@ -1375,6 +1376,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, struct vc_data *svc; struct fbcon_ops *ops = info->fbcon_par; int rows, cols; + unsigned long ret = 0; p = &fb_display[unit]; @@ -1425,11 +1427,10 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; - vc_resize(vc, cols, rows); + ret = vc_resize(vc, cols, rows); - if (con_is_visible(vc)) { + if (con_is_visible(vc) && !ret) update_screen(vc); - } } static __inline__ void ywrap_up(struct vc_data *vc, int count) |
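Several hunks above (dpaa_ethtool, gianfar_ethtool, enetc_pf, icss_iep) converge on the same reference-counting rule: of_find_compatible_node() and of_parse_phandle() return a device_node with an elevated refcount that must be dropped with of_node_put() on every exit path, and of_find_device_by_node() takes a reference on the embedded struct device that must be released with put_device() once the lookup result has been consumed. The following is a minimal sketch of that corrected lookup, not code from any of the drivers touched here; the "example,provider" compatible string and the helper name are placeholders for illustration.

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/*
 * Hypothetical helper: fetch a provider's driver data without leaking
 * either the OF node reference or the platform device reference.
 */
static void *example_get_provider_drvdata(void)
{
	struct device_node *np;
	struct platform_device *pdev;
	void *drvdata;

	np = of_find_compatible_node(NULL, NULL, "example,provider");
	if (!np)
		return NULL;

	/* The node reference must be dropped even when the node is disabled. */
	if (!of_device_is_available(np)) {
		of_node_put(np);
		return NULL;
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return NULL;

	/*
	 * of_find_device_by_node() returned pdev with a reference held on
	 * &pdev->dev; copy out what is needed, then release it.
	 */
	drvdata = platform_get_drvdata(pdev);
	put_device(&pdev->dev);

	return drvdata;
}

The enetc and icss_iep hunks apply the same rule with goto-based unwinding, keeping the device reference only for the duration of the call that needs it and dropping it on every error path.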